Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-28 13:03:43 +00:00
Merge pull request #41582 from dashpole/unit_test_status
Automatic merge from submit-queue (batch tested with PRs 42973, 41582)

Improve status manager unit testing

This is designed to simplify testing logic in the status manager and decrease reliance on syncBatch. It is a smaller portion of #37119, and should be easier to review than that change. It makes the following changes:
- creates convenience functions for get, update, and delete core.Action
- prefers using syncPod on elements in the podStatusChannel over using syncBatch, to reduce unintended reliance on syncBatch
- combines consuming, validating, and clearing actions into a single verifyActions function, replacing calls to testSyncBatch(), verifyActions(), and ClearActions()
- changes comments in testing functions into log statements for easier debugging

@Random-Liu
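The change centers on a small helper that drains the manager's pod status channel and syncs each pending entry, so tests exercise syncPod directly instead of relying on syncBatch. The sketch below illustrates that drain-and-count pattern in isolation; the `manager`, `versionedStatus`, and `syncPod` here are simplified stand-ins for the kubelet's real types, not the actual implementation.

```go
package main

import "fmt"

// versionedStatus is a stand-in for the kubelet's internal status entry;
// the real manager carries a pod UID and a v1.PodStatus.
type versionedStatus struct {
	podUID string
	status string
}

// manager mimics the status manager's buffered update channel.
type manager struct {
	podStatusChannel chan versionedStatus
	synced           []versionedStatus
}

// syncPod stands in for the real syncPod, which writes the status to the API server.
func (m *manager) syncPod(uid, status string) {
	m.synced = append(m.synced, versionedStatus{podUID: uid, status: status})
}

// consumeUpdates drains the channel, syncing each pending update and
// returning how many were processed -- the pattern this PR introduces.
func (m *manager) consumeUpdates() int {
	updates := 0
	for {
		select {
		case req := <-m.podStatusChannel:
			m.syncPod(req.podUID, req.status)
			updates++
		default:
			return updates
		}
	}
}

func main() {
	m := &manager{podStatusChannel: make(chan versionedStatus, 10)}
	m.podStatusChannel <- versionedStatus{"pod-1", "Running"}
	m.podStatusChannel <- versionedStatus{"pod-1", "Failed"}
	fmt.Println(m.consumeUpdates(), "updates synced") // prints: 2 updates synced
}
```

In the real tests, verifyUpdates and verifyActions both call this helper first, so every pending status goes through syncPod before the fake client's recorded actions are compared against the expected ones.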
Commit e2d011e455
@@ -88,8 +88,10 @@ func getRandomPodStatus() v1.PodStatus {
 	}
 }
 
-func verifyActions(t *testing.T, kubeClient clientset.Interface, expectedActions []core.Action) {
-	actions := kubeClient.(*fake.Clientset).Actions()
+func verifyActions(t *testing.T, manager *manager, expectedActions []core.Action) {
+	manager.consumeUpdates()
+	actions := manager.kubeClient.(*fake.Clientset).Actions()
+	defer manager.kubeClient.(*fake.Clientset).ClearActions()
 	if len(actions) != len(expectedActions) {
 		t.Fatalf("unexpected actions, got: %+v expected: %+v", actions, expectedActions)
 		return
@@ -105,26 +107,25 @@ func verifyActions(t *testing.T, kubeClient clientset.Interface, expectedActions
 
 func verifyUpdates(t *testing.T, manager *manager, expectedUpdates int) {
 	// Consume all updates in the channel.
-	numUpdates := 0
-	for {
-		hasUpdate := true
-		select {
-		case <-manager.podStatusChannel:
-			numUpdates++
-		default:
-			hasUpdate = false
-		}
-
-		if !hasUpdate {
-			break
-		}
-	}
-
+	numUpdates := manager.consumeUpdates()
 	if numUpdates != expectedUpdates {
 		t.Errorf("unexpected number of updates %d, expected %d", numUpdates, expectedUpdates)
 	}
 }
 
+func (m *manager) consumeUpdates() int {
+	updates := 0
+	for {
+		select {
+		case syncRequest := <-m.podStatusChannel:
+			m.syncPod(syncRequest.podUID, syncRequest.status)
+			updates++
+		default:
+			return updates
+		}
+	}
+}
+
 func TestNewStatus(t *testing.T) {
 	syncer := newTestManager(&fake.Clientset{})
 	testPod := getTestPod()
@@ -285,34 +286,25 @@ func TestUnchangedStatusPreservesLastTransitionTime(t *testing.T) {
 	}
 }
 
-func TestSyncBatchIgnoresNotFound(t *testing.T) {
+func TestSyncPodIgnoresNotFound(t *testing.T) {
 	client := fake.Clientset{}
 	syncer := newTestManager(&client)
 	client.AddReactor("get", "pods", func(action core.Action) (bool, runtime.Object, error) {
 		return true, nil, errors.NewNotFound(api.Resource("pods"), "test-pod")
 	})
 	syncer.SetPodStatus(getTestPod(), getRandomPodStatus())
-	syncer.testSyncBatch()
-
-	verifyActions(t, syncer.kubeClient, []core.Action{
-		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
-	})
+	verifyActions(t, syncer, []core.Action{getAction()})
 }
 
-func TestSyncBatch(t *testing.T) {
+func TestSyncPod(t *testing.T) {
 	syncer := newTestManager(&fake.Clientset{})
 	testPod := getTestPod()
 	syncer.kubeClient = fake.NewSimpleClientset(testPod)
 	syncer.SetPodStatus(testPod, getRandomPodStatus())
-	syncer.testSyncBatch()
-	verifyActions(t, syncer.kubeClient, []core.Action{
-		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
-		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
-	},
-	)
+	verifyActions(t, syncer, []core.Action{getAction(), updateAction()})
 }
 
-func TestSyncBatchChecksMismatchedUID(t *testing.T) {
+func TestSyncPodChecksMismatchedUID(t *testing.T) {
 	syncer := newTestManager(&fake.Clientset{})
 	pod := getTestPod()
 	pod.UID = "first"
@@ -322,13 +314,10 @@ func TestSyncBatchChecksMismatchedUID(t *testing.T) {
 	syncer.podManager.AddPod(differentPod)
 	syncer.kubeClient = fake.NewSimpleClientset(pod)
 	syncer.SetPodStatus(differentPod, getRandomPodStatus())
-	syncer.testSyncBatch()
-	verifyActions(t, syncer.kubeClient, []core.Action{
-		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
-	})
+	verifyActions(t, syncer, []core.Action{getAction()})
 }
 
-func TestSyncBatchNoDeadlock(t *testing.T) {
+func TestSyncPodNoDeadlock(t *testing.T) {
 	client := &fake.Clientset{}
 	m := newTestManager(client)
 	pod := getTestPod()
@@ -350,53 +339,38 @@ func TestSyncBatchNoDeadlock(t *testing.T) {
 
 	pod.Status.ContainerStatuses = []v1.ContainerStatus{{State: v1.ContainerState{Running: &v1.ContainerStateRunning{}}}}
 
-	getAction := core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}}
-	updateAction := core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}}
-
-	// Pod not found.
+	t.Logf("Pod not found.")
 	ret = *pod
 	err = errors.NewNotFound(api.Resource("pods"), pod.Name)
 	m.SetPodStatus(pod, getRandomPodStatus())
-	m.testSyncBatch()
-	verifyActions(t, client, []core.Action{getAction})
-	client.ClearActions()
+	verifyActions(t, m, []core.Action{getAction()})
 
-	// Pod was recreated.
+	t.Logf("Pod was recreated.")
 	ret.UID = "other_pod"
 	err = nil
 	m.SetPodStatus(pod, getRandomPodStatus())
-	m.testSyncBatch()
-	verifyActions(t, client, []core.Action{getAction})
-	client.ClearActions()
+	verifyActions(t, m, []core.Action{getAction()})
 
-	// Pod not deleted (success case).
+	t.Logf("Pod not deleted (success case).")
 	ret = *pod
 	m.SetPodStatus(pod, getRandomPodStatus())
-	m.testSyncBatch()
-	verifyActions(t, client, []core.Action{getAction, updateAction})
-	client.ClearActions()
+	verifyActions(t, m, []core.Action{getAction(), updateAction()})
 
-	// Pod is terminated, but still running.
+	t.Logf("Pod is terminated, but still running.")
 	pod.DeletionTimestamp = new(metav1.Time)
 	m.SetPodStatus(pod, getRandomPodStatus())
-	m.testSyncBatch()
-	verifyActions(t, client, []core.Action{getAction, updateAction})
-	client.ClearActions()
+	verifyActions(t, m, []core.Action{getAction(), updateAction()})
 
-	// Pod is terminated successfully.
+	t.Logf("Pod is terminated successfully.")
 	pod.Status.ContainerStatuses[0].State.Running = nil
 	pod.Status.ContainerStatuses[0].State.Terminated = &v1.ContainerStateTerminated{}
 	m.SetPodStatus(pod, getRandomPodStatus())
-	m.testSyncBatch()
-	verifyActions(t, client, []core.Action{getAction, updateAction})
-	client.ClearActions()
+	verifyActions(t, m, []core.Action{getAction(), updateAction()})
 
-	// Error case.
+	t.Logf("Error case.")
 	err = fmt.Errorf("intentional test error")
 	m.SetPodStatus(pod, getRandomPodStatus())
-	m.testSyncBatch()
-	verifyActions(t, client, []core.Action{getAction})
-	client.ClearActions()
+	verifyActions(t, m, []core.Action{getAction()})
 }
 
 func TestStaleUpdates(t *testing.T) {
@@ -410,21 +384,13 @@ func TestStaleUpdates(t *testing.T) {
 	m.SetPodStatus(pod, status)
 	status.Message = "second version bump"
 	m.SetPodStatus(pod, status)
 
+	t.Logf("sync batch before syncPods pushes latest status, so we should see three statuses in the channel, but only one update")
+	m.syncBatch()
 	verifyUpdates(t, m, 3)
-	t.Logf("First sync pushes latest status.")
-	m.testSyncBatch()
-	verifyActions(t, m.kubeClient, []core.Action{
-		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
-		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
-	})
-	client.ClearActions()
-
-	for i := 0; i < 2; i++ {
-		t.Logf("Next 2 syncs should be ignored (%d).", i)
-		m.testSyncBatch()
-		verifyActions(t, m.kubeClient, []core.Action{})
-	}
+	verifyActions(t, m, []core.Action{getAction(), updateAction()})
+	t.Logf("Nothing left in the channel to sync")
+	verifyActions(t, m, []core.Action{})
 
 	t.Log("Unchanged status should not send an update.")
 	m.SetPodStatus(pod, status)
@@ -434,13 +400,10 @@ func TestStaleUpdates(t *testing.T) {
 	m.apiStatusVersions[pod.UID] = m.apiStatusVersions[pod.UID] - 1
 
 	m.SetPodStatus(pod, status)
-	m.testSyncBatch()
-	verifyActions(t, m.kubeClient, []core.Action{
-		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
-		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
-	})
+	m.syncBatch()
+	verifyActions(t, m, []core.Action{getAction(), updateAction()})
 
-	// Nothing stuck in the pipe.
+	t.Logf("Nothing stuck in the pipe.")
 	verifyUpdates(t, m, 0)
 }
 
@@ -527,7 +490,7 @@ func TestStaticPod(t *testing.T) {
 	client := fake.NewSimpleClientset(mirrorPod)
 	m := newTestManager(client)
 
-	// Create the static pod
+	t.Logf("Create the static pod")
 	m.podManager.AddPod(staticPod)
 	assert.True(t, kubepod.IsStaticPod(staticPod), "SetUp error: staticPod")
 
@@ -536,69 +499,57 @@ func TestStaticPod(t *testing.T) {
 	status.StartTime = &now
 	m.SetPodStatus(staticPod, status)
 
-	// Should be able to get the static pod status from status manager
+	t.Logf("Should be able to get the static pod status from status manager")
 	retrievedStatus := expectPodStatus(t, m, staticPod)
 	normalizeStatus(staticPod, &status)
 	assert.True(t, isStatusEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)
 
-	// Should not sync pod because there is no corresponding mirror pod for the static pod.
-	m.testSyncBatch()
-	verifyActions(t, m.kubeClient, []core.Action{})
-	client.ClearActions()
+	t.Logf("Should not sync pod in syncBatch because there is no corresponding mirror pod for the static pod.")
+	m.syncBatch()
+	assert.Equal(t, len(m.kubeClient.(*fake.Clientset).Actions()), 0, "Expected no updates after syncBatch, got %+v", m.kubeClient.(*fake.Clientset).Actions())
 
-	// Create the mirror pod
+	t.Logf("Create the mirror pod")
 	m.podManager.AddPod(mirrorPod)
 	assert.True(t, kubepod.IsMirrorPod(mirrorPod), "SetUp error: mirrorPod")
 	assert.Equal(t, m.podManager.TranslatePodUID(mirrorPod.UID), staticPod.UID)
 
-	// Should be able to get the mirror pod status from status manager
+	t.Logf("Should be able to get the mirror pod status from status manager")
 	retrievedStatus, _ = m.GetPodStatus(mirrorPod.UID)
 	assert.True(t, isStatusEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)
 
-	// Should sync pod because the corresponding mirror pod is created
-	m.testSyncBatch()
-	verifyActions(t, m.kubeClient, []core.Action{
-		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
-		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
-	})
-	updateAction := client.Actions()[1].(core.UpdateActionImpl)
-	updatedPod := updateAction.Object.(*v1.Pod)
-	assert.Equal(t, mirrorPod.UID, updatedPod.UID, "Expected mirrorPod (%q), but got %q", mirrorPod.UID, updatedPod.UID)
-	assert.True(t, isStatusEqual(&status, &updatedPod.Status), "Expected: %+v, Got: %+v", status, updatedPod.Status)
-	client.ClearActions()
+	t.Logf("Should sync pod because the corresponding mirror pod is created")
+	verifyActions(t, m, []core.Action{getAction(), updateAction()})
 
-	// Should not sync pod because nothing is changed.
+	t.Logf("syncBatch should not sync any pods because nothing is changed.")
 	m.testSyncBatch()
-	verifyActions(t, m.kubeClient, []core.Action{})
+	verifyActions(t, m, []core.Action{})
 
-	// Change mirror pod identity.
+	t.Logf("Change mirror pod identity.")
 	m.podManager.DeletePod(mirrorPod)
 	mirrorPod.UID = "new-mirror-pod"
 	mirrorPod.Status = v1.PodStatus{}
 	m.podManager.AddPod(mirrorPod)
 
-	// Should not update to mirror pod, because UID has changed.
-	m.testSyncBatch()
-	verifyActions(t, m.kubeClient, []core.Action{
-		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
-	})
+	t.Logf("Should not update to mirror pod, because UID has changed.")
+	m.syncBatch()
+	verifyActions(t, m, []core.Action{getAction()})
 }
 
 func TestTerminatePod(t *testing.T) {
 	syncer := newTestManager(&fake.Clientset{})
 	testPod := getTestPod()
-	// update the pod's status to Failed. TerminatePod should preserve this status update.
+	t.Logf("update the pod's status to Failed. TerminatePod should preserve this status update.")
 	firstStatus := getRandomPodStatus()
 	firstStatus.Phase = v1.PodFailed
 	syncer.SetPodStatus(testPod, firstStatus)
 
-	// set the testPod to a pod with Phase running, to simulate a stale pod
+	t.Logf("set the testPod to a pod with Phase running, to simulate a stale pod")
 	testPod.Status = getRandomPodStatus()
 	testPod.Status.Phase = v1.PodRunning
 
 	syncer.TerminatePod(testPod)
 
-	// we expect the container statuses to have changed to terminated
+	t.Logf("we expect the container statuses to have changed to terminated")
 	newStatus := expectPodStatus(t, syncer, testPod)
 	for i := range newStatus.ContainerStatuses {
 		assert.False(t, newStatus.ContainerStatuses[i].State.Terminated == nil, "expected containers to be terminated")
@@ -607,7 +558,7 @@ func TestTerminatePod(t *testing.T) {
 		assert.False(t, newStatus.InitContainerStatuses[i].State.Terminated == nil, "expected init containers to be terminated")
 	}
 
-	// we expect the previous status update to be preserved.
+	t.Logf("we expect the previous status update to be preserved.")
 	assert.Equal(t, newStatus.Phase, firstStatus.Phase)
 	assert.Equal(t, newStatus.Message, firstStatus.Message)
 }
@@ -712,10 +663,10 @@ func TestSyncBatchCleanupVersions(t *testing.T) {
 		kubetypes.ConfigMirrorAnnotationKey: "mirror",
 	}
 
-	// Orphaned pods should be removed.
+	t.Logf("Orphaned pods should be removed.")
 	m.apiStatusVersions[testPod.UID] = 100
 	m.apiStatusVersions[mirrorPod.UID] = 200
-	m.testSyncBatch()
+	m.syncBatch()
 	if _, ok := m.apiStatusVersions[testPod.UID]; ok {
 		t.Errorf("Should have cleared status for testPod")
 	}
@@ -723,7 +674,7 @@ func TestSyncBatchCleanupVersions(t *testing.T) {
 		t.Errorf("Should have cleared status for mirrorPod")
 	}
 
-	// Non-orphaned pods should not be removed.
+	t.Logf("Non-orphaned pods should not be removed.")
 	m.SetPodStatus(testPod, getRandomPodStatus())
 	m.podManager.AddPod(mirrorPod)
 	staticPod := mirrorPod
@@ -746,8 +697,9 @@ func TestReconcilePodStatus(t *testing.T) {
 	client := fake.NewSimpleClientset(testPod)
 	syncer := newTestManager(client)
 	syncer.SetPodStatus(testPod, getRandomPodStatus())
-	// Call syncBatch directly to test reconcile
+	t.Logf("Call syncBatch directly to test reconcile")
 	syncer.syncBatch() // The apiStatusVersions should be set now
+	client.ClearActions()
 
 	podStatus, ok := syncer.GetPodStatus(testPod.UID)
 	if !ok {
@@ -755,42 +707,36 @@ func TestReconcilePodStatus(t *testing.T) {
 	}
 	testPod.Status = podStatus
 
-	// If the pod status is the same, a reconciliation is not needed,
-	// syncBatch should do nothing
+	t.Logf("If the pod status is the same, a reconciliation is not needed and syncBatch should do nothing")
 	syncer.podManager.UpdatePod(testPod)
 	if syncer.needsReconcile(testPod.UID, podStatus) {
 		t.Errorf("Pod status is the same, a reconciliation is not needed")
 	}
-	client.ClearActions()
 	syncer.syncBatch()
-	verifyActions(t, client, []core.Action{})
+	verifyActions(t, syncer, []core.Action{})
 
 	// If the pod status is the same, only the timestamp is in Rfc3339 format (lower precision without nanosecond),
 	// a reconciliation is not needed, syncBatch should do nothing.
 	// The StartTime should have been set in SetPodStatus().
 	// TODO(random-liu): Remove this later when api becomes consistent for timestamp.
+	t.Logf("Syncbatch should do nothing, as a reconciliation is not required")
 	normalizedStartTime := testPod.Status.StartTime.Rfc3339Copy()
 	testPod.Status.StartTime = &normalizedStartTime
 	syncer.podManager.UpdatePod(testPod)
 	if syncer.needsReconcile(testPod.UID, podStatus) {
 		t.Errorf("Pod status only differs for timestamp format, a reconciliation is not needed")
 	}
-	client.ClearActions()
 	syncer.syncBatch()
-	verifyActions(t, client, []core.Action{})
+	verifyActions(t, syncer, []core.Action{})
 
-	// If the pod status is different, a reconciliation is needed, syncBatch should trigger an update
+	t.Logf("If the pod status is different, a reconciliation is needed, syncBatch should trigger an update")
 	testPod.Status = getRandomPodStatus()
 	syncer.podManager.UpdatePod(testPod)
 	if !syncer.needsReconcile(testPod.UID, podStatus) {
 		t.Errorf("Pod status is different, a reconciliation is needed")
 	}
-	client.ClearActions()
 	syncer.syncBatch()
-	verifyActions(t, client, []core.Action{
-		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
-		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
-	})
+	verifyActions(t, syncer, []core.Action{getAction(), updateAction()})
 }
 
 func expectPodStatus(t *testing.T, m *manager, pod *v1.Pod) v1.PodStatus {
@@ -803,7 +749,7 @@ func expectPodStatus(t *testing.T, m *manager, pod *v1.Pod) v1.PodStatus {
 
 func TestDeletePods(t *testing.T) {
 	pod := getTestPod()
-	// Set the deletion timestamp.
+	t.Logf("Set the deletion timestamp.")
 	pod.DeletionTimestamp = new(metav1.Time)
 	client := fake.NewSimpleClientset(pod)
 	m := newTestManager(client)
@@ -814,13 +760,8 @@ func TestDeletePods(t *testing.T) {
 	status.StartTime = &now
 	m.SetPodStatus(pod, status)
 
-	m.testSyncBatch()
-	// Expect to see an delete action.
-	verifyActions(t, m.kubeClient, []core.Action{
-		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
-		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
-		core.DeleteActionImpl{ActionImpl: core.ActionImpl{Verb: "delete", Resource: schema.GroupVersionResource{Resource: "pods"}}},
-	})
+	t.Logf("Expect to see a delete action.")
+	verifyActions(t, m, []core.Action{getAction(), updateAction(), deleteAction()})
 }
 
 func TestDoNotDeleteMirrorPods(t *testing.T) {
@@ -832,13 +773,13 @@ func TestDoNotDeleteMirrorPods(t *testing.T) {
 		kubetypes.ConfigSourceAnnotationKey: "api",
 		kubetypes.ConfigMirrorAnnotationKey: "mirror",
 	}
-	// Set the deletion timestamp.
+	t.Logf("Set the deletion timestamp.")
 	mirrorPod.DeletionTimestamp = new(metav1.Time)
 	client := fake.NewSimpleClientset(mirrorPod)
 	m := newTestManager(client)
 	m.podManager.AddPod(staticPod)
 	m.podManager.AddPod(mirrorPod)
-	// Verify setup.
+	t.Logf("Verify setup.")
 	assert.True(t, kubepod.IsStaticPod(staticPod), "SetUp error: staticPod")
 	assert.True(t, kubepod.IsMirrorPod(mirrorPod), "SetUp error: mirrorPod")
 	assert.Equal(t, m.podManager.TranslatePodUID(mirrorPod.UID), staticPod.UID)
@@ -848,10 +789,18 @@ func TestDoNotDeleteMirrorPods(t *testing.T) {
 	status.StartTime = &now
 	m.SetPodStatus(staticPod, status)
 
-	m.testSyncBatch()
-	// Expect not to see an delete action.
-	verifyActions(t, m.kubeClient, []core.Action{
-		core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}},
-		core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}},
-	})
+	t.Logf("Expect not to see a delete action.")
+	verifyActions(t, m, []core.Action{getAction(), updateAction()})
+}
+
+func getAction() core.GetAction {
+	return core.GetActionImpl{ActionImpl: core.ActionImpl{Verb: "get", Resource: schema.GroupVersionResource{Resource: "pods"}}}
+}
+
+func updateAction() core.UpdateAction {
+	return core.UpdateActionImpl{ActionImpl: core.ActionImpl{Verb: "update", Resource: schema.GroupVersionResource{Resource: "pods"}, Subresource: "status"}}
+}
+
+func deleteAction() core.DeleteAction {
+	return core.DeleteActionImpl{ActionImpl: core.ActionImpl{Verb: "delete", Resource: schema.GroupVersionResource{Resource: "pods"}}}
 }