@@ -31,10 +31,8 @@ import (

    docker "github.com/fsouza/go-dockerclient"
    cadvisorapi "github.com/google/cadvisor/info/v1"
    "github.com/stretchr/testify/assert"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/testapi"
    "k8s.io/kubernetes/pkg/api/unversioned"
    "k8s.io/kubernetes/pkg/client/record"
    kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
    "k8s.io/kubernetes/pkg/kubelet/network"
@@ -557,7 +555,6 @@ func runSyncPod(t *testing.T, dm *DockerManager, fakeDocker *FakeDockerClient, p
    if backOff == nil {
        backOff = util.NewBackOff(time.Second, time.Minute)
    }
    //TODO(random-liu): Add test for PodSyncResult
    result := dm.SyncPod(pod, *apiPodStatus, podStatus, []api.Secret{}, backOff)
    err = result.Error()
    if err != nil && !expectErr {
@@ -890,65 +887,6 @@ func TestSyncPodsDoesNothing(t *testing.T) {
    })
}

func TestSyncPodWithPullPolicy(t *testing.T) {
    dm, fakeDocker := newTestDockerManager()
    puller := dm.dockerPuller.(*FakeDockerPuller)
    puller.HasImages = []string{"existing_one", "want:latest"}
    dm.podInfraContainerImage = "pod_infra_image"

    pod := &api.Pod{
        ObjectMeta: api.ObjectMeta{
            UID: "12345678",
            Name: "foo",
            Namespace: "new",
        },
        Spec: api.PodSpec{
            Containers: []api.Container{
                {Name: "bar", Image: "pull_always_image", ImagePullPolicy: api.PullAlways},
                {Name: "bar2", Image: "pull_if_not_present_image", ImagePullPolicy: api.PullIfNotPresent},
                {Name: "bar3", Image: "existing_one", ImagePullPolicy: api.PullIfNotPresent},
                {Name: "bar4", Image: "want:latest", ImagePullPolicy: api.PullIfNotPresent},
                {Name: "bar5", Image: "pull_never_image", ImagePullPolicy: api.PullNever},
            },
        },
    }

    expectedStatusMap := map[string]api.ContainerState{
        "bar": {Running: &api.ContainerStateRunning{unversioned.Now()}},
        "bar2": {Running: &api.ContainerStateRunning{unversioned.Now()}},
        "bar3": {Running: &api.ContainerStateRunning{unversioned.Now()}},
        "bar4": {Running: &api.ContainerStateRunning{unversioned.Now()}},
        "bar5": {Waiting: &api.ContainerStateWaiting{Reason: kubecontainer.ErrImageNeverPull.Error(),
            Message: "Container image \"pull_never_image\" is not present with pull policy of Never"}},
    }

    runSyncPod(t, dm, fakeDocker, pod, nil, true)
    statuses, err := dm.GetAPIPodStatus(pod)
    if err != nil {
        t.Errorf("unable to get pod status")
    }
    for _, c := range pod.Spec.Containers {
        if containerStatus, ok := api.GetContainerStatus(statuses.ContainerStatuses, c.Name); ok {
            // copy the StartedAt time, to make the structs match
            if containerStatus.State.Running != nil && expectedStatusMap[c.Name].Running != nil {
                expectedStatusMap[c.Name].Running.StartedAt = containerStatus.State.Running.StartedAt
            }
            assert.Equal(t, expectedStatusMap[c.Name], containerStatus.State, "for container %s", c.Name)
        }
    }

    fakeDocker.Lock()
    defer fakeDocker.Unlock()

    pulledImageSorted := puller.ImagesPulled[:]
    sort.Strings(pulledImageSorted)
    assert.Equal(t, []string{"pod_infra_image", "pull_always_image", "pull_if_not_present_image"}, pulledImageSorted)

    if len(fakeDocker.Created) != 5 {
        t.Errorf("Unexpected containers created %v", fakeDocker.Created)
    }
}

func TestSyncPodWithRestartPolicy(t *testing.T) {
    dm, fakeDocker := newTestDockerManager()
    containers := []api.Container{
@@ -1053,112 +991,6 @@ func TestSyncPodWithRestartPolicy(t *testing.T) {
    }
}

func TestGetAPIPodStatusWithLastTermination(t *testing.T) {
    dm, fakeDocker := newTestDockerManager()
    containers := []api.Container{
        {Name: "succeeded"},
        {Name: "failed"},
    }

    pod := &api.Pod{
        ObjectMeta: api.ObjectMeta{
            UID: "12345678",
            Name: "foo",
            Namespace: "new",
        },
        Spec: api.PodSpec{
            Containers: containers,
        },
    }

    dockerContainers := []*docker.Container{
        {
            ID: "9876",
            Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0",
            State: docker.State{
                StartedAt: time.Now(),
                FinishedAt: time.Now(),
                Running: true,
            },
        },
        {
            ID: "1234",
            Name: "/k8s_succeeded." + strconv.FormatUint(kubecontainer.HashContainer(&containers[0]), 16) + "_foo_new_12345678_0",
            State: docker.State{
                ExitCode: 0,
                StartedAt: time.Now(),
                FinishedAt: time.Now(),
            },
        },
        {
            ID: "5678",
            Name: "/k8s_failed." + strconv.FormatUint(kubecontainer.HashContainer(&containers[1]), 16) + "_foo_new_12345678_0",
            State: docker.State{
                ExitCode: 42,
                StartedAt: time.Now(),
                FinishedAt: time.Now(),
            },
        },
    }

    tests := []struct {
        policy api.RestartPolicy
        created []string
        stopped []string
        lastTerminations []string
    }{
        {
            api.RestartPolicyAlways,
            []string{"succeeded", "failed"},
            []string{},
            []string{"docker://1234", "docker://5678"},
        },
        {
            api.RestartPolicyOnFailure,
            []string{"failed"},
            []string{},
            []string{"docker://5678"},
        },
        {
            api.RestartPolicyNever,
            []string{},
            []string{"9876"},
            []string{},
        },
    }

    for i, tt := range tests {
        fakeDocker.SetFakeContainers(dockerContainers)
        fakeDocker.ClearCalls()
        pod.Spec.RestartPolicy = tt.policy
        runSyncPod(t, dm, fakeDocker, pod, nil, false)

        // Check if we can retrieve the pod status.
        status, err := dm.GetAPIPodStatus(pod)
        if err != nil {
            t.Fatalf("unexpected error %v", err)
        }
        terminatedContainers := []string{}
        for _, cs := range status.ContainerStatuses {
            if cs.LastTerminationState.Terminated != nil {
                terminatedContainers = append(terminatedContainers, cs.LastTerminationState.Terminated.ContainerID)
            }
        }
        sort.StringSlice(terminatedContainers).Sort()
        sort.StringSlice(tt.lastTerminations).Sort()
        if !reflect.DeepEqual(terminatedContainers, tt.lastTerminations) {
            t.Errorf("Expected(sorted): %#v, Actual(sorted): %#v", tt.lastTerminations, terminatedContainers)
        }

        if err := fakeDocker.AssertCreated(tt.created); err != nil {
            t.Errorf("%d: %v", i, err)
        }
        if err := fakeDocker.AssertStopped(tt.stopped); err != nil {
            t.Errorf("%d: %v", i, err)
        }
    }
}

func TestSyncPodBackoff(t *testing.T) {
    var fakeClock = &util.FakeClock{Time: time.Now()}
    startTime := fakeClock.Now()
@@ -1248,101 +1080,10 @@ func TestSyncPodBackoff(t *testing.T) {
        }
    }
}
func TestGetPodCreationFailureReason(t *testing.T) {
    dm, fakeDocker := newTestDockerManager()
    // Inject the creation failure error to docker.
    failureReason := "RunContainerError"
    fakeDocker.Errors = map[string]error{
        "create": fmt.Errorf("%s", failureReason),
    }

    pod := &api.Pod{
        ObjectMeta: api.ObjectMeta{
            UID: "12345678",
            Name: "foo",
            Namespace: "new",
        },
        Spec: api.PodSpec{
            Containers: []api.Container{{Name: "bar"}},
        },
    }

    // Pretend that the pod infra container has already been created, so that
    // we can run the user containers.
    fakeDocker.SetFakeRunningContainers([]*docker.Container{{
        ID: "9876",
        Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0",
    }})

    runSyncPod(t, dm, fakeDocker, pod, nil, true)
    // Check if we can retrieve the pod status.
    status, err := dm.GetAPIPodStatus(pod)
    if err != nil {
        t.Fatalf("unexpected error %v", err)
    }

    if len(status.ContainerStatuses) < 1 {
        t.Errorf("expected 1 container status, got %d", len(status.ContainerStatuses))
    } else {
        state := status.ContainerStatuses[0].State
        if state.Waiting == nil {
            t.Errorf("expected waiting state, got %#v", state)
        } else if state.Waiting.Reason != failureReason {
            t.Errorf("expected reason %q, got %q", failureReason, state.Waiting.Reason)
        }
    }
}

func TestGetPodPullImageFailureReason(t *testing.T) {
    dm, fakeDocker := newTestDockerManager()
    // Initialize the FakeDockerPuller so that it'd try to pull non-existent
    // images.
    puller := dm.dockerPuller.(*FakeDockerPuller)
    puller.HasImages = []string{}
    // Inject the pull image failure error.
    failureReason := kubecontainer.ErrImagePull.Error()
    puller.ErrorsToInject = []error{fmt.Errorf("%s", failureReason)}

    pod := &api.Pod{
        ObjectMeta: api.ObjectMeta{
            UID: "12345678",
            Name: "foo",
            Namespace: "new",
        },
        Spec: api.PodSpec{
            Containers: []api.Container{{Name: "bar", Image: "realImage", ImagePullPolicy: api.PullAlways}},
        },
    }
    // Pretend that the pod infra container has already been created, so that
    // we can run the user containers.
    fakeDocker.SetFakeRunningContainers([]*docker.Container{{
        ID: "9876",
        Name: "/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0",
    }})
    runSyncPod(t, dm, fakeDocker, pod, nil, true)
    // Check if we can retrieve the pod status.
    status, err := dm.GetAPIPodStatus(pod)
    if err != nil {
        t.Fatalf("unexpected error %v", err)
    }

    if len(status.ContainerStatuses) < 1 {
        t.Errorf("expected 1 container status, got %d", len(status.ContainerStatuses))
    } else {
        state := status.ContainerStatuses[0].State
        if state.Waiting == nil {
            t.Errorf("expected waiting state, got %#v", state)
        } else if state.Waiting.Reason != failureReason {
            t.Errorf("expected reason %q, got %q", failureReason, state.Waiting.Reason)
        }
    }
}

func TestGetRestartCount(t *testing.T) {
    dm, fakeDocker := newTestDockerManager()
    containers := []api.Container{
        {Name: "bar"},
    }
    containerName := "bar"
    pod := api.Pod{
        ObjectMeta: api.ObjectMeta{
            UID: "12345678",
@@ -1350,67 +1091,83 @@ func TestGetRestartCount(t *testing.T) {
            Namespace: "new",
        },
        Spec: api.PodSpec{
            Containers: containers,
            Containers: []api.Container{
                {Name: containerName},
            },
            RestartPolicy: "Always",
        },
        Status: api.PodStatus{
            ContainerStatuses: []api.ContainerStatus{
                {
                    Name: containerName,
                    RestartCount: 3,
                },
            },
        },
    }

    // Helper function for verifying the restart count.
    verifyRestartCount := func(pod *api.Pod, expectedCount int) api.PodStatus {
    verifyRestartCount := func(pod *api.Pod, expectedCount int) {
        runSyncPod(t, dm, fakeDocker, pod, nil, false)
        status, err := dm.GetAPIPodStatus(pod)
        status, err := dm.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
        if err != nil {
            t.Fatalf("Unexpected error %v", err)
        }
        restartCount := status.ContainerStatuses[0].RestartCount
        cs := status.FindContainerStatusByName(containerName)
        if cs == nil {
            t.Fatalf("Can't find status for container %q", containerName)
        }
        restartCount := cs.RestartCount
        if restartCount != expectedCount {
            t.Errorf("expected %d restart count, got %d", expectedCount, restartCount)
        }
        return *status
    }

    killOneContainer := func(pod *api.Pod) {
        status, err := dm.GetAPIPodStatus(pod)
        status, err := dm.GetPodStatus(pod.UID, pod.Name, pod.Namespace)
        if err != nil {
            t.Fatalf("Unexpected error %v", err)
        }
        containerID := kubecontainer.ParseContainerID(status.ContainerStatuses[0].ContainerID)
        dm.KillContainerInPod(containerID, &pod.Spec.Containers[0], pod, "test container restart count.")
        cs := status.FindContainerStatusByName(containerName)
        if cs == nil {
            t.Fatalf("Can't find status for container %q", containerName)
        }
        dm.KillContainerInPod(cs.ID, &pod.Spec.Containers[0], pod, "test container restart count.")
    }
    // Container "bar" starts the first time.
    // TODO: container lists are expected to be sorted reversely by time.
    // We should fix FakeDockerClient to sort the list before returning.
    // (random-liu) Just partially sorted now.
    pod.Status = verifyRestartCount(&pod, 0)
    verifyRestartCount(&pod, 0)
    killOneContainer(&pod)

    // Poor container "bar" has been killed, and should be restarted with restart count 1
    pod.Status = verifyRestartCount(&pod, 1)
    verifyRestartCount(&pod, 1)
    killOneContainer(&pod)

    // Poor container "bar" has been killed again, and should be restarted with restart count 2
    pod.Status = verifyRestartCount(&pod, 2)
    verifyRestartCount(&pod, 2)
    killOneContainer(&pod)

    // Poor container "bar" has been killed again and again, and should be restarted with restart count 3
    pod.Status = verifyRestartCount(&pod, 3)
    verifyRestartCount(&pod, 3)

    // The oldest container has been garbage collected
    exitedContainers := fakeDocker.ExitedContainerList
    fakeDocker.ExitedContainerList = exitedContainers[:len(exitedContainers)-1]
    pod.Status = verifyRestartCount(&pod, 3)
    verifyRestartCount(&pod, 3)

    // The last two oldest containers have been garbage collected
    fakeDocker.ExitedContainerList = exitedContainers[:len(exitedContainers)-2]
    pod.Status = verifyRestartCount(&pod, 3)
    verifyRestartCount(&pod, 3)

    // All exited containers have been garbage collected
    // All exited containers have been garbage collected, restart count should be got from old api pod status
    fakeDocker.ExitedContainerList = []docker.APIContainers{}
    pod.Status = verifyRestartCount(&pod, 3)
    verifyRestartCount(&pod, 3)
    killOneContainer(&pod)

    // Poor container "bar" has been killed again and again and again, and should be restarted with restart count 4
    pod.Status = verifyRestartCount(&pod, 4)
    verifyRestartCount(&pod, 4)
}

func TestGetTerminationMessagePath(t *testing.T) {
@@ -1681,57 +1438,6 @@ func TestSyncPodWithHostNetwork(t *testing.T) {
    }
}

func TestGetAPIPodStatusSortedContainers(t *testing.T) {
    dm, fakeDocker := newTestDockerManager()
    specContainerList := []api.Container{}
    expectedOrder := []string{}

    numContainers := 10
    podName := "foo"
    podNs := "test"
    podUID := "uid1"
    fakeConfig := &docker.Config{
        Image: "some:latest",
    }

    dockerContainers := []*docker.Container{}
    for i := 0; i < numContainers; i++ {
        id := fmt.Sprintf("%v", i)
        containerName := fmt.Sprintf("%vcontainer", id)
        expectedOrder = append(expectedOrder, containerName)
        dockerContainers = append(dockerContainers, &docker.Container{
            ID: id,
            Name: fmt.Sprintf("/k8s_%v_%v_%v_%v_42", containerName, podName, podNs, podUID),
            Config: fakeConfig,
            Image: fmt.Sprintf("%vimageid", id),
        })
        specContainerList = append(specContainerList, api.Container{Name: containerName})
    }
    fakeDocker.SetFakeRunningContainers(dockerContainers)
    fakeDocker.ClearCalls()
    pod := &api.Pod{
        ObjectMeta: api.ObjectMeta{
            UID: types.UID(podUID),
            Name: podName,
            Namespace: podNs,
        },
        Spec: api.PodSpec{
            Containers: specContainerList,
        },
    }
    for i := 0; i < 5; i++ {
        status, err := dm.GetAPIPodStatus(pod)
        if err != nil {
            t.Fatalf("unexpected error %v", err)
        }
        for i, c := range status.ContainerStatuses {
            if expectedOrder[i] != c.Name {
                t.Fatalf("Container status not sorted, expected %v at index %d, but found %v", expectedOrder[i], i, c.Name)
            }
        }
    }
}

func TestVerifyNonRoot(t *testing.T) {
    dm, fakeDocker := newTestDockerManager()

@@ -1893,3 +1599,5 @@ func TestGetIPCMode(t *testing.T) {
        t.Errorf("expected host ipc mode for pod but got %v", ipcMode)
    }
}

// TODO(random-liu): Add unit test for returned PodSyncResult (issue #20478)