Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-24 04:06:03 +00:00)
Kubelet: clean up kubelet_test.go
* Start using FakeRuntime to replace FakeDockerClient in unit tests.
* Move and adapt docker-specific tests (e.g. creating/deleting infra containers) to manager_test.go in dockertools.
This commit is contained in:
parent 8ea88d58d4
commit 9a71fb9373
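Before the diff, a minimal, self-contained sketch (not part of this commit) of the test pattern the change moves toward: populate kubecontainer.FakeRuntime directly and assert on the pods it reports, instead of stubbing FakeDockerClient container lists. Field and helper names (PodList, NewFakeRuntimeCache) are the ones used in the hunks below; the import path and test name here are assumptions for illustration.

package kubelet_fakeruntime_sketch

import (
	"testing"

	// Assumed import path for the kubelet container package; the real path in this tree may differ.
	kubecontainer "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/container"
)

// TestFakeRuntimePodList (hypothetical): seed FakeRuntime.PodList, wrap the
// runtime in the fake runtime cache, and read the pods back through the cache.
func TestFakeRuntimePodList(t *testing.T) {
	runtime := &kubecontainer.FakeRuntime{}
	runtime.PodList = []*kubecontainer.Pod{{ID: "1111"}, {ID: "2222"}}

	cache := kubecontainer.NewFakeRuntimeCache(runtime)
	pods, err := cache.GetPods()
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if len(pods) != 2 {
		t.Errorf("expected 2 pods, got %d", len(pods))
	}
}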
@@ -48,7 +48,7 @@ func newTestRuntimeCache(getter podsGetter) *testRuntimeCache {
func TestGetPods(t *testing.T) {
runtime := &FakeRuntime{}
expected := []*Pod{{ID: "1111"}, {ID: "2222"}, {ID: "3333"}}
runtime.Podlist = expected
runtime.PodList = expected
cache := newTestRuntimeCache(runtime)
actual, err := cache.GetPods()
if err != nil {
@@ -65,12 +65,12 @@ func TestForceUpdateIfOlder(t *testing.T) {

// Cache old pods.
oldpods := []*Pod{{ID: "1111"}}
runtime.Podlist = oldpods
runtime.PodList = oldpods
cache.updateCacheWithLock()

// Update the runtime to new pods.
newpods := []*Pod{{ID: "1111"}, {ID: "2222"}, {ID: "3333"}}
runtime.Podlist = newpods
runtime.PodList = newpods

// An older timestamp should not force an update.
cache.ForceUpdateIfOlder(time.Now().Add(-20 * time.Minute))
@@ -100,7 +100,7 @@ func TestUpdatePodsOnlyIfNewer(t *testing.T) {

// Instruct runime to return a list of old pods.
oldpods := []*Pod{{ID: "1111"}}
runtime.Podlist = oldpods
runtime.PodList = oldpods

// Try to update the cache; the attempt should not succeed because the
// cache timestamp is newer than the current time.

@@ -19,8 +19,12 @@ package dockertools
import (
"errors"
"fmt"
"net/http"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"testing"
"time"

@@ -37,12 +41,61 @@ import (
"github.com/fsouza/go-dockerclient"
)

type fakeHTTP struct {
url string
err error
}

func (f *fakeHTTP) Get(url string) (*http.Response, error) {
f.url = url
return nil, f.err
}

// TODO: Find a better way to mock the runtime hooks so that we don't have to
// duplicate the code here.
type fakeRuntimeHooks struct {
recorder record.EventRecorder
}

var _ kubecontainer.RuntimeHooks = &fakeRuntimeHooks{}

func newFakeRuntimeHooks(recorder record.EventRecorder) kubecontainer.RuntimeHooks {
return &fakeRuntimeHooks{
recorder: recorder,
}
}

func (fr *fakeRuntimeHooks) ShouldPullImage(pod *api.Pod, container *api.Container, imagePresent bool) bool {
if container.ImagePullPolicy == api.PullNever {
return false
}
if container.ImagePullPolicy == api.PullAlways ||
(container.ImagePullPolicy == api.PullIfNotPresent && (!imagePresent)) {
return true
}

return false
}

func (fr *fakeRuntimeHooks) ReportImagePull(pod *api.Pod, container *api.Container, pullError error) {
}

type fakeOptionGenerator struct{}

var _ kubecontainer.RunContainerOptionsGenerator = &fakeOptionGenerator{}

func (*fakeOptionGenerator) GenerateRunContainerOptions(pod *api.Pod, container *api.Container) (*kubecontainer.RunContainerOptions, error) {
return &kubecontainer.RunContainerOptions{}, nil
}

func newTestDockerManager() (*DockerManager, *FakeDockerClient) {
fakeDocker := &FakeDockerClient{VersionInfo: docker.Env{"Version=1.1.3", "ApiVersion=1.15"}, Errors: make(map[string]error), RemovedImages: util.StringSet{}}
fakeRecorder := &record.FakeRecorder{}
readinessManager := kubecontainer.NewReadinessManager()
containerRefManager := kubecontainer.NewRefManager()
networkPlugin, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
runtimeHooks := newFakeRuntimeHooks(fakeRecorder)
optionGenerator := &fakeOptionGenerator{}
dockerManager := NewFakeDockerManager(
fakeDocker,
fakeRecorder,
@@ -52,13 +105,21 @@ func newTestDockerManager() (*DockerManager, *FakeDockerClient) {
0, 0, "",
kubecontainer.FakeOS{},
networkPlugin,
nil,
nil,
nil)
optionGenerator,
&fakeHTTP{},
runtimeHooks)

return dockerManager, fakeDocker
}

func matchString(t *testing.T, pattern, str string) bool {
match, err := regexp.MatchString(pattern, str)
if err != nil {
t.Logf("unexpected error: %v", err)
}
return match
}

func TestSetEntrypointAndCommand(t *testing.T) {
cases := []struct {
name string
@@ -699,3 +760,440 @@ func TestIsAExitError(t *testing.T) {
t.Error("couldn't cast dockerExitError to exec.ExitError")
}
}

func generatePodInfraContainerHash(pod *api.Pod) uint64 {
var ports []api.ContainerPort
if !pod.Spec.HostNetwork {
for _, container := range pod.Spec.Containers {
ports = append(ports, container.Ports...)
}
}

container := &api.Container{
Name: PodInfraContainerName,
Image: PodInfraContainerImage,
Ports: ports,
}
return kubecontainer.HashContainer(container)
}

// runSyncPod is a helper function to retrieve the running pods from the fake
// docker client and runs SyncPod for the given pod.
func runSyncPod(t *testing.T, dm *DockerManager, fakeDocker *FakeDockerClient, pod *api.Pod) {
runningPods, err := dm.GetPods(false)
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
runningPod := kubecontainer.Pods(runningPods).FindPodByID(pod.UID)
fakeDocker.ClearCalls()
err = dm.SyncPod(pod, runningPod, api.PodStatus{}, []api.Secret{})
if err != nil {
t.Errorf("unexpected error: %v", err)
}
}

func TestSyncPodCreateNetAndContainer(t *testing.T) {
dm, fakeDocker := newTestDockerManager()
dm.PodInfraContainerImage = "custom_image_name"
fakeDocker.ContainerList = []docker.APIContainers{}
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "bar"},
},
},
}

runSyncPod(t, dm, fakeDocker, pod)
verifyCalls(t, fakeDocker, []string{
// Create pod infra container.
"create", "start", "inspect_container",
// Create container.
"create", "start",
})

fakeDocker.Lock()

found := false
for _, c := range fakeDocker.ContainerList {
if c.Image == "custom_image_name" && strings.HasPrefix(c.Names[0], "/k8s_POD") {
found = true
}
}
if !found {
t.Errorf("Custom pod infra container not found: %v", fakeDocker.ContainerList)
}

if len(fakeDocker.Created) != 2 ||
!matchString(t, "k8s_POD\\.[a-f0-9]+_foo_new_", fakeDocker.Created[0]) ||
!matchString(t, "k8s_bar\\.[a-f0-9]+_foo_new_", fakeDocker.Created[1]) {
t.Errorf("Unexpected containers created %v", fakeDocker.Created)
}
fakeDocker.Unlock()
}

func TestSyncPodCreatesNetAndContainerPullsImage(t *testing.T) {
dm, fakeDocker := newTestDockerManager()
dm.PodInfraContainerImage = "custom_image_name"
puller := dm.Puller.(*FakeDockerPuller)
puller.HasImages = []string{}
dm.PodInfraContainerImage = "custom_image_name"
fakeDocker.ContainerList = []docker.APIContainers{}
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "bar", Image: "something", ImagePullPolicy: "IfNotPresent"},
},
},
}

runSyncPod(t, dm, fakeDocker, pod)

verifyCalls(t, fakeDocker, []string{
// Create pod infra container.
"create", "start", "inspect_container",
// Create container.
"create", "start",
})

fakeDocker.Lock()

if !reflect.DeepEqual(puller.ImagesPulled, []string{"custom_image_name", "something"}) {
t.Errorf("Unexpected pulled containers: %v", puller.ImagesPulled)
}

if len(fakeDocker.Created) != 2 ||
!matchString(t, "k8s_POD\\.[a-f0-9]+_foo_new_", fakeDocker.Created[0]) ||
!matchString(t, "k8s_bar\\.[a-f0-9]+_foo_new_", fakeDocker.Created[1]) {
t.Errorf("Unexpected containers created %v", fakeDocker.Created)
}
fakeDocker.Unlock()
}

func TestSyncPodWithPodInfraCreatesContainer(t *testing.T) {
dm, fakeDocker := newTestDockerManager()
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "bar"},
},
},
}
fakeDocker.ContainerList = []docker.APIContainers{
{
// pod infra container
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0"},
ID: "9876",
},
}
fakeDocker.ContainerMap = map[string]*docker.Container{
"9876": {
ID: "9876",
Config: &docker.Config{},
HostConfig: &docker.HostConfig{},
},
}

runSyncPod(t, dm, fakeDocker, pod)

verifyCalls(t, fakeDocker, []string{
// Inspect pod infra container (but does not create)"
"inspect_container",
// Create container.
"create", "start",
})

fakeDocker.Lock()
if len(fakeDocker.Created) != 1 ||
!matchString(t, "k8s_bar\\.[a-f0-9]+_foo_new_", fakeDocker.Created[0]) {
t.Errorf("Unexpected containers created %v", fakeDocker.Created)
}
fakeDocker.Unlock()
}

func TestSyncPodDeletesWithNoPodInfraContainer(t *testing.T) {
dm, fakeDocker := newTestDockerManager()
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo1",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "bar1"},
},
},
}
fakeDocker.ContainerList = []docker.APIContainers{
{
// format is // k8s_<container-id>_<pod-fullname>_<pod-uid>
Names: []string{"/k8s_bar1_foo1_new_12345678_0"},
ID: "1234",
},
}

runSyncPod(t, dm, fakeDocker, pod)

verifyCalls(t, fakeDocker, []string{
// Kill the container since pod infra container is not running.
"inspect_container", "stop",
// Create pod infra container.
"create", "start", "inspect_container",
// Create container.
"create", "start",
})

// A map iteration is used to delete containers, so must not depend on
// order here.
expectedToStop := map[string]bool{
"1234": true,
}
fakeDocker.Lock()
if len(fakeDocker.Stopped) != 1 || !expectedToStop[fakeDocker.Stopped[0]] {
t.Errorf("Wrong containers were stopped: %v", fakeDocker.Stopped)
}
fakeDocker.Unlock()
}

func TestSyncPodDeletesDuplicate(t *testing.T) {
dm, fakeDocker := newTestDockerManager()
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
},
}

fakeDocker.ContainerList = []docker.APIContainers{
{
// the k8s prefix is required for the kubelet to manage the container
Names: []string{"/k8s_foo_bar_new_12345678_1111"},
ID: "1234",
},
{
// pod infra container
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_bar_new_12345678_2222"},
ID: "9876",
},
{
// Duplicate for the same container.
Names: []string{"/k8s_foo_bar_new_12345678_3333"},
ID: "4567",
},
}
fakeDocker.ContainerMap = map[string]*docker.Container{
"1234": {
ID: "1234",
Config: &docker.Config{},
HostConfig: &docker.HostConfig{},
},
"9876": {
ID: "9876",
Config: &docker.Config{},
HostConfig: &docker.HostConfig{},
},
"4567": {
ID: "4567",
Config: &docker.Config{},
HostConfig: &docker.HostConfig{},
},
}

runSyncPod(t, dm, fakeDocker, pod)

verifyCalls(t, fakeDocker, []string{
// Check the pod infra container.
"inspect_container",
// Kill the duplicated container.
"inspect_container", "stop",
})
// Expect one of the duplicates to be killed.
if len(fakeDocker.Stopped) != 1 || (fakeDocker.Stopped[0] != "1234" && fakeDocker.Stopped[0] != "4567") {
t.Errorf("Wrong containers were stopped: %v", fakeDocker.Stopped)
}
}

func TestSyncPodBadHash(t *testing.T) {
dm, fakeDocker := newTestDockerManager()
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "bar"},
},
},
}

fakeDocker.ContainerList = []docker.APIContainers{
{
// the k8s prefix is required for the kubelet to manage the container
Names: []string{"/k8s_bar.1234_foo_new_12345678_42"},
ID: "1234",
},
{
// pod infra container
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_42"},
ID: "9876",
},
}
fakeDocker.ContainerMap = map[string]*docker.Container{
"1234": {
ID: "1234",
Config: &docker.Config{},
HostConfig: &docker.HostConfig{},
},
"9876": {
ID: "9876",
Config: &docker.Config{},
HostConfig: &docker.HostConfig{},
},
}

runSyncPod(t, dm, fakeDocker, pod)

verifyCalls(t, fakeDocker, []string{
// Check the pod infra container.
"inspect_container",
// Kill and restart the bad hash container.
"inspect_container", "stop", "create", "start",
})

if err := fakeDocker.AssertStopped([]string{"1234"}); err != nil {
t.Errorf("%v", err)
}
}

func TestSyncPodsUnhealthy(t *testing.T) {
dm, fakeDocker := newTestDockerManager()
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "bar",
LivenessProbe: &api.Probe{
// Always returns healthy == false
},
},
},
},
}

fakeDocker.ContainerList = []docker.APIContainers{
{
// the k8s prefix is required for the kubelet to manage the container
Names: []string{"/k8s_bar_foo_new_12345678_42"},
ID: "1234",
},
{
// pod infra container
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_42"},
ID: "9876",
},
}
fakeDocker.ContainerMap = map[string]*docker.Container{
"1234": {
ID: "1234",
Config: &docker.Config{},
HostConfig: &docker.HostConfig{},
},
"9876": {
ID: "9876",
Config: &docker.Config{},
HostConfig: &docker.HostConfig{},
},
}

runSyncPod(t, dm, fakeDocker, pod)

verifyCalls(t, fakeDocker, []string{
// Check the pod infra container.
"inspect_container",
// Kill the unhealthy container.
"inspect_container", "stop",
// Restart the unhealthy container.
"create", "start",
})

if err := fakeDocker.AssertStopped([]string{"1234"}); err != nil {
t.Errorf("%v", err)
}
}

func TestSyncPodsDoesNothing(t *testing.T) {
dm, fakeDocker := newTestDockerManager()
container := api.Container{Name: "bar"}
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
container,
},
},
}

fakeDocker.ContainerList = []docker.APIContainers{
{
// format is // k8s_<container-id>_<pod-fullname>_<pod-uid>_<random>
Names: []string{"/k8s_bar." + strconv.FormatUint(kubecontainer.HashContainer(&container), 16) + "_foo_new_12345678_0"},
ID: "1234",
},
{
// pod infra container
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pod), 16) + "_foo_new_12345678_0"},
ID: "9876",
},
}
fakeDocker.ContainerMap = map[string]*docker.Container{
"1234": {
ID: "1234",
HostConfig: &docker.HostConfig{},
Config: &docker.Config{},
},
"9876": {
ID: "9876",
HostConfig: &docker.HostConfig{},
Config: &docker.Config{},
},
}

runSyncPod(t, dm, fakeDocker, pod)

verifyCalls(t, fakeDocker, []string{
// Check the pod infra contianer.
"inspect_container",
})
}

@@ -61,6 +61,8 @@ func init() {
util.ReallyCrash = true
}

// TODO: Depcreate this in favor of TestKubeletWithFakeRuntime after all
// Docker-specific tests have been moved to dockertools.
type TestKubelet struct {
kubelet *Kubelet
fakeDocker *dockertools.FakeDockerClient
@@ -173,6 +175,67 @@ func verifyBoolean(t *testing.T, expected, value bool) {
}
}

// TODO: Rename this to TestKubelet after TestKubelet is removed.
type TestKubeletWithFakeRuntime struct {
kubelet *Kubelet
fakeRuntime *kubecontainer.FakeRuntime
fakeCadvisor *cadvisor.Mock
fakeKubeClient *testclient.Fake
fakeMirrorClient *fakeMirrorClient
}

func newTestKubeletWithFakeRuntime(t *testing.T) *TestKubeletWithFakeRuntime {
fakeRuntime := &kubecontainer.FakeRuntime{}
fakeRuntime.VersionInfo = "1.15"
fakeRecorder := &record.FakeRecorder{}
fakeKubeClient := &testclient.Fake{}
kubelet := &Kubelet{}
kubelet.kubeClient = fakeKubeClient
kubelet.os = kubecontainer.FakeOS{}

kubelet.hostname = testKubeletHostname
kubelet.runtimeUpThreshold = maxWaitForContainerRuntime
kubelet.networkPlugin, _ = network.InitNetworkPlugin([]network.NetworkPlugin{}, "", network.NewFakeHost(nil))
if tempDir, err := ioutil.TempDir("/tmp", "kubelet_test."); err != nil {
t.Fatalf("can't make a temp rootdir: %v", err)
} else {
kubelet.rootDirectory = tempDir
}
if err := os.MkdirAll(kubelet.rootDirectory, 0750); err != nil {
t.Fatalf("can't mkdir(%q): %v", kubelet.rootDirectory, err)
}
kubelet.sourcesReady = func() bool { return true }
kubelet.masterServiceNamespace = api.NamespaceDefault
kubelet.serviceLister = testServiceLister{}
kubelet.nodeLister = testNodeLister{}
kubelet.readinessManager = kubecontainer.NewReadinessManager()
kubelet.recorder = fakeRecorder
kubelet.statusManager = newStatusManager(fakeKubeClient)
if err := kubelet.setupDataDirs(); err != nil {
t.Fatalf("can't initialize kubelet data dirs: %v", err)
}
mockCadvisor := &cadvisor.Mock{}
kubelet.cadvisor = mockCadvisor
podManager, fakeMirrorClient := newFakePodManager()
kubelet.podManager = podManager
kubelet.containerRefManager = kubecontainer.NewRefManager()
diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, DiskSpacePolicy{})
if err != nil {
t.Fatalf("can't initialize disk space manager: %v", err)
}
kubelet.diskSpaceManager = diskSpaceManager

kubelet.containerRuntime = fakeRuntime
kubelet.runtimeCache = kubecontainer.NewFakeRuntimeCache(kubelet.containerRuntime)
kubelet.podWorkers = &fakePodWorkers{
syncPodFn: kubelet.syncPod,
runtimeCache: kubelet.runtimeCache,
t: t,
}
kubelet.volumeManager = newVolumeManager()
return &TestKubeletWithFakeRuntime{kubelet, fakeRuntime, mockCadvisor, fakeKubeClient, fakeMirrorClient}
}

func newTestPods(count int) []*api.Pod {
pods := make([]*api.Pod, count)
for i := 0; i < count; i++ {
@@ -354,6 +417,8 @@ func apiContainerToContainer(c docker.APIContainers) container.Container {

var emptyPodUIDs map[types.UID]metrics.SyncPodType

// TODO: Remove this function after all docker-specifc tests have been migrated
// to dockertools.
func generatePodInfraContainerHash(pod *api.Pod) uint64 {
var ports []api.ContainerPort
if !pod.Spec.HostNetwork {
@@ -370,73 +435,6 @@ func generatePodInfraContainerHash(pod *api.Pod) uint64 {
return kubecontainer.HashContainer(container)
}

func TestSyncPodsDoesNothing(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
fakeDocker := testKubelet.fakeDocker

container := api.Container{Name: "bar"}
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
container,
},
},
},
}

fakeDocker.ContainerList = []docker.APIContainers{
{
// format is // k8s_<container-id>_<pod-fullname>_<pod-uid>_<random>
Names: []string{"/k8s_bar." + strconv.FormatUint(kubecontainer.HashContainer(&container), 16) + "_foo_new_12345678_0"},
ID: "1234",
},
{
// pod infra container
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_0"},
ID: "9876",
},
}
fakeDocker.ContainerMap = map[string]*docker.Container{
"1234": {
ID: "1234",
HostConfig: &docker.HostConfig{},
Config: &docker.Config{},
},
"9876": {
ID: "9876",
HostConfig: &docker.HostConfig{},
Config: &docker.Config{},
},
}

kubelet.podManager.SetPods(pods)
err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
if err != nil {
t.Errorf("unexpected error: %v", err)
}
verifyCalls(t, fakeDocker, []string{
"list", "list",
// Get pod status.
"list", "inspect_container", "inspect_container",
// Check the pod infra contianer.
"inspect_container",
// Get pod status.
"list", "inspect_container", "inspect_container",
// Get pods for deleting orphaned volumes.
"list",
})
}

func TestSyncPodsWithTerminationLog(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
@@ -501,18 +499,13 @@ func matchString(t *testing.T, pattern, str string) bool {
return match
}

func TestSyncPodsCreatesNetAndContainer(t *testing.T) {
testKubelet := newTestKubelet(t)
func TestSyncPodsStartPod(t *testing.T) {
testKubelet := newTestKubeletWithFakeRuntime(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
fakeDocker := testKubelet.fakeDocker
// TODO: Move this test to dockertools so that we don't have to do the hacky
// type assertion here.
dm := kubelet.containerRuntime.(*dockertools.DockerManager)
dm.PodInfraContainerImage = "custom_image_name"
fakeDocker.ContainerList = []docker.APIContainers{}
fakeRuntime := testKubelet.fakeRuntime
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
@@ -532,164 +525,7 @@ func TestSyncPodsCreatesNetAndContainer(t *testing.T) {
if err != nil {
t.Errorf("unexpected error: %v", err)
}

verifyCalls(t, fakeDocker, []string{
"list", "list",
// Get pod status.
"list", "inspect_image",
// Create pod infra container.
"create", "start", "inspect_container",
// Create container.
"create", "start",
// Get pod status.
"list", "inspect_container", "inspect_container",
// Get pods for deleting orphaned volumes.
"list",
})

fakeDocker.Lock()

found := false
for _, c := range fakeDocker.ContainerList {
if c.Image == "custom_image_name" && strings.HasPrefix(c.Names[0], "/k8s_POD") {
found = true
}
}
if !found {
t.Errorf("Custom pod infra container not found: %v", fakeDocker.ContainerList)
}

if len(fakeDocker.Created) != 2 ||
!matchString(t, "k8s_POD\\.[a-f0-9]+_foo_new_", fakeDocker.Created[0]) ||
!matchString(t, "k8s_bar\\.[a-f0-9]+_foo_new_", fakeDocker.Created[1]) {
t.Errorf("Unexpected containers created %v", fakeDocker.Created)
}
fakeDocker.Unlock()
}

func TestSyncPodsCreatesNetAndContainerPullsImage(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
fakeDocker := testKubelet.fakeDocker
// TODO: Move this test to dockertools so that we don't have to do the hacky
// type assertion here.
dm := kubelet.containerRuntime.(*dockertools.DockerManager)
puller := dm.Puller.(*dockertools.FakeDockerPuller)
puller.HasImages = []string{}
dm.PodInfraContainerImage = "custom_image_name"
fakeDocker.ContainerList = []docker.APIContainers{}
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "bar", Image: "something", ImagePullPolicy: "IfNotPresent"},
},
},
},
}
kubelet.podManager.SetPods(pods)
err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
if err != nil {
t.Errorf("unexpected error: %v", err)
}

verifyCalls(t, fakeDocker, []string{
"list", "list",
// Get pod status.
"list", "inspect_image",
// Create pod infra container.
"create", "start", "inspect_container",
// Create container.
"create", "start",
// Get pod status.
"list", "inspect_container", "inspect_container",
// Get pods for deleting orphaned volumes.
"list",
})

fakeDocker.Lock()

if !reflect.DeepEqual(puller.ImagesPulled, []string{"custom_image_name", "something"}) {
t.Errorf("Unexpected pulled containers: %v", puller.ImagesPulled)
}

if len(fakeDocker.Created) != 2 ||
!matchString(t, "k8s_POD\\.[a-f0-9]+_foo_new_", fakeDocker.Created[0]) ||
!matchString(t, "k8s_bar\\.[a-f0-9]+_foo_new_", fakeDocker.Created[1]) {
t.Errorf("Unexpected containers created %v", fakeDocker.Created)
}
fakeDocker.Unlock()
}

func TestSyncPodsWithPodInfraCreatesContainer(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
fakeDocker := testKubelet.fakeDocker
pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "bar"},
},
},
},
}
fakeDocker.ContainerList = []docker.APIContainers{
{
// pod infra container
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_0"},
ID: "9876",
},
}
fakeDocker.ContainerMap = map[string]*docker.Container{
"9876": {
ID: "9876",
Config: &docker.Config{},
HostConfig: &docker.HostConfig{},
},
}
kubelet.podManager.SetPods(pods)
err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
if err != nil {
t.Errorf("unexpected error: %v", err)
}

verifyCalls(t, fakeDocker, []string{
"list", "list",
// Get pod status.
"list", "inspect_container", "inspect_image",
// Check the pod infra container.
"inspect_container",
// Create container.
"create", "start",
// Get pod status.
"list", "inspect_container", "inspect_container",
// Get pods for deleting orphaned volumes.
"list",
})

fakeDocker.Lock()
if len(fakeDocker.Created) != 1 ||
!matchString(t, "k8s_bar\\.[a-f0-9]+_foo_new_", fakeDocker.Created[0]) {
t.Errorf("Unexpected containers created %v", fakeDocker.Created)
}
fakeDocker.Unlock()
fakeRuntime.AssertStartedPods([]string{string(pods[0].UID)})
}

type fakeHTTP struct {
@@ -786,168 +622,33 @@ func TestSyncPodsWithPodInfraCreatesContainerCallsHandler(t *testing.T) {
}
}

func TestSyncPodsDeletesWithNoPodInfraContainer(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
fakeDocker := testKubelet.fakeDocker

pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo1",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "bar1"},
},
},
},
{
ObjectMeta: api.ObjectMeta{
UID: "87654321",
Name: "foo2",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "bar2"},
},
},
},
}
fakeDocker.ContainerList = []docker.APIContainers{
{
// format is // k8s_<container-id>_<pod-fullname>_<pod-uid>
Names: []string{"/k8s_bar1_foo1_new_12345678_0"},
ID: "1234",
},
{
// format is // k8s_<container-id>_<pod-fullname>_<pod-uid>
Names: []string{"/k8s_bar2_foo2_new_87654321_0"},
ID: "5678",
},
{
// format is // k8s_<container-id>_<pod-fullname>_<pod-uid>
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo2_new_87654321_0"},
ID: "8765",
},
}
fakeDocker.ContainerMap = map[string]*docker.Container{
"1234": {
ID: "1234",
Config: &docker.Config{},
HostConfig: &docker.HostConfig{},
},
"5678": {
ID: "5678",
Config: &docker.Config{},
HostConfig: &docker.HostConfig{},
},
"8765": {
ID: "8765",
Config: &docker.Config{},
HostConfig: &docker.HostConfig{},
},
}

kubelet.podManager.SetPods(pods)
err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
if err != nil {
t.Errorf("unexpected error: %v", err)
}

verifyCalls(t, fakeDocker, []string{
"list",

// foo1
"list",
// Get pod status.
"list", "inspect_container",
// Kill the container since pod infra container is not running.
"inspect_container", "stop",
// Create pod infra container.
"create", "start", "inspect_container",
// Create container.
"create", "start",
// Get pod status.
"list", "inspect_container", "inspect_container", "inspect_container",

// foo2
"list",
// Get pod status.
"list", "inspect_container", "inspect_container",
// Check the pod infra container.
"inspect_container",
// Get pod status.
"list", "inspect_container", "inspect_container",

// Get pods for deleting orphaned volumes.
"list",
})

// A map iteration is used to delete containers, so must not depend on
// order here.
expectedToStop := map[string]bool{
"1234": true,
}
fakeDocker.Lock()
if len(fakeDocker.Stopped) != 1 || !expectedToStop[fakeDocker.Stopped[0]] {
t.Errorf("Wrong containers were stopped: %v", fakeDocker.Stopped)
}
fakeDocker.Unlock()
}

func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
ready := false
testKubelet := newTestKubelet(t)

testKubelet := newTestKubeletWithFakeRuntime(t)
fakeRuntime := testKubelet.fakeRuntime
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
fakeDocker := testKubelet.fakeDocker
kubelet.sourcesReady = func() bool { return ready }

fakeDocker.ContainerList = []docker.APIContainers{
{
// the k8s prefix is required for the kubelet to manage the container
Names: []string{"/k8s_foo_bar_new_12345678_42"},
ID: "1234",
},
{
// pod infra container
Names: []string{"/k8s_POD_foo_new_12345678_42"},
ID: "9876",
},
fakeRuntime.PodList = []*kubecontainer.Pod{
{ID: "12345678", Name: "foo", Namespace: "new", Containers: []*kubecontainer.Container{{Name: "bar"}}},
}
if err := kubelet.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]*api.Pod{}, time.Now()); err != nil {
t.Errorf("unexpected error: %v", err)
}
// Validate nothing happened.
verifyCalls(t, fakeDocker, []string{"list"})
fakeDocker.ClearCalls()
// Sources are not ready yet. Don't remove any pods.
fakeRuntime.AssertKilledPods([]string{})

ready = true
if err := kubelet.SyncPods([]*api.Pod{}, emptyPodUIDs, map[string]*api.Pod{}, time.Now()); err != nil {
t.Errorf("unexpected error: %v", err)
}
verifyCalls(t, fakeDocker, []string{"list", "inspect_container", "stop", "inspect_container", "stop", "list"})

// A map iteration is used to delete containers, so must not depend on
// order here.
expectedToStop := map[string]bool{
"1234": true,
"9876": true,
}
if len(fakeDocker.Stopped) != 2 ||
!expectedToStop[fakeDocker.Stopped[0]] ||
!expectedToStop[fakeDocker.Stopped[1]] {
t.Errorf("Wrong containers were stopped: %v", fakeDocker.Stopped)
}
// Sources are ready. Remove unwanted pods.
fakeRuntime.AssertKilledPods([]string{"12345678"})
}

func TestSyncPodsDeletes(t *testing.T) {
@@ -993,240 +694,6 @@ func TestSyncPodsDeletes(t *testing.T) {
}
}

func TestSyncPodsDeletesDuplicate(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
fakeDocker := testKubelet.fakeDocker

pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "bar",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "foo"},
},
},
},
}

fakeDocker.ContainerList = []docker.APIContainers{
{
// the k8s prefix is required for the kubelet to manage the container
Names: []string{"/k8s_foo_bar_new_12345678_1111"},
ID: "1234",
},
{
// pod infra container
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_bar_new_12345678_2222"},
ID: "9876",
},
{
// Duplicate for the same container.
Names: []string{"/k8s_foo_bar_new_12345678_3333"},
ID: "4567",
},
}
fakeDocker.ContainerMap = map[string]*docker.Container{
"1234": {
ID: "1234",
Config: &docker.Config{},
HostConfig: &docker.HostConfig{},
},
"9876": {
ID: "9876",
Config: &docker.Config{},
HostConfig: &docker.HostConfig{},
},
"4567": {
ID: "4567",
Config: &docker.Config{},
HostConfig: &docker.HostConfig{},
},
}

kubelet.podManager.SetPods(pods)
err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
if err != nil {
t.Errorf("unexpected error: %v", err)
}

verifyCalls(t, fakeDocker, []string{
"list", "list",
// Get pod status.
"list", "inspect_container", "inspect_container", "inspect_container",
// Check the pod infra container.
"inspect_container",
// Kill the duplicated container.
"inspect_container", "stop",
// Get pod status.
"list", "inspect_container", "inspect_container", "inspect_container",
// Get pods for deleting orphaned volumes.
"list",
})
// Expect one of the duplicates to be killed.
if len(fakeDocker.Stopped) != 1 || (fakeDocker.Stopped[0] != "1234" && fakeDocker.Stopped[0] != "4567") {
t.Errorf("Wrong containers were stopped: %v", fakeDocker.Stopped)
}
}

func TestSyncPodsBadHash(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
fakeDocker := testKubelet.fakeDocker

pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "bar"},
},
},
},
}

fakeDocker.ContainerList = []docker.APIContainers{
{
// the k8s prefix is required for the kubelet to manage the container
Names: []string{"/k8s_bar.1234_foo_new_12345678_42"},
ID: "1234",
},
{
// pod infra container
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_42"},
ID: "9876",
},
}
fakeDocker.ContainerMap = map[string]*docker.Container{
"1234": {
ID: "1234",
Config: &docker.Config{},
HostConfig: &docker.HostConfig{},
},
"9876": {
ID: "9876",
Config: &docker.Config{},
HostConfig: &docker.HostConfig{},
},
}

kubelet.podManager.SetPods(pods)
err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
if err != nil {
t.Errorf("unexpected error: %v", err)
}

verifyCalls(t, fakeDocker, []string{
"list", "list",
// Get pod status.
"list", "inspect_container", "inspect_container",
// Check the pod infra container.
"inspect_container",
// Kill and restart the bad hash container.
"inspect_container", "stop", "create", "start",
// Get pod status.
"list", "inspect_container", "inspect_container", "inspect_container",
// Get pods for deleting orphaned volumes.
"list",
})

if err := fakeDocker.AssertStopped([]string{"1234"}); err != nil {
t.Errorf("%v", err)
}
}

func TestSyncPodsUnhealthy(t *testing.T) {
testKubelet := newTestKubelet(t)
testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorApi.MachineInfo{}, nil)
testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorApiv2.FsInfo{}, nil)
kubelet := testKubelet.kubelet
fakeDocker := testKubelet.fakeDocker

pods := []*api.Pod{
{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "bar",
LivenessProbe: &api.Probe{
// Always returns healthy == false
},
},
},
},
},
}

fakeDocker.ContainerList = []docker.APIContainers{
{
// the k8s prefix is required for the kubelet to manage the container
Names: []string{"/k8s_bar_foo_new_12345678_42"},
ID: "1234",
},
{
// pod infra container
Names: []string{"/k8s_POD." + strconv.FormatUint(generatePodInfraContainerHash(pods[0]), 16) + "_foo_new_12345678_42"},
ID: "9876",
},
}
fakeDocker.ContainerMap = map[string]*docker.Container{
"1234": {
ID: "1234",
Config: &docker.Config{},
HostConfig: &docker.HostConfig{},
},
"9876": {
ID: "9876",
Config: &docker.Config{},
HostConfig: &docker.HostConfig{},
},
}
kubelet.podManager.SetPods(pods)
err := kubelet.SyncPods(pods, emptyPodUIDs, map[string]*api.Pod{}, time.Now())
if err != nil {
t.Errorf("unexpected error: %v", err)
}

verifyCalls(t, fakeDocker, []string{
"list", "list",
// Get pod status.
"list", "inspect_container", "inspect_container",
// Check the pod infra container.
"inspect_container",
// Kill the unhealthy container.
"inspect_container", "stop",
// Restart the unhealthy container.
"create", "start",
// Get pod status.
"list", "inspect_container", "inspect_container", "inspect_container",
// Get pods for deleting orphaned volumes.
"list",
})

if err := fakeDocker.AssertStopped([]string{"1234"}); err != nil {
t.Errorf("%v", err)
}
}

func TestMountExternalVolumes(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet