Merge pull request #43884 from yujuhong/nuke-docker-manager

Automatic merge from submit-queue

Remove DockerManager from kubelet

This commit deletes code in dockertools that is only used by
DockerManager. A follow-up change will rename and clean up the rest of
the files in this package.

The commit also sets EnableCRI to true if the container runtime is not
rkt. A follow-up change will remove the flag/field and all references to
it.
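
A minimal sketch of the EnableCRI defaulting described above; the field names here are assumptions for illustration, not the actual kubelet configuration code:

// Illustrative only: force the CRI code path for every runtime except rkt.
// A follow-up change removes this flag entirely.
if kubeCfg.ContainerRuntime != "rkt" {
	kubeCfg.EnableCRI = true
}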
Kubernetes Submit Queue 2017-05-01 15:27:51 -07:00 committed by GitHub
commit 0b8b4033cd
20 changed files with 145 additions and 7207 deletions


@@ -101,7 +101,6 @@ go_library(
"//pkg/util/mount:go_default_library",
"//pkg/util/node:go_default_library",
"//pkg/util/oom:go_default_library",
"//pkg/util/procfs:go_default_library",
"//pkg/util/removeall:go_default_library",
"//pkg/version:go_default_library",
"//pkg/volume:go_default_library",


@@ -11,53 +11,26 @@ load(
go_library(
name = "go_default_library",
srcs = [
"container_gc.go",
"convert.go",
"docker.go",
"docker_manager.go",
"docker_manager_linux.go",
"exec.go",
"fake_docker_client.go",
"fake_manager.go",
"images.go",
"instrumented_docker.go",
"kube_docker_client.go",
"labels.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/api/v1/helper:go_default_library",
"//pkg/client/unversioned/remotecommand:go_default_library",
"//pkg/credentialprovider:go_default_library",
"//pkg/kubelet/cm:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/custommetrics:go_default_library",
"//pkg/kubelet/dockertools/securitycontext:go_default_library",
"//pkg/kubelet/events:go_default_library",
"//pkg/kubelet/images:go_default_library",
"//pkg/kubelet/leaky:go_default_library",
"//pkg/kubelet/lifecycle:go_default_library",
"//pkg/kubelet/metrics:go_default_library",
"//pkg/kubelet/network:go_default_library",
"//pkg/kubelet/network/hairpin:go_default_library",
"//pkg/kubelet/prober/results:go_default_library",
"//pkg/kubelet/qos:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/kubelet/util/cache:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//pkg/securitycontext:go_default_library",
"//pkg/util/exec:go_default_library",
"//pkg/util/oom:go_default_library",
"//pkg/util/procfs:go_default_library",
"//pkg/util/selinux:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/util/tail:go_default_library",
"//pkg/util/term:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor/github.com/armon/circbuf:go_default_library",
"//vendor/github.com/docker/distribution/digest:go_default_library",
"//vendor/github.com/docker/distribution/reference:go_default_library",
"//vendor/github.com/docker/docker/pkg/jsonmessage:go_default_library",
@@ -65,36 +38,20 @@ go_library(
"//vendor/github.com/docker/engine-api/client:go_default_library",
"//vendor/github.com/docker/engine-api/types:go_default_library",
"//vendor/github.com/docker/engine-api/types/container:go_default_library",
"//vendor/github.com/docker/engine-api/types/strslice:go_default_library",
"//vendor/github.com/docker/engine-api/types/versions:go_default_library",
"//vendor/github.com/docker/go-connections/nat:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/golang.org/x/net/context:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/clock:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"container_gc_test.go",
"convert_test.go",
"docker_manager_linux_test.go",
"docker_manager_test.go",
"docker_test.go",
"images_test.go",
"kube_docker_client_test.go",
"labels_test.go",
],
data = [
"fixtures/seccomp/sub/subtest",
@@ -105,41 +62,14 @@ go_test(
"automanaged",
],
deps = [
"//pkg/api/testapi:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/componentconfig:go_default_library",
"//pkg/credentialprovider:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/container/testing:go_default_library",
"//pkg/kubelet/events:go_default_library",
"//pkg/kubelet/images:go_default_library",
"//pkg/kubelet/network:go_default_library",
"//pkg/kubelet/network/testing:go_default_library",
"//pkg/kubelet/prober/results:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//pkg/util/exec:go_default_library",
"//pkg/util/hash:go_default_library",
"//pkg/util/strings:go_default_library",
"//vendor/github.com/docker/docker/pkg/jsonmessage:go_default_library",
"//vendor/github.com/docker/engine-api/types:go_default_library",
"//vendor/github.com/docker/engine-api/types/container:go_default_library",
"//vendor/github.com/docker/engine-api/types/strslice:go_default_library",
"//vendor/github.com/docker/go-connections/nat:go_default_library",
"//vendor/github.com/golang/mock/gomock:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/github.com/stretchr/testify/require:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/equality:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/clock:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
],
)


@@ -1,340 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"fmt"
"os"
"path"
"path/filepath"
"sort"
"time"
dockertypes "github.com/docker/engine-api/types"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
knetwork "k8s.io/kubernetes/pkg/kubelet/network"
)
type containerGC struct {
client DockerInterface
podGetter podGetter
network *knetwork.PluginManager
containerLogsDir string
}
func NewContainerGC(client DockerInterface, podGetter podGetter, network *knetwork.PluginManager, containerLogsDir string) *containerGC {
return &containerGC{
client: client,
podGetter: podGetter,
network: network,
containerLogsDir: containerLogsDir,
}
}
// Internal information kept for containers being considered for GC.
type containerGCInfo struct {
// Docker ID of the container.
id string
// Docker name of the container.
dockerName string
// Creation time for the container.
createTime time.Time
// Full pod name, including namespace, in the format `podName_namespace`.
// This comes from dockertools.ParseDockerName(...)
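// e.g. ParseDockerName("/k8s_foobar.1234_qux_ns_1234_42") yields PodFullName "qux_ns"
// (name format as used by the tests in this package).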
podNameWithNamespace string
// Kubernetes pod UID
podUID types.UID
// Container name in pod
containerName string
// Container network mode
isHostNetwork bool
}
// Containers are considered for eviction as units of (UID, container name) pair.
type evictUnit struct {
// UID of the pod.
uid types.UID
// Name of the container in the pod.
name string
}
type containersByEvictUnit map[evictUnit][]containerGCInfo
// Returns the number of containers in this map.
func (cu containersByEvictUnit) NumContainers() int {
num := 0
for key := range cu {
num += len(cu[key])
}
return num
}
// Returns the number of pods in this map.
func (cu containersByEvictUnit) NumEvictUnits() int {
return len(cu)
}
// Newest first.
type byCreated []containerGCInfo
func (a byCreated) Len() int { return len(a) }
func (a byCreated) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byCreated) Less(i, j int) bool { return a[i].createTime.After(a[j].createTime) }
func (cgc *containerGC) enforceMaxContainersPerEvictUnit(evictUnits containersByEvictUnit, MaxContainers int) {
for uid := range evictUnits {
toRemove := len(evictUnits[uid]) - MaxContainers
if toRemove > 0 {
evictUnits[uid] = cgc.removeOldestN(evictUnits[uid], toRemove)
}
}
}
// Removes the oldest toRemove containers and returns the resulting slice.
func (cgc *containerGC) removeOldestN(containers []containerGCInfo, toRemove int) []containerGCInfo {
// Remove from oldest to newest (last to first).
numToKeep := len(containers) - toRemove
for i := numToKeep; i < len(containers); i++ {
cgc.removeContainer(containers[i])
}
// Assume we removed the containers so that we're not too aggressive.
return containers[:numToKeep]
}
// Returns a full GC info structure on success, or a partial one on failure
func newContainerGCInfo(id string, inspectResult *dockertypes.ContainerJSON, created time.Time) (containerGCInfo, error) {
containerName, _, err := ParseDockerName(inspectResult.Name)
if err != nil {
return containerGCInfo{
id: id,
dockerName: inspectResult.Name,
}, fmt.Errorf("failed to parse docker name %q: %v", inspectResult.Name, err)
}
networkMode := getDockerNetworkMode(inspectResult)
return containerGCInfo{
id: id,
dockerName: inspectResult.Name,
podNameWithNamespace: containerName.PodFullName,
podUID: containerName.PodUID,
containerName: containerName.ContainerName,
createTime: created,
isHostNetwork: networkMode == namespaceModeHost,
}, nil
}
// Get all containers that are evictable. Evictable containers are those that are
// not running and were created more than MinAge ago.
func (cgc *containerGC) evictableContainers(minAge time.Duration) (containersByEvictUnit, []containerGCInfo, []containerGCInfo, error) {
containers, err := GetKubeletDockerContainers(cgc.client, true)
if err != nil {
return containersByEvictUnit{}, []containerGCInfo{}, []containerGCInfo{}, err
}
unidentifiedContainers := make([]containerGCInfo, 0)
netContainers := make([]containerGCInfo, 0)
evictUnits := make(containersByEvictUnit)
newestGCTime := time.Now().Add(-minAge)
for _, container := range containers {
// Prune out running containers.
data, err := cgc.client.InspectContainer(container.ID)
if err != nil {
// Container may have been removed already, skip.
continue
} else if data.State.Running {
continue
}
created, err := ParseDockerTimestamp(data.Created)
if err != nil {
glog.Errorf("Failed to parse Created timestamp %q for container %q", data.Created, container.ID)
}
if newestGCTime.Before(created) {
continue
}
containerInfo, err := newContainerGCInfo(container.ID, data, created)
if err != nil {
unidentifiedContainers = append(unidentifiedContainers, containerInfo)
} else {
// Track net containers for special cleanup
if containerIsNetworked(containerInfo.containerName) {
netContainers = append(netContainers, containerInfo)
}
key := evictUnit{
uid: containerInfo.podUID,
name: containerInfo.containerName,
}
evictUnits[key] = append(evictUnits[key], containerInfo)
}
}
// Sort the containers by age.
for uid := range evictUnits {
sort.Sort(byCreated(evictUnits[uid]))
}
return evictUnits, netContainers, unidentifiedContainers, nil
}
// GarbageCollect removes dead containers using the specified container gc policy
func (cgc *containerGC) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool) error {
// Separate containers by evict units.
evictUnits, netContainers, unidentifiedContainers, err := cgc.evictableContainers(gcPolicy.MinAge)
if err != nil {
return err
}
// Remove unidentified containers.
for _, container := range unidentifiedContainers {
glog.Infof("Removing unidentified dead container %q", container.dockerName)
err = cgc.client.RemoveContainer(container.id, dockertypes.ContainerRemoveOptions{RemoveVolumes: true})
if err != nil {
glog.Warningf("Failed to remove unidentified dead container %q: %v", container.dockerName, err)
}
}
// Always clean up net containers to ensure network resources are released
// TODO: this may tear down networking again if the container doesn't get
// removed in this GC cycle, but that already happens elsewhere...
for _, container := range netContainers {
glog.Infof("Cleaning up dead net container %q", container.dockerName)
cgc.netContainerCleanup(container)
}
// Remove deleted pod containers if all sources are ready.
if allSourcesReady {
for key, unit := range evictUnits {
if cgc.isPodDeleted(key.uid) {
cgc.removeOldestN(unit, len(unit)) // Remove all.
delete(evictUnits, key)
}
}
}
// Enforce max containers per evict unit.
if gcPolicy.MaxPerPodContainer >= 0 {
cgc.enforceMaxContainersPerEvictUnit(evictUnits, gcPolicy.MaxPerPodContainer)
}
// Enforce max total number of containers.
if gcPolicy.MaxContainers >= 0 && evictUnits.NumContainers() > gcPolicy.MaxContainers {
// Leave an equal number of containers per evict unit (min: 1).
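// e.g. MaxContainers=6 spread over 4 evict units keeps 6/4 = 1 container per unit.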
numContainersPerEvictUnit := gcPolicy.MaxContainers / evictUnits.NumEvictUnits()
if numContainersPerEvictUnit < 1 {
numContainersPerEvictUnit = 1
}
cgc.enforceMaxContainersPerEvictUnit(evictUnits, numContainersPerEvictUnit)
// If we still need to evict, evict oldest first.
numContainers := evictUnits.NumContainers()
if numContainers > gcPolicy.MaxContainers {
flattened := make([]containerGCInfo, 0, numContainers)
for uid := range evictUnits {
flattened = append(flattened, evictUnits[uid]...)
}
sort.Sort(byCreated(flattened))
cgc.removeOldestN(flattened, numContainers-gcPolicy.MaxContainers)
}
}
// Remove dead symlinks - should only happen on upgrade
// from a k8s version without proper log symlink cleanup
logSymlinks, _ := filepath.Glob(path.Join(cgc.containerLogsDir, fmt.Sprintf("*.%s", LogSuffix)))
for _, logSymlink := range logSymlinks {
if _, err = os.Stat(logSymlink); os.IsNotExist(err) {
err = os.Remove(logSymlink)
if err != nil {
glog.Warningf("Failed to remove container log dead symlink %q: %v", logSymlink, err)
}
}
}
return nil
}
func (cgc *containerGC) netContainerCleanup(containerInfo containerGCInfo) {
if containerInfo.isHostNetwork {
return
}
podName, podNamespace, err := kubecontainer.ParsePodFullName(containerInfo.podNameWithNamespace)
if err != nil {
glog.Warningf("failed to parse container %q pod full name: %v", containerInfo.dockerName, err)
return
}
containerID := kubecontainer.DockerID(containerInfo.id).ContainerID()
if err := cgc.network.TearDownPod(podNamespace, podName, containerID); err != nil {
glog.Warningf("failed to tear down container %q network: %v", containerInfo.dockerName, err)
}
}
func (cgc *containerGC) removeContainer(containerInfo containerGCInfo) {
glog.V(4).Infof("Removing container %q", containerInfo.dockerName)
err := cgc.client.RemoveContainer(containerInfo.id, dockertypes.ContainerRemoveOptions{RemoveVolumes: true})
if err != nil {
glog.Warningf("Failed to remove container %q: %v", containerInfo.dockerName, err)
}
symlinkPath := LogSymlink(cgc.containerLogsDir, containerInfo.podNameWithNamespace, containerInfo.containerName, containerInfo.id)
err = os.Remove(symlinkPath)
if err != nil && !os.IsNotExist(err) {
glog.Warningf("Failed to remove container %q log symlink %q: %v", containerInfo.dockerName, symlinkPath, err)
}
}
func (cgc *containerGC) deleteContainer(id string) error {
data, err := cgc.client.InspectContainer(id)
if err != nil {
glog.Warningf("Failed to inspect container %q: %v", id, err)
return err
}
if data.State.Running {
return fmt.Errorf("container %q is still running", id)
}
containerInfo, err := newContainerGCInfo(id, data, time.Now())
if err != nil {
return err
}
if containerIsNetworked(containerInfo.containerName) {
cgc.netContainerCleanup(containerInfo)
}
cgc.removeContainer(containerInfo)
return nil
}
func (cgc *containerGC) isPodDeleted(podUID types.UID) bool {
_, found := cgc.podGetter.GetPodByUID(podUID)
return !found
}
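
A minimal usage sketch for the GC entry point above, assuming a DockerInterface client, a podGetter, and a network plugin are already wired up elsewhere; the log directory and MaxContainers value are illustrative assumptions:

gc := NewContainerGC(client, podGetter, knetwork.NewPluginManager(plugin), "/var/log/containers")
policy := kubecontainer.ContainerGCPolicy{
	MinAge:             time.Minute, // never evict containers younger than this
	MaxPerPodContainer: 2,           // dead containers kept per (pod UID, container name) pair
	MaxContainers:      240,         // global cap; -1 disables the limit
}
if err := gc.GarbageCollect(policy, true /* allSourcesReady */); err != nil {
	glog.Errorf("container GC failed: %v", err)
}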


@@ -1,318 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"fmt"
"reflect"
"sort"
"testing"
"time"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/api/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
knetwork "k8s.io/kubernetes/pkg/kubelet/network"
nettest "k8s.io/kubernetes/pkg/kubelet/network/testing"
)
func newTestContainerGC(t *testing.T) (*containerGC, *FakeDockerClient, *nettest.MockNetworkPlugin) {
fakeDocker := NewFakeDockerClient()
fakePodGetter := newFakePodGetter()
fakePlugin := nettest.NewMockNetworkPlugin(gomock.NewController(t))
fakePlugin.EXPECT().Name().Return("someNetworkPlugin").AnyTimes()
gc := NewContainerGC(fakeDocker, fakePodGetter, knetwork.NewPluginManager(fakePlugin), "")
return gc, fakeDocker, fakePlugin
}
// Makes a stable time object; a lower id means an earlier time.
func makeTime(id int) time.Time {
var zero time.Time
return zero.Add(time.Duration(id) * time.Second)
}
// Makes a container with the specified properties.
func makeContainer(id, uid, name string, running bool, created time.Time) *FakeContainer {
return &FakeContainer{
Name: fmt.Sprintf("/k8s_%s_bar_new_%s_42", name, uid),
Running: running,
ID: id,
CreatedAt: created,
}
}
// Makes a container with an unidentified name and the specified properties.
func makeUndefinedContainer(id string, running bool, created time.Time) *FakeContainer {
return &FakeContainer{
Name: "/k8s_unidentified",
Running: running,
ID: id,
CreatedAt: created,
}
}
func addPods(podGetter podGetter, podUIDs ...types.UID) {
fakePodGetter := podGetter.(*fakePodGetter)
for _, uid := range podUIDs {
fakePodGetter.pods[uid] = &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uid),
Namespace: "test",
UID: uid,
},
}
}
}
func verifyStringArrayEqualsAnyOrder(t *testing.T, actual, expected []string) {
act := make([]string, len(actual))
exp := make([]string, len(expected))
copy(act, actual)
copy(exp, expected)
sort.StringSlice(act).Sort()
sort.StringSlice(exp).Sort()
if !reflect.DeepEqual(exp, act) {
t.Errorf("Expected(sorted): %#v, Actual(sorted): %#v", exp, act)
}
}
func TestDeleteContainerSkipRunningContainer(t *testing.T) {
gc, fakeDocker, _ := newTestContainerGC(t)
fakeDocker.SetFakeContainers([]*FakeContainer{
makeContainer("1876", "foo", "POD", true, makeTime(0)),
})
addPods(gc.podGetter, "foo")
assert.Error(t, gc.deleteContainer("1876"))
assert.Len(t, fakeDocker.Removed, 0)
}
func TestDeleteContainerRemoveDeadContainer(t *testing.T) {
gc, fakeDocker, fakePlugin := newTestContainerGC(t)
defer fakePlugin.Finish()
fakeDocker.SetFakeContainers([]*FakeContainer{
makeContainer("1876", "foo", "POD", false, makeTime(0)),
})
addPods(gc.podGetter, "foo")
fakePlugin.EXPECT().TearDownPod(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
assert.Nil(t, gc.deleteContainer("1876"))
assert.Len(t, fakeDocker.Removed, 1)
}
func TestGarbageCollectNetworkTeardown(t *testing.T) {
// Ensure infra container gets teardown called
gc, fakeDocker, fakePlugin := newTestContainerGC(t)
defer fakePlugin.Finish()
id := kubecontainer.DockerID("1867").ContainerID()
fakeDocker.SetFakeContainers([]*FakeContainer{
makeContainer(id.ID, "foo", "POD", false, makeTime(0)),
})
addPods(gc.podGetter, "foo")
fakePlugin.EXPECT().TearDownPod(gomock.Any(), gomock.Any(), id).Return(nil)
assert.Nil(t, gc.deleteContainer(id.ID))
assert.Len(t, fakeDocker.Removed, 1)
// Ensure non-infra container does not have teardown called
gc, fakeDocker, fakePlugin = newTestContainerGC(t)
id = kubecontainer.DockerID("1877").ContainerID()
fakeDocker.SetFakeContainers([]*FakeContainer{
makeContainer(id.ID, "foo", "adsfasdfasdf", false, makeTime(0)),
})
fakePlugin.EXPECT().SetUpPod(gomock.Any(), gomock.Any(), id).Return(nil)
addPods(gc.podGetter, "foo")
assert.Nil(t, gc.deleteContainer(id.ID))
assert.Len(t, fakeDocker.Removed, 1)
}
func TestGarbageCollectZeroMaxContainers(t *testing.T) {
gc, fakeDocker, fakePlugin := newTestContainerGC(t)
defer fakePlugin.Finish()
fakeDocker.SetFakeContainers([]*FakeContainer{
makeContainer("1876", "foo", "POD", false, makeTime(0)),
})
addPods(gc.podGetter, "foo")
fakePlugin.EXPECT().TearDownPod(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil)
assert.Nil(t, gc.GarbageCollect(kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: 1, MaxContainers: 0}, true))
assert.Len(t, fakeDocker.Removed, 1)
}
func TestGarbageCollectNoMaxPerPodContainerLimit(t *testing.T) {
gc, fakeDocker, fakePlugin := newTestContainerGC(t)
defer fakePlugin.Finish()
fakeDocker.SetFakeContainers([]*FakeContainer{
makeContainer("1876", "foo", "POD", false, makeTime(0)),
makeContainer("2876", "foo1", "POD", false, makeTime(1)),
makeContainer("3876", "foo2", "POD", false, makeTime(2)),
makeContainer("4876", "foo3", "POD", false, makeTime(3)),
makeContainer("5876", "foo4", "POD", false, makeTime(4)),
})
addPods(gc.podGetter, "foo", "foo1", "foo2", "foo3", "foo4")
fakePlugin.EXPECT().TearDownPod(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(5)
assert.Nil(t, gc.GarbageCollect(kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: 4}, true))
assert.Len(t, fakeDocker.Removed, 1)
}
func TestGarbageCollectNoMaxLimit(t *testing.T) {
gc, fakeDocker, fakePlugin := newTestContainerGC(t)
defer fakePlugin.Finish()
fakeDocker.SetFakeContainers([]*FakeContainer{
makeContainer("1876", "foo", "POD", false, makeTime(0)),
makeContainer("2876", "foo1", "POD", false, makeTime(0)),
makeContainer("3876", "foo2", "POD", false, makeTime(0)),
makeContainer("4876", "foo3", "POD", false, makeTime(0)),
makeContainer("5876", "foo4", "POD", false, makeTime(0)),
})
addPods(gc.podGetter, "foo", "foo1", "foo2", "foo3", "foo4")
fakePlugin.EXPECT().TearDownPod(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).Times(5)
assert.Nil(t, gc.GarbageCollect(kubecontainer.ContainerGCPolicy{MinAge: time.Minute, MaxPerPodContainer: -1, MaxContainers: -1}, true))
assert.Len(t, fakeDocker.Removed, 0)
}
func TestGarbageCollect(t *testing.T) {
tests := []struct {
containers []*FakeContainer
expectedRemoved []string
}{
// Don't remove containers started recently.
{
containers: []*FakeContainer{
makeContainer("1876", "foo", "POD", false, time.Now()),
makeContainer("2876", "foo", "POD", false, time.Now()),
makeContainer("3876", "foo", "POD", false, time.Now()),
},
},
// Remove oldest containers.
{
containers: []*FakeContainer{
makeContainer("1876", "foo", "POD", false, makeTime(0)),
makeContainer("2876", "foo", "POD", false, makeTime(1)),
makeContainer("3876", "foo", "POD", false, makeTime(2)),
},
expectedRemoved: []string{"1876"},
},
// Only remove non-running containers.
{
containers: []*FakeContainer{
makeContainer("1876", "foo", "POD", true, makeTime(0)),
makeContainer("2876", "foo", "POD", false, makeTime(1)),
makeContainer("3876", "foo", "POD", false, makeTime(2)),
makeContainer("4876", "foo", "POD", false, makeTime(3)),
},
expectedRemoved: []string{"2876"},
},
// Less than maxContainerCount doesn't delete any.
{
containers: []*FakeContainer{
makeContainer("1876", "foo", "POD", false, makeTime(0)),
},
},
// maxContainerCount applies per (UID,container) pair.
{
containers: []*FakeContainer{
makeContainer("1876", "foo", "POD", false, makeTime(0)),
makeContainer("2876", "foo", "POD", false, makeTime(1)),
makeContainer("3876", "foo", "POD", false, makeTime(2)),
makeContainer("1076", "foo", "bar", false, makeTime(0)),
makeContainer("2076", "foo", "bar", false, makeTime(1)),
makeContainer("3076", "foo", "bar", false, makeTime(2)),
makeContainer("1176", "foo2", "POD", false, makeTime(0)),
makeContainer("2176", "foo2", "POD", false, makeTime(1)),
makeContainer("3176", "foo2", "POD", false, makeTime(2)),
},
expectedRemoved: []string{"1076", "1176", "1876"},
},
// Remove non-running unidentified Kubernetes containers.
{
containers: []*FakeContainer{
makeUndefinedContainer("1876", true, makeTime(0)),
makeUndefinedContainer("2876", false, makeTime(0)),
makeContainer("3876", "foo", "POD", false, makeTime(0)),
},
expectedRemoved: []string{"2876"},
},
// Max limit applied while trying to keep containers from every pod.
{
containers: []*FakeContainer{
makeContainer("1876", "foo", "POD", false, makeTime(0)),
makeContainer("2876", "foo", "POD", false, makeTime(1)),
makeContainer("3876", "foo1", "POD", false, makeTime(0)),
makeContainer("4876", "foo1", "POD", false, makeTime(1)),
makeContainer("5876", "foo2", "POD", false, makeTime(0)),
makeContainer("6876", "foo2", "POD", false, makeTime(1)),
makeContainer("7876", "foo3", "POD", false, makeTime(0)),
makeContainer("8876", "foo3", "POD", false, makeTime(1)),
makeContainer("9876", "foo4", "POD", false, makeTime(0)),
makeContainer("10876", "foo4", "POD", false, makeTime(1)),
},
expectedRemoved: []string{"1876", "3876", "5876", "7876", "9876"},
},
// If more pods than the limit allows, evict the oldest containers first.
{
containers: []*FakeContainer{
makeContainer("1876", "foo", "POD", false, makeTime(1)),
makeContainer("2876", "foo", "POD", false, makeTime(2)),
makeContainer("3876", "foo1", "POD", false, makeTime(1)),
makeContainer("4876", "foo1", "POD", false, makeTime(2)),
makeContainer("5876", "foo2", "POD", false, makeTime(0)),
makeContainer("6876", "foo3", "POD", false, makeTime(1)),
makeContainer("7876", "foo4", "POD", false, makeTime(0)),
makeContainer("8876", "foo5", "POD", false, makeTime(1)),
makeContainer("9876", "foo6", "POD", false, makeTime(2)),
makeContainer("10876", "foo7", "POD", false, makeTime(1)),
},
expectedRemoved: []string{"1876", "3876", "5876", "7876"},
},
// Containers for deleted pods should be GC'd.
{
containers: []*FakeContainer{
makeContainer("1876", "foo", "POD", false, makeTime(1)),
makeContainer("2876", "foo", "POD", false, makeTime(2)),
makeContainer("3876", "deleted", "POD", false, makeTime(1)),
makeContainer("4876", "deleted", "POD", false, makeTime(2)),
makeContainer("5876", "deleted", "POD", false, time.Now()), // Deleted pods still respect MinAge.
},
expectedRemoved: []string{"3876", "4876"},
},
}
for i, test := range tests {
t.Logf("Running test case with index %d", i)
gc, fakeDocker, fakePlugin := newTestContainerGC(t)
fakeDocker.SetFakeContainers(test.containers)
addPods(gc.podGetter, "foo", "foo1", "foo2", "foo3", "foo4", "foo5", "foo6", "foo7")
fakePlugin.EXPECT().TearDownPod(gomock.Any(), gomock.Any(), gomock.Any()).Return(nil).AnyTimes()
assert.Nil(t, gc.GarbageCollect(kubecontainer.ContainerGCPolicy{MinAge: time.Hour, MaxPerPodContainer: 2, MaxContainers: 6}, true))
verifyStringArrayEqualsAnyOrder(t, fakeDocker.Removed, test.expectedRemoved)
fakePlugin.Finish()
}
}


@@ -1,85 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"fmt"
"strings"
dockertypes "github.com/docker/engine-api/types"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// This file contains helper functions to convert docker API types to runtime
// (kubecontainer) types.
const (
statusRunningPrefix = "Up"
statusCreatedPrefix = "Created"
statusExitedPrefix = "Exited"
)
func mapState(state string) kubecontainer.ContainerState {
// Parse the state string in dockertypes.Container. This could break when
// we upgrade docker.
switch {
case strings.HasPrefix(state, statusRunningPrefix):
return kubecontainer.ContainerStateRunning
case strings.HasPrefix(state, statusExitedPrefix):
return kubecontainer.ContainerStateExited
default:
return kubecontainer.ContainerStateUnknown
}
}
// Converts dockertypes.Container to kubecontainer.Container.
func toRuntimeContainer(c *dockertypes.Container) (*kubecontainer.Container, error) {
if c == nil {
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime container")
}
dockerName, hash, err := getDockerContainerNameInfo(c)
if err != nil {
return nil, err
}
return &kubecontainer.Container{
ID: kubecontainer.DockerID(c.ID).ContainerID(),
Name: dockerName.ContainerName,
Image: c.Image,
ImageID: c.ImageID,
Hash: hash,
// (random-liu) docker uses status to indicate whether a container is running or exited.
// However, in kubernetes we usually use state for that, and reserve status for the
// comprehensive status of the container. Hence the difference in naming here.
State: mapState(c.Status),
}, nil
}
// Converts dockertypes.Image to kubecontainer.Image.
func toRuntimeImage(image *dockertypes.Image) (*kubecontainer.Image, error) {
if image == nil {
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime image")
}
return &kubecontainer.Image{
ID: image.ID,
RepoTags: image.RepoTags,
RepoDigests: image.RepoDigests,
Size: image.VirtualSize,
}, nil
}


@@ -1,90 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"reflect"
"testing"
dockertypes "github.com/docker/engine-api/types"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
func TestMapState(t *testing.T) {
testCases := []struct {
input string
expected kubecontainer.ContainerState
}{
{input: "Up 5 hours", expected: kubecontainer.ContainerStateRunning},
{input: "Exited (0) 2 hours ago", expected: kubecontainer.ContainerStateExited},
{input: "Created", expected: kubecontainer.ContainerStateUnknown},
{input: "Random string", expected: kubecontainer.ContainerStateUnknown},
}
for i, test := range testCases {
if actual := mapState(test.input); actual != test.expected {
t.Errorf("Test[%d]: expected %q, got %q", i, test.expected, actual)
}
}
}
func TestToRuntimeContainer(t *testing.T) {
original := &dockertypes.Container{
ID: "ab2cdf",
Image: "bar_image",
Names: []string{"/k8s_bar.5678_foo_ns_1234_42"},
Status: "Up 5 hours",
}
expected := &kubecontainer.Container{
ID: kubecontainer.ContainerID{Type: "docker", ID: "ab2cdf"},
Name: "bar",
Image: "bar_image",
Hash: 0x5678,
State: kubecontainer.ContainerStateRunning,
}
actual, err := toRuntimeContainer(original)
if err != nil {
t.Fatalf("unexpected error %v", err)
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("expected %#v, got %#v", expected, actual)
}
}
func TestToRuntimeImage(t *testing.T) {
original := &dockertypes.Image{
ID: "aeeea",
RepoTags: []string{"abc", "def"},
RepoDigests: []string{"123", "456"},
VirtualSize: 1234,
}
expected := &kubecontainer.Image{
ID: "aeeea",
RepoTags: []string{"abc", "def"},
RepoDigests: []string{"123", "456"},
Size: 1234,
}
actual, err := toRuntimeImage(original)
if err != nil {
t.Fatalf("unexpected error %v", err)
}
if !reflect.DeepEqual(expected, actual) {
t.Errorf("expected %#v, got %#v", expected, actual)
}
}


@@ -387,6 +387,8 @@ func ConnectToDockerOrDie(dockerEndpoint string, requestTimeout, imagePullProgre
// GetKubeletDockerContainers lists all containers or just the running ones.
// Returns a list of docker containers that we manage
// TODO: This function should be deleted after migrating
// test/e2e_node/garbage_collector_test.go off of it.
func GetKubeletDockerContainers(client DockerInterface, allContainers bool) ([]*dockertypes.Container, error) {
result := []*dockertypes.Container{}
containers, err := client.ListContainers(dockertypes.ContainerListOptions{All: allContainers})

File diff suppressed because it is too large.


@@ -18,63 +18,6 @@ limitations under the License.
package dockertools
import (
dockertypes "github.com/docker/engine-api/types"
dockercontainer "github.com/docker/engine-api/types/container"
"k8s.io/kubernetes/pkg/api/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// These two functions are OS specific (for now at least)
func updateHostConfig(hc *dockercontainer.HostConfig, opts *kubecontainer.RunContainerOptions) {
// no-op, there is a windows implementation that is different.
}
func DefaultMemorySwap() int64 {
return 0
}
func getContainerIP(container *dockertypes.ContainerJSON) string {
result := ""
if container.NetworkSettings != nil {
result = container.NetworkSettings.IPAddress
// Fall back to IPv6 address if no IPv4 address is present
if result == "" {
result = container.NetworkSettings.GlobalIPv6Address
}
}
return result
}
// We don't want to override the networking mode on Linux.
func getNetworkingMode() string { return "" }
// Returns true if the container name matches the infrastructure's container name
func containerProvidesPodIP(containerName string) bool {
return containerName == PodInfraContainerName
}
// Only the infrastructure container needs network setup/teardown
func containerIsNetworked(containerName string) bool {
return containerName == PodInfraContainerName
}
// Returns Seccomp and AppArmor Security options
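// e.g. a pod with no security annotations yields the single option
// "seccomp=unconfined" once formatted via FmtDockerOpts(opts, '='),
// as exercised by TestGetSecurityOpts in this package.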
func (dm *DockerManager) getSecurityOpts(pod *v1.Pod, ctrName string) ([]dockerOpt, error) {
var securityOpts []dockerOpt
if seccompOpts, err := dm.getSeccompOpts(pod, ctrName); err != nil {
return nil, err
} else {
securityOpts = append(securityOpts, seccompOpts...)
}
if appArmorOpts, err := dm.getAppArmorOpts(pod, ctrName); err != nil {
return nil, err
} else {
securityOpts = append(securityOpts, appArmorOpts...)
}
return securityOpts, nil
}


@@ -1,466 +0,0 @@
// +build linux
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"fmt"
"net"
"path"
"strconv"
"testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/network"
nettest "k8s.io/kubernetes/pkg/kubelet/network/testing"
"k8s.io/kubernetes/pkg/security/apparmor"
utilstrings "k8s.io/kubernetes/pkg/util/strings"
)
func TestGetSecurityOpts(t *testing.T) {
const containerName = "bar"
pod := func(annotations map[string]string) *v1.Pod {
p := makePod("foo", &v1.PodSpec{
Containers: []v1.Container{
{Name: containerName},
},
})
p.Annotations = annotations
return p
}
tests := []struct {
msg string
pod *v1.Pod
expectedOpts []string
}{{
msg: "No security annotations",
pod: pod(nil),
expectedOpts: []string{"seccomp=unconfined"},
}, {
msg: "Seccomp default",
pod: pod(map[string]string{
v1.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default",
}),
expectedOpts: nil,
}, {
msg: "AppArmor runtime/default",
pod: pod(map[string]string{
apparmor.ContainerAnnotationKeyPrefix + containerName: apparmor.ProfileRuntimeDefault,
}),
expectedOpts: []string{"seccomp=unconfined"},
}, {
msg: "AppArmor local profile",
pod: pod(map[string]string{
apparmor.ContainerAnnotationKeyPrefix + containerName: apparmor.ProfileNamePrefix + "foo",
}),
expectedOpts: []string{"seccomp=unconfined", "apparmor=foo"},
}, {
msg: "AppArmor and seccomp profile",
pod: pod(map[string]string{
v1.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default",
apparmor.ContainerAnnotationKeyPrefix + containerName: apparmor.ProfileNamePrefix + "foo",
}),
expectedOpts: []string{"apparmor=foo"},
}}
dm, _ := newTestDockerManagerWithVersion("1.11.1", "1.23")
for i, test := range tests {
securityOpts, err := dm.getSecurityOpts(test.pod, containerName)
assert.NoError(t, err, "TestCase[%d]: %s", i, test.msg)
opts := FmtDockerOpts(securityOpts, '=')
assert.Len(t, opts, len(test.expectedOpts), "TestCase[%d]: %s", i, test.msg)
for _, opt := range test.expectedOpts {
assert.Contains(t, opts, opt, "TestCase[%d]: %s", i, test.msg)
}
}
}
func TestSeccompIsUnconfinedByDefaultWithDockerV110(t *testing.T) {
dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22")
// We want to capture events.
recorder := record.NewFakeRecorder(20)
dm.recorder = recorder
pod := makePod("foo", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar"},
},
})
runSyncPod(t, dm, fakeDocker, pod, nil, false)
verifyCalls(t, fakeDocker, []string{
// Create pod infra container.
"create", "start", "inspect_container", "inspect_container",
// Create container.
"create", "start", "inspect_container",
})
assert.NoError(t, fakeDocker.AssertCreatedByNameWithOrder([]string{"POD", "bar"}))
newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1])
if err != nil {
t.Fatalf("unexpected error %v", err)
}
assert.Contains(t, newContainer.HostConfig.SecurityOpt, "seccomp:unconfined", "Pods with Docker versions >= 1.10 must not have seccomp disabled by default")
cid := utilstrings.ShortenString(fakeDocker.Created[1], 12)
assert.NoError(t, expectEvent(recorder, v1.EventTypeNormal, events.CreatedContainer,
fmt.Sprintf("Created container with docker id %s; Security:[seccomp=unconfined]", cid)))
}
func TestUnconfinedSeccompProfileWithDockerV110(t *testing.T) {
dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22")
pod := makePod("foo4", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar4"},
},
})
pod.Annotations = map[string]string{
v1.SeccompPodAnnotationKey: "unconfined",
}
runSyncPod(t, dm, fakeDocker, pod, nil, false)
verifyCalls(t, fakeDocker, []string{
// Create pod infra container.
"create", "start", "inspect_container", "inspect_container",
// Create container.
"create", "start", "inspect_container",
})
assert.NoError(t, fakeDocker.AssertCreatedByNameWithOrder([]string{"POD", "bar4"}))
newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1])
if err != nil {
t.Fatalf("unexpected error %v", err)
}
assert.Contains(t, newContainer.HostConfig.SecurityOpt, "seccomp:unconfined", "Pods created with a secccomp annotation of unconfined should have seccomp:unconfined.")
}
func TestDefaultSeccompProfileWithDockerV110(t *testing.T) {
dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22")
pod := makePod("foo1", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar1"},
},
})
pod.Annotations = map[string]string{
v1.SeccompPodAnnotationKey: "docker/default",
}
runSyncPod(t, dm, fakeDocker, pod, nil, false)
verifyCalls(t, fakeDocker, []string{
// Create pod infra container.
"create", "start", "inspect_container", "inspect_container",
// Create container.
"create", "start", "inspect_container",
})
assert.NoError(t, fakeDocker.AssertCreatedByNameWithOrder([]string{"POD", "bar1"}))
newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1])
if err != nil {
t.Fatalf("unexpected error %v", err)
}
assert.NotContains(t, newContainer.HostConfig.SecurityOpt, "seccomp:unconfined", "Pods created with a secccomp annotation of docker/default should have empty security opt.")
}
func TestSeccompContainerAnnotationTrumpsPod(t *testing.T) {
dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22")
pod := makePod("foo2", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar2"},
},
})
pod.Annotations = map[string]string{
v1.SeccompPodAnnotationKey: "unconfined",
v1.SeccompContainerAnnotationKeyPrefix + "bar2": "docker/default",
}
runSyncPod(t, dm, fakeDocker, pod, nil, false)
verifyCalls(t, fakeDocker, []string{
// Create pod infra container.
"create", "start", "inspect_container", "inspect_container",
// Create container.
"create", "start", "inspect_container",
})
assert.NoError(t, fakeDocker.AssertCreatedByNameWithOrder([]string{"POD", "bar2"}))
newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1])
if err != nil {
t.Fatalf("unexpected error %v", err)
}
assert.NotContains(t, newContainer.HostConfig.SecurityOpt, "seccomp:unconfined", "Container annotation should trump the pod annotation for seccomp.")
}
func TestSecurityOptsAreNilWithDockerV19(t *testing.T) {
dm, fakeDocker := newTestDockerManagerWithVersion("1.9.1", "1.21")
pod := makePod("foo", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar"},
},
})
runSyncPod(t, dm, fakeDocker, pod, nil, false)
verifyCalls(t, fakeDocker, []string{
// Create pod infra container.
"create", "start", "inspect_container", "inspect_container",
// Create container.
"create", "start", "inspect_container",
})
assert.NoError(t, fakeDocker.AssertCreatedByNameWithOrder([]string{"POD", "bar"}))
newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1])
if err != nil {
t.Fatalf("unexpected error %v", err)
}
assert.NotContains(t, newContainer.HostConfig.SecurityOpt, "seccomp:unconfined", "Pods with Docker versions < 1.10 must not have seccomp disabled by default")
}
func TestCreateAppArmorContainer(t *testing.T) {
dm, fakeDocker := newTestDockerManagerWithVersion("1.11.1", "1.23")
// We want to capture events.
recorder := record.NewFakeRecorder(20)
dm.recorder = recorder
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
Annotations: map[string]string{
apparmor.ContainerAnnotationKeyPrefix + "test": apparmor.ProfileNamePrefix + "test-profile",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{Name: "test"},
},
},
}
runSyncPod(t, dm, fakeDocker, pod, nil, false)
verifyCalls(t, fakeDocker, []string{
// Create pod infra container.
"create", "start", "inspect_container", "inspect_container",
// Create container.
"create", "start", "inspect_container",
})
assert.NoError(t, fakeDocker.AssertCreatedByNameWithOrder([]string{"POD", "test"}))
// Verify security opts.
newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1])
if err != nil {
t.Fatalf("unexpected error %v", err)
}
securityOpts := newContainer.HostConfig.SecurityOpt
assert.Contains(t, securityOpts, "apparmor=test-profile", "Container should have apparmor security opt")
cid := utilstrings.ShortenString(fakeDocker.Created[1], 12)
assert.NoError(t, expectEvent(recorder, v1.EventTypeNormal, events.CreatedContainer,
fmt.Sprintf("Created container with docker id %s; Security:[seccomp=unconfined apparmor=test-profile]", cid)))
}
func TestSeccompLocalhostProfileIsLoaded(t *testing.T) {
tests := []struct {
annotations map[string]string
expectedSecOpt string
expectedSecMsg string
expectedError string
}{
{
annotations: map[string]string{
v1.SeccompPodAnnotationKey: "localhost/test",
},
expectedSecOpt: `seccomp={"foo":"bar"}`,
expectedSecMsg: "seccomp=test(md5:21aeae45053385adebd25311f9dd9cb1)",
},
{
annotations: map[string]string{
v1.SeccompPodAnnotationKey: "localhost/sub/subtest",
},
expectedSecOpt: `seccomp={"abc":"def"}`,
expectedSecMsg: "seccomp=sub/subtest(md5:07c9bcb4db631f7ca191d6e0bca49f76)",
},
{
annotations: map[string]string{
v1.SeccompPodAnnotationKey: "localhost/not-existing",
},
expectedError: "cannot load seccomp profile",
},
}
for i, test := range tests {
dm, fakeDocker := newTestDockerManagerWithVersion("1.11.0", "1.23")
// We want to capture events.
recorder := record.NewFakeRecorder(20)
dm.recorder = recorder
dm.seccompProfileRoot = path.Join("fixtures", "seccomp")
pod := makePod("foo2", &v1.PodSpec{
Containers: []v1.Container{
{Name: "bar2"},
},
})
pod.Annotations = test.annotations
result := runSyncPod(t, dm, fakeDocker, pod, nil, test.expectedError != "")
if test.expectedError != "" {
assert.Contains(t, result.Error().Error(), test.expectedError)
continue
}
verifyCalls(t, fakeDocker, []string{
// Create pod infra container.
"create", "start", "inspect_container", "inspect_container",
// Create container.
"create", "start", "inspect_container",
})
assert.NoError(t, fakeDocker.AssertCreatedByNameWithOrder([]string{"POD", "bar2"}))
newContainer, err := fakeDocker.InspectContainer(fakeDocker.Created[1])
if err != nil {
t.Fatalf("unexpected error %v", err)
}
assert.Contains(t, newContainer.HostConfig.SecurityOpt, test.expectedSecOpt, "The compacted seccomp json profile should be loaded.")
cid := utilstrings.ShortenString(fakeDocker.Created[1], 12)
assert.NoError(t, expectEvent(recorder, v1.EventTypeNormal, events.CreatedContainer,
fmt.Sprintf("Created container with docker id %s; Security:[%s]", cid, test.expectedSecMsg)),
"testcase %d", i)
}
}
func TestGetPodStatusFromNetworkPlugin(t *testing.T) {
cases := []struct {
pod *v1.Pod
fakePodIP string
containerID string
infraContainerID string
networkStatusError error
expectRunning bool
expectUnknown bool
}{
{
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container"}},
},
},
fakePodIP: "10.10.10.10",
containerID: "123",
infraContainerID: "9876",
networkStatusError: nil,
expectRunning: true,
expectUnknown: false,
},
{
pod: &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: "12345678",
Name: "foo",
Namespace: "new",
},
Spec: v1.PodSpec{
Containers: []v1.Container{{Name: "container"}},
},
},
fakePodIP: "",
containerID: "123",
infraContainerID: "9876",
networkStatusError: fmt.Errorf("CNI plugin error"),
expectRunning: false,
expectUnknown: true,
},
}
for _, test := range cases {
dm, fakeDocker := newTestDockerManager()
ctrl := gomock.NewController(t)
defer ctrl.Finish()
fnp := nettest.NewMockNetworkPlugin(ctrl)
dm.network = network.NewPluginManager(fnp)
fakeDocker.SetFakeRunningContainers([]*FakeContainer{
{
ID: test.containerID,
Name: fmt.Sprintf("/k8s_container_%s_%s_%s_42", test.pod.Name, test.pod.Namespace, test.pod.UID),
Running: true,
},
{
ID: test.infraContainerID,
Name: fmt.Sprintf("/k8s_POD.%s_%s_%s_%s_42", strconv.FormatUint(generatePodInfraContainerHash(test.pod), 16), test.pod.Name, test.pod.Namespace, test.pod.UID),
Running: true,
},
})
fnp.EXPECT().Name().Return("someNetworkPlugin").AnyTimes()
var podNetworkStatus *network.PodNetworkStatus
if test.fakePodIP != "" {
podNetworkStatus = &network.PodNetworkStatus{IP: net.ParseIP(test.fakePodIP)}
}
fnp.EXPECT().GetPodNetworkStatus(test.pod.Namespace, test.pod.Name, kubecontainer.DockerID(test.infraContainerID).ContainerID()).Return(podNetworkStatus, test.networkStatusError)
podStatus, err := dm.GetPodStatus(test.pod.UID, test.pod.Name, test.pod.Namespace)
if err != nil {
t.Fatal(err)
}
if podStatus.IP != test.fakePodIP {
t.Errorf("Got wrong ip, expected %v, got %v", test.fakePodIP, podStatus.IP)
}
expectedStatesCount := 0
var expectedState kubecontainer.ContainerState
if test.expectRunning {
expectedState = kubecontainer.ContainerStateRunning
} else if test.expectUnknown {
expectedState = kubecontainer.ContainerStateUnknown
} else {
t.Errorf("Some state has to be expected")
}
for _, containerStatus := range podStatus.ContainerStatuses {
if containerStatus.State == expectedState {
expectedStatesCount++
}
}
if expectedStatesCount < 1 {
t.Errorf("Invalid count of containers with expected state")
}
}
}

File diff suppressed because it is too large.


@@ -18,39 +18,6 @@ limitations under the License.
package dockertools
import (
dockertypes "github.com/docker/engine-api/types"
dockercontainer "github.com/docker/engine-api/types/container"
"k8s.io/kubernetes/pkg/api/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// These two functions are OS specific (for now at least)
func updateHostConfig(hc *dockercontainer.HostConfig, opts *kubecontainer.RunContainerOptions) {
}
func DefaultMemorySwap() int64 {
return -1
}
func getContainerIP(container *dockertypes.ContainerJSON) string {
return ""
}
func getNetworkingMode() string {
return ""
}
func containerProvidesPodIP(containerName string) bool {
return false
}
func containerIsNetworked(containerName string) bool {
return containerName == PodInfraContainerName
}
// Returns nil as both Seccomp and AppArmor security options are not valid on Windows
func (dm *DockerManager) getSecurityOpts(pod *v1.Pod, ctrName string) ([]dockerOpt, error) {
return nil, nil
}


@@ -18,63 +18,6 @@ limitations under the License.
package dockertools
import (
"os"
dockertypes "github.com/docker/engine-api/types"
dockercontainer "github.com/docker/engine-api/types/container"
"k8s.io/kubernetes/pkg/api/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
)
// These two functions are OS specific (for now at least)
func updateHostConfig(hc *dockercontainer.HostConfig, opts *kubecontainer.RunContainerOptions) {
// There is no /etc/resolv.conf on Windows; DNS and DNSSearch options have to be passed to the Docker runtime instead
hc.DNS = opts.DNS
hc.DNSSearch = opts.DNSSearch
// MemorySwap == -1 is not currently supported in Docker 1.14 on Windows
// https://github.com/docker/docker/blob/master/daemon/daemon_windows.go#L175
hc.Resources.MemorySwap = 0
}
func DefaultMemorySwap() int64 {
return 0
}
func getContainerIP(container *dockertypes.ContainerJSON) string {
if container.NetworkSettings != nil {
for _, network := range container.NetworkSettings.Networks {
if network.IPAddress != "" {
return network.IPAddress
}
}
}
return ""
}
func getNetworkingMode() string {
// Allow override via env variable. Otherwise, use a default "kubenet" network
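// e.g. CONTAINER_NETWORK=nat would select Docker's nat network instead (illustrative value).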
netMode := os.Getenv("CONTAINER_NETWORK")
if netMode == "" {
netMode = "kubenet"
}
return netMode
}
// Infrastructure containers are not supported on Windows. For this reason, we
// make sure to not grab the infra container's IP for the pod.
func containerProvidesPodIP(containerName string) bool {
return containerName != PodInfraContainerName
}
// All containers in Windows need networking setup/teardown
func containerIsNetworked(containerName string) bool {
return true
}
// Returns nil as both Seccomp and AppArmor security options are not valid on Windows
func (dm *DockerManager) getSecurityOpts(pod *v1.Pod, ctrName string) ([]dockerOpt, error) {
return nil, nil
}


@@ -23,26 +23,16 @@ import (
"math/rand"
"path"
"reflect"
"sort"
"strconv"
"strings"
"testing"
"github.com/docker/docker/pkg/jsonmessage"
dockertypes "github.com/docker/engine-api/types"
dockernat "github.com/docker/go-connections/nat"
cadvisorapi "github.com/google/cadvisor/info/v1"
"github.com/stretchr/testify/assert"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/credentialprovider"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/network"
nettest "k8s.io/kubernetes/pkg/kubelet/network/testing"
hashutil "k8s.io/kubernetes/pkg/util/hash"
)
@@ -716,272 +706,6 @@ func TestGetImageRef(t *testing.T) {
}
}
type podsByID []*kubecontainer.Pod
func (b podsByID) Len() int { return len(b) }
func (b podsByID) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b podsByID) Less(i, j int) bool { return b[i].ID < b[j].ID }
type containersByID []*kubecontainer.Container
func (b containersByID) Len() int { return len(b) }
func (b containersByID) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b containersByID) Less(i, j int) bool { return b[i].ID.ID < b[j].ID.ID }
func TestFindContainersByPod(t *testing.T) {
tests := []struct {
runningContainerList []dockertypes.Container
exitedContainerList []dockertypes.Container
all bool
expectedPods []*kubecontainer.Pod
}{
{
[]dockertypes.Container{
{
ID: "foobar",
Names: []string{"/k8s_foobar.1234_qux_ns_1234_42"},
},
{
ID: "barbar",
Names: []string{"/k8s_barbar.1234_qux_ns_2343_42"},
},
{
ID: "baz",
Names: []string{"/k8s_baz.1234_qux_ns_1234_42"},
},
},
[]dockertypes.Container{
{
ID: "barfoo",
Names: []string{"/k8s_barfoo.1234_qux_ns_1234_42"},
},
{
ID: "bazbaz",
Names: []string{"/k8s_bazbaz.1234_qux_ns_5678_42"},
},
},
false,
[]*kubecontainer.Pod{
{
ID: "1234",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: kubecontainer.DockerID("foobar").ContainerID(),
Name: "foobar",
Hash: 0x1234,
State: kubecontainer.ContainerStateUnknown,
},
{
ID: kubecontainer.DockerID("baz").ContainerID(),
Name: "baz",
Hash: 0x1234,
State: kubecontainer.ContainerStateUnknown,
},
},
},
{
ID: "2343",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: kubecontainer.DockerID("barbar").ContainerID(),
Name: "barbar",
Hash: 0x1234,
State: kubecontainer.ContainerStateUnknown,
},
},
},
},
},
{
[]dockertypes.Container{
{
ID: "foobar",
Names: []string{"/k8s_foobar.1234_qux_ns_1234_42"},
},
{
ID: "barbar",
Names: []string{"/k8s_barbar.1234_qux_ns_2343_42"},
},
{
ID: "baz",
Names: []string{"/k8s_baz.1234_qux_ns_1234_42"},
},
},
[]dockertypes.Container{
{
ID: "barfoo",
Names: []string{"/k8s_barfoo.1234_qux_ns_1234_42"},
},
{
ID: "bazbaz",
Names: []string{"/k8s_bazbaz.1234_qux_ns_5678_42"},
},
},
true,
[]*kubecontainer.Pod{
{
ID: "1234",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: kubecontainer.DockerID("foobar").ContainerID(),
Name: "foobar",
Hash: 0x1234,
State: kubecontainer.ContainerStateUnknown,
},
{
ID: kubecontainer.DockerID("barfoo").ContainerID(),
Name: "barfoo",
Hash: 0x1234,
State: kubecontainer.ContainerStateUnknown,
},
{
ID: kubecontainer.DockerID("baz").ContainerID(),
Name: "baz",
Hash: 0x1234,
State: kubecontainer.ContainerStateUnknown,
},
},
},
{
ID: "2343",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: kubecontainer.DockerID("barbar").ContainerID(),
Name: "barbar",
Hash: 0x1234,
State: kubecontainer.ContainerStateUnknown,
},
},
},
{
ID: "5678",
Name: "qux",
Namespace: "ns",
Containers: []*kubecontainer.Container{
{
ID: kubecontainer.DockerID("bazbaz").ContainerID(),
Name: "bazbaz",
Hash: 0x1234,
State: kubecontainer.ContainerStateUnknown,
},
},
},
},
},
{
[]dockertypes.Container{},
[]dockertypes.Container{},
true,
nil,
},
}
fakeClient := NewFakeDockerClient()
np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, "10.0.0.0/8", network.UseDefaultMTU)
// image back-off is set to nil; this test should not pull images
containerManager := NewFakeDockerManager(fakeClient, &record.FakeRecorder{}, nil, nil, &cadvisorapi.MachineInfo{}, "", 0, 0, "", &containertest.FakeOS{}, np, nil, nil, nil)
for i, test := range tests {
fakeClient.RunningContainerList = test.runningContainerList
fakeClient.ExitedContainerList = test.exitedContainerList
result, _ := containerManager.GetPods(test.all)
for i := range result {
sort.Sort(containersByID(result[i].Containers))
}
for i := range test.expectedPods {
sort.Sort(containersByID(test.expectedPods[i].Containers))
}
sort.Sort(podsByID(result))
sort.Sort(podsByID(test.expectedPods))
if !reflect.DeepEqual(test.expectedPods, result) {
t.Errorf("%d: expected: %#v, saw: %#v", i, test.expectedPods, result)
}
}
}
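The pod grouping above falls out of the docker container naming convention this package uses: names look like /k8s_{containerName}.{hash}_{podName}_{podNamespace}_{podUID}_{attempt}. The real parser is ParseDockerName in docker.go; the helper below is only a minimal, self-contained sketch of that convention (its name and simplified error handling are illustrative, not the package's API):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseDockerNameSketch splits a name such as
// "/k8s_foobar.1234_qux_ns_1234_42" into its parts: container name and
// hash, then pod name, namespace, UID, and attempt suffix. The real
// ParseDockerName in docker.go handles more edge cases.
func parseDockerNameSketch(name string) (container, pod, namespace, uid string, hash uint64, err error) {
	parts := strings.Split(strings.TrimPrefix(name, "/"), "_")
	if len(parts) != 6 || parts[0] != "k8s" {
		return "", "", "", "", 0, fmt.Errorf("malformed container name %q", name)
	}
	nameAndHash := strings.SplitN(parts[1], ".", 2)
	container = nameAndHash[0]
	if len(nameAndHash) == 2 {
		if hash, err = strconv.ParseUint(nameAndHash[1], 16, 64); err != nil {
			return "", "", "", "", 0, err
		}
	}
	return container, parts[2], parts[3], parts[4], hash, nil
}

func main() {
	c, p, ns, uid, h, _ := parseDockerNameSketch("/k8s_foobar.1234_qux_ns_1234_42")
	fmt.Printf("%s %s %s %s %#x\n", c, p, ns, uid, h) // foobar qux ns 1234 0x1234
}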
func TestMakePortsAndBindings(t *testing.T) {
portMapping := func(container, host int, protocol v1.Protocol, ip string) kubecontainer.PortMapping {
return kubecontainer.PortMapping{
ContainerPort: container,
HostPort: host,
Protocol: protocol,
HostIP: ip,
}
}
portBinding := func(port, ip string) dockernat.PortBinding {
return dockernat.PortBinding{
HostPort: port,
HostIP: ip,
}
}
ports := []kubecontainer.PortMapping{
portMapping(80, 8080, "", "127.0.0.1"),
portMapping(443, 443, "tcp", ""),
portMapping(444, 444, "udp", ""),
portMapping(445, 445, "foobar", ""),
portMapping(443, 446, "tcp", ""),
portMapping(443, 446, "udp", ""),
}
exposedPorts, bindings := makePortsAndBindings(ports)
// Count the expected exposed ports and bindings
expectedExposedPorts := map[string]struct{}{}
for _, binding := range ports {
dockerKey := strconv.Itoa(binding.ContainerPort) + "/" + string(binding.Protocol)
expectedExposedPorts[dockerKey] = struct{}{}
}
// Should expose the right ports in docker
if len(expectedExposedPorts) != len(exposedPorts) {
t.Errorf("Unexpected ports and bindings, %#v %#v %#v", ports, exposedPorts, bindings)
}
// Construct expected bindings
expectPortBindings := map[string][]dockernat.PortBinding{
"80/tcp": {
portBinding("8080", "127.0.0.1"),
},
"443/tcp": {
portBinding("443", ""),
portBinding("446", ""),
},
"443/udp": {
portBinding("446", ""),
},
"444/udp": {
portBinding("444", ""),
},
"445/tcp": {
portBinding("445", ""),
},
}
// Iterate over the bindings by dockerPort and check their portBindings
for dockerPort, portBindings := range bindings {
switch dockerPort {
case "80/tcp", "443/tcp", "443/udp", "444/udp", "445/tcp":
if !reflect.DeepEqual(expectPortBindings[string(dockerPort)], portBindings) {
t.Errorf("Unexpected portbindings for %#v, expected: %#v, but got: %#v",
dockerPort, expectPortBindings[string(dockerPort)], portBindings)
}
default:
t.Errorf("Unexpected docker port: %#v with portbindings: %#v", dockerPort, portBindings)
}
}
}
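makePortsAndBindings itself is not part of this hunk. As a reading aid, here is a minimal sketch of the behavior the assertions above imply: empty or unrecognized protocols fall back to TCP, and multiple host bindings for the same exposed port accumulate. The type names and the fallback rule are inferred from the test, not quoted from the implementation:

package main

import (
	"fmt"
	"strconv"

	dockernat "github.com/docker/go-connections/nat"
)

// PortMapping mirrors the fields of kubecontainer.PortMapping used here.
type PortMapping struct {
	ContainerPort int
	HostPort      int
	Protocol      string
	HostIP        string
}

// makePortsAndBindingsSketch exposes each container port as
// "<port>/<protocol>", defaulting "" and unknown protocols to "tcp",
// and groups host bindings by exposed port.
func makePortsAndBindingsSketch(pms []PortMapping) (map[dockernat.Port]struct{}, map[dockernat.Port][]dockernat.PortBinding) {
	exposed := map[dockernat.Port]struct{}{}
	bindings := map[dockernat.Port][]dockernat.PortBinding{}
	for _, pm := range pms {
		proto := pm.Protocol
		if proto != "udp" && proto != "tcp" {
			proto = "tcp" // covers "" and values like "foobar"
		}
		port := dockernat.Port(strconv.Itoa(pm.ContainerPort) + "/" + proto)
		exposed[port] = struct{}{}
		bindings[port] = append(bindings[port], dockernat.PortBinding{
			HostIP:   pm.HostIP,
			HostPort: strconv.Itoa(pm.HostPort),
		})
	}
	return exposed, bindings
}

func main() {
	_, b := makePortsAndBindingsSketch([]PortMapping{
		{ContainerPort: 80, HostPort: 8080, HostIP: "127.0.0.1"},
		{ContainerPort: 443, HostPort: 443, Protocol: "tcp"},
		{ContainerPort: 443, HostPort: 446, Protocol: "tcp"},
	})
	fmt.Println(b["80/tcp"])  // [{127.0.0.1 8080}]
	fmt.Println(b["443/tcp"]) // [{ 443} { 446}]
}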
const letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
// randStringBytes returns a random string of length n drawn from letterBytes.
func randStringBytes(n int) string {
	b := make([]byte, n)
	for i := range b {
		b[i] = letterBytes[rand.Intn(len(letterBytes))]
	}
	return string(b)
}

View File

@ -1,78 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/api/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/network"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/cache"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/util/procfs"
)
func NewFakeDockerManager(
client DockerInterface,
recorder record.EventRecorder,
livenessManager proberesults.Manager,
containerRefManager *kubecontainer.RefManager,
machineInfo *cadvisorapi.MachineInfo,
podInfraContainerImage string,
qps float32,
burst int,
containerLogsDir string,
osInterface kubecontainer.OSInterface,
networkPlugin network.NetworkPlugin,
runtimeHelper kubecontainer.RuntimeHelper,
httpClient kubetypes.HttpGetter, imageBackOff *flowcontrol.Backoff) *DockerManager {
fakeOOMAdjuster := oom.NewFakeOOMAdjuster()
fakeProcFs := procfs.NewFakeProcFS()
fakePodGetter := &fakePodGetter{}
dm := NewDockerManager(client, recorder, livenessManager, containerRefManager, fakePodGetter, machineInfo, podInfraContainerImage, qps,
burst, containerLogsDir, osInterface, networkPlugin, runtimeHelper, httpClient, &NativeExecHandler{},
fakeOOMAdjuster, fakeProcFs, false, imageBackOff, false, false, true, "/var/lib/kubelet/seccomp")
dm.dockerPuller = &FakeDockerPuller{client: client}
// The TTL of the version cache is set to 0 so that tests always call the version API directly.
dm.versionCache = cache.NewObjectCache(
func() (interface{}, error) {
return dm.getVersionInfo()
},
0,
)
return dm
}
type fakePodGetter struct {
pods map[types.UID]*v1.Pod
}
func newFakePodGetter() *fakePodGetter {
return &fakePodGetter{make(map[types.UID]*v1.Pod)}
}
func (f *fakePodGetter) GetPodByUID(uid types.UID) (*v1.Pod, bool) {
pod, found := f.pods[uid]
return pod, found
}

View File

@ -1,102 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"fmt"
"sync"
"github.com/golang/glog"
dockertypes "github.com/docker/engine-api/types"
runtime "k8s.io/kubernetes/pkg/kubelet/container"
)
// imageStatsProvider exposes stats about all images currently available.
type imageStatsProvider struct {
sync.Mutex
// layers caches the current layers, keyed by layer ID.
layers map[string]*dockertypes.ImageHistory
// imageToLayerIDs maps image to its layer IDs.
imageToLayerIDs map[string][]string
// Docker remote API client
c DockerInterface
}
func newImageStatsProvider(c DockerInterface) *imageStatsProvider {
return &imageStatsProvider{
layers: make(map[string]*dockertypes.ImageHistory),
imageToLayerIDs: make(map[string][]string),
c: c,
}
}
func (isp *imageStatsProvider) ImageStats() (*runtime.ImageStats, error) {
images, err := isp.c.ListImages(dockertypes.ImageListOptions{})
if err != nil {
return nil, fmt.Errorf("failed to list docker images - %v", err)
}
// Take the lock to protect the cache
isp.Lock()
defer isp.Unlock()
// Create a new cache each time. This consumes a little more memory, but:
// * ImageStats is only called every 10 seconds.
// * Cache elements are copied by pointer and reference.
// The memory usage should be acceptable.
// TODO(random-liu): Add more logic to implement in place cache update.
newLayers := make(map[string]*dockertypes.ImageHistory)
newImageToLayerIDs := make(map[string][]string)
for _, image := range images {
layerIDs, ok := isp.imageToLayerIDs[image.ID]
if !ok {
// Get information about the various layers of the given docker image.
history, err := isp.c.ImageHistory(image.ID)
if err != nil {
// Skip the image and inspect it again in the next ImageStats call if the image is still there
glog.V(2).Infof("failed to get history of docker image %+v - %v", image, err)
continue
}
// Cache each layer
for i := range history {
layer := &history[i]
key := layer.ID
// Some layers have an empty or "<missing>" ID. Such IDs may not be
// unique across images, so key these layers by the CreatedBy field as
// well to keep distinct anonymous layers from colliding.
if key == "" || key == "<missing>" {
key = key + layer.CreatedBy
}
layerIDs = append(layerIDs, key)
newLayers[key] = layer
}
} else {
for _, layerID := range layerIDs {
newLayers[layerID] = isp.layers[layerID]
}
}
newImageToLayerIDs[image.ID] = layerIDs
}
ret := &runtime.ImageStats{}
// Calculate the total storage bytes
for _, layer := range newLayers {
ret.TotalStorageBytes += uint64(layer.Size)
}
// Update current cache
isp.layers = newLayers
isp.imageToLayerIDs = newImageToLayerIDs
return ret, nil
}
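A worked example of the keying rule above: a real layer ID such as "0123457" is counted once even when shared across images, while empty or "<missing>" IDs are disambiguated by CreatedBy. A standalone sketch of just the key derivation, using a hypothetical helper under the same rules as the loop above:

package main

import "fmt"

// layerKey mirrors the cache-key rule in ImageStats: real layer IDs key
// directly, while empty or "<missing>" IDs are suffixed with CreatedBy
// so that distinct anonymous layers do not collide.
func layerKey(id, createdBy string) string {
	if id == "" || id == "<missing>" {
		return id + createdBy
	}
	return id
}

func main() {
	fmt.Println(layerKey("0123457", "duplicate")) // "0123457": dedupes across images
	fmt.Println(layerKey("<missing>", "baz"))     // "<missing>baz"
	fmt.Println(layerKey("<missing>", "1baz"))    // "<missing>1baz"
}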

View File

@ -1,334 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"testing"
dockertypes "github.com/docker/engine-api/types"
"github.com/stretchr/testify/assert"
)
func TestImageStatsNoImages(t *testing.T) {
fakeDockerClient := NewFakeDockerClient().WithVersion("1.2.3", "1.2")
isp := newImageStatsProvider(fakeDockerClient)
st, err := isp.ImageStats()
as := assert.New(t)
as.NoError(err)
as.NoError(fakeDockerClient.AssertCalls([]string{"list_images"}))
as.Equal(st.TotalStorageBytes, uint64(0))
}
func TestImageStatsWithImages(t *testing.T) {
fakeDockerClient := NewFakeDockerClient().WithVersion("1.2.3", "1.2")
fakeHistoryData := map[string][]dockertypes.ImageHistory{
"busybox": {
{
ID: "0123456",
CreatedBy: "foo",
Size: 100,
},
{
ID: "0123457",
CreatedBy: "duplicate",
Size: 200,
},
{
ID: "<missing>",
CreatedBy: "baz",
Size: 300,
},
},
"kubelet": {
{
ID: "1123456",
CreatedBy: "foo",
Size: 200,
},
{
ID: "<missing>",
CreatedBy: "1baz",
Size: 400,
},
},
"busybox-new": {
{
ID: "01234567",
CreatedBy: "foo",
Size: 100,
},
{
ID: "0123457",
CreatedBy: "duplicate",
Size: 200,
},
{
ID: "<missing>",
CreatedBy: "baz",
Size: 300,
},
},
}
fakeDockerClient.InjectImageHistory(fakeHistoryData)
fakeDockerClient.InjectImages([]dockertypes.Image{
{
ID: "busybox",
},
{
ID: "kubelet",
},
{
ID: "busybox-new",
},
})
isp := newImageStatsProvider(fakeDockerClient)
st, err := isp.ImageStats()
as := assert.New(t)
as.NoError(err)
as.NoError(fakeDockerClient.AssertCalls([]string{"list_images", "image_history", "image_history", "image_history"}))
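// Unique layers after deduplication: 0123456 (100) + 0123457 (200,
// shared by busybox and busybox-new) + <missing>baz (300, shared) +
// 1123456 (200) + <missing>1baz (400) + 01234567 (100) = 1300.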
const expectedOutput uint64 = 1300
as.Equal(expectedOutput, st.TotalStorageBytes, "expected %d, got %d", expectedOutput, st.TotalStorageBytes)
}
func TestImageStatsWithCachedImages(t *testing.T) {
for _, test := range []struct {
oldLayers map[string]*dockertypes.ImageHistory
oldImageToLayerIDs map[string][]string
images []dockertypes.Image
history map[string][]dockertypes.ImageHistory
expectedCalls []string
expectedLayers map[string]*dockertypes.ImageHistory
expectedImageToLayerIDs map[string][]string
expectedTotalStorageSize uint64
}{
{
// No cache
oldLayers: make(map[string]*dockertypes.ImageHistory),
oldImageToLayerIDs: make(map[string][]string),
images: []dockertypes.Image{
{
ID: "busybox",
},
{
ID: "kubelet",
},
},
history: map[string][]dockertypes.ImageHistory{
"busybox": {
{
ID: "0123456",
CreatedBy: "foo",
Size: 100,
},
{
ID: "<missing>",
CreatedBy: "baz",
Size: 300,
},
},
"kubelet": {
{
ID: "1123456",
CreatedBy: "foo",
Size: 200,
},
{
ID: "<missing>",
CreatedBy: "1baz",
Size: 400,
},
},
},
expectedCalls: []string{"list_images", "image_history", "image_history"},
expectedLayers: map[string]*dockertypes.ImageHistory{
"0123456": {
ID: "0123456",
CreatedBy: "foo",
Size: 100,
},
"1123456": {
ID: "1123456",
CreatedBy: "foo",
Size: 200,
},
"<missing>baz": {
ID: "<missing>",
CreatedBy: "baz",
Size: 300,
},
"<missing>1baz": {
ID: "<missing>",
CreatedBy: "1baz",
Size: 400,
},
},
expectedImageToLayerIDs: map[string][]string{
"busybox": {"0123456", "<missing>baz"},
"kubelet": {"1123456", "<missing>1baz"},
},
expectedTotalStorageSize: 1000,
},
{
// Use cache value
oldLayers: map[string]*dockertypes.ImageHistory{
"0123456": {
ID: "0123456",
CreatedBy: "foo",
Size: 100,
},
"<missing>baz": {
ID: "<missing>",
CreatedBy: "baz",
Size: 300,
},
},
oldImageToLayerIDs: map[string][]string{
"busybox": {"0123456", "<missing>baz"},
},
images: []dockertypes.Image{
{
ID: "busybox",
},
{
ID: "kubelet",
},
},
history: map[string][]dockertypes.ImageHistory{
"busybox": {
{
ID: "0123456",
CreatedBy: "foo",
Size: 100,
},
{
ID: "<missing>",
CreatedBy: "baz",
Size: 300,
},
},
"kubelet": {
{
ID: "1123456",
CreatedBy: "foo",
Size: 200,
},
{
ID: "<missing>",
CreatedBy: "1baz",
Size: 400,
},
},
},
expectedCalls: []string{"list_images", "image_history"},
expectedLayers: map[string]*dockertypes.ImageHistory{
"0123456": {
ID: "0123456",
CreatedBy: "foo",
Size: 100,
},
"1123456": {
ID: "1123456",
CreatedBy: "foo",
Size: 200,
},
"<missing>baz": {
ID: "<missing>",
CreatedBy: "baz",
Size: 300,
},
"<missing>1baz": {
ID: "<missing>",
CreatedBy: "1baz",
Size: 400,
},
},
expectedImageToLayerIDs: map[string][]string{
"busybox": {"0123456", "<missing>baz"},
"kubelet": {"1123456", "<missing>1baz"},
},
expectedTotalStorageSize: 1000,
},
{
// Unused cache value
oldLayers: map[string]*dockertypes.ImageHistory{
"0123456": {
ID: "0123456",
CreatedBy: "foo",
Size: 100,
},
"<missing>baz": {
ID: "<missing>",
CreatedBy: "baz",
Size: 300,
},
},
oldImageToLayerIDs: map[string][]string{
"busybox": {"0123456", "<missing>baz"},
},
images: []dockertypes.Image{
{
ID: "kubelet",
},
},
history: map[string][]dockertypes.ImageHistory{
"kubelet": {
{
ID: "1123456",
CreatedBy: "foo",
Size: 200,
},
{
ID: "<missing>",
CreatedBy: "1baz",
Size: 400,
},
},
},
expectedCalls: []string{"list_images", "image_history"},
expectedLayers: map[string]*dockertypes.ImageHistory{
"1123456": {
ID: "1123456",
CreatedBy: "foo",
Size: 200,
},
"<missing>1baz": {
ID: "<missing>",
CreatedBy: "1baz",
Size: 400,
},
},
expectedImageToLayerIDs: map[string][]string{
"kubelet": {"1123456", "<missing>1baz"},
},
expectedTotalStorageSize: 600,
},
} {
fakeDockerClient := NewFakeDockerClient().WithVersion("1.2.3", "1.2")
fakeDockerClient.InjectImages(test.images)
fakeDockerClient.InjectImageHistory(test.history)
isp := newImageStatsProvider(fakeDockerClient)
isp.layers = test.oldLayers
isp.imageToLayerIDs = test.oldImageToLayerIDs
st, err := isp.ImageStats()
as := assert.New(t)
as.NoError(err)
as.NoError(fakeDockerClient.AssertCalls(test.expectedCalls))
as.Equal(test.expectedLayers, isp.layers, "expected %+v, got %+v", test.expectedLayers, isp.layers)
as.Equal(test.expectedImageToLayerIDs, isp.imageToLayerIDs, "expected %+v, got %+v", test.expectedImageToLayerIDs, isp.imageToLayerIDs)
as.Equal(test.expectedTotalStorageSize, st.TotalStorageBytes, "expected %d, got %d", test.expectedTotalStorageSize, st.TotalStorageBytes)
}
}

View File

@ -1,249 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"encoding/json"
"strconv"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/runtime"
kubetypes "k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/custommetrics"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format"
)
// This file contains all docker label related constants and functions, including:
// * label setters and getters
// * label filters (maybe in the future)
const (
kubernetesPodDeletionGracePeriodLabel = "io.kubernetes.pod.deletionGracePeriod"
kubernetesPodTerminationGracePeriodLabel = "io.kubernetes.pod.terminationGracePeriod"
kubernetesContainerHashLabel = "io.kubernetes.container.hash"
kubernetesContainerRestartCountLabel = "io.kubernetes.container.restartCount"
kubernetesContainerTerminationMessagePathLabel = "io.kubernetes.container.terminationMessagePath"
kubernetesContainerTerminationMessagePolicyLabel = "io.kubernetes.container.terminationMessagePolicy"
kubernetesContainerPreStopHandlerLabel = "io.kubernetes.container.preStopHandler"
kubernetesContainerPortsLabel = "io.kubernetes.container.ports" // Added in 1.4
// TODO(random-liu): Keep this for old containers; remove it when we drop support for v1.1.
kubernetesPodLabel = "io.kubernetes.pod.data"
cadvisorPrometheusMetricsLabel = "io.cadvisor.metric.prometheus"
)
// Container information that is stored in the labels of each docker container
// TODO(random-liu): The type of Hash should be consistent with the kubelet container status.
type labelledContainerInfo struct {
PodName string
PodNamespace string
PodUID kubetypes.UID
PodDeletionGracePeriod *int64
PodTerminationGracePeriod *int64
Name string
Hash string
RestartCount int
TerminationMessagePath string
TerminationMessagePolicy v1.TerminationMessagePolicy
PreStopHandler *v1.Handler
Ports []v1.ContainerPort
}
func newLabels(container *v1.Container, pod *v1.Pod, restartCount int, enableCustomMetrics bool) map[string]string {
labels := map[string]string{}
labels[types.KubernetesPodNameLabel] = pod.Name
labels[types.KubernetesPodNamespaceLabel] = pod.Namespace
labels[types.KubernetesPodUIDLabel] = string(pod.UID)
if pod.DeletionGracePeriodSeconds != nil {
labels[kubernetesPodDeletionGracePeriodLabel] = strconv.FormatInt(*pod.DeletionGracePeriodSeconds, 10)
}
if pod.Spec.TerminationGracePeriodSeconds != nil {
labels[kubernetesPodTerminationGracePeriodLabel] = strconv.FormatInt(*pod.Spec.TerminationGracePeriodSeconds, 10)
}
labels[types.KubernetesContainerNameLabel] = container.Name
labels[kubernetesContainerHashLabel] = strconv.FormatUint(kubecontainer.HashContainerLegacy(container), 16)
labels[kubernetesContainerRestartCountLabel] = strconv.Itoa(restartCount)
labels[kubernetesContainerTerminationMessagePathLabel] = container.TerminationMessagePath
labels[kubernetesContainerTerminationMessagePolicyLabel] = string(container.TerminationMessagePolicy)
if container.Lifecycle != nil && container.Lifecycle.PreStop != nil {
// Use JSON encoding so that the PreStop handler object is readable after being written as a label
rawPreStop, err := json.Marshal(container.Lifecycle.PreStop)
if err != nil {
glog.Errorf("Unable to marshal lifecycle PreStop handler for container %q of pod %q: %v", container.Name, format.Pod(pod), err)
} else {
labels[kubernetesContainerPreStopHandlerLabel] = string(rawPreStop)
}
}
if len(container.Ports) > 0 {
rawContainerPorts, err := json.Marshal(container.Ports)
if err != nil {
glog.Errorf("Unable to marshal container ports for container %q for pod %q: %v", container.Name, format.Pod(pod), err)
} else {
labels[kubernetesContainerPortsLabel] = string(rawContainerPorts)
}
}
if enableCustomMetrics {
path, err := custommetrics.GetCAdvisorCustomMetricsDefinitionPath(container)
if path != nil && err == nil {
labels[cadvisorPrometheusMetricsLabel] = *path
}
}
return labels
}
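For illustration, a pod named "bar" in namespace "default" with a container "foo" on its third run would yield a label map along these lines; the pod UID and hash values below are made-up placeholders, and the JSON-encoded preStopHandler/ports entries are omitted since they depend on the concrete spec:

package main

import "fmt"

func main() {
	// Hypothetical output of newLabels; the UID and hash are examples only.
	labels := map[string]string{
		"io.kubernetes.pod.name":                           "bar",
		"io.kubernetes.pod.namespace":                      "default",
		"io.kubernetes.pod.uid":                            "1234-5678",
		"io.kubernetes.container.name":                     "foo",
		"io.kubernetes.container.hash":                     "5f4e8e9",
		"io.kubernetes.container.restartCount":             "2",
		"io.kubernetes.container.terminationMessagePath":   "/dev/termination-log",
		"io.kubernetes.container.terminationMessagePolicy": "File",
	}
	fmt.Println(labels)
}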
func getContainerInfoFromLabel(labels map[string]string) *labelledContainerInfo {
var err error
containerInfo := &labelledContainerInfo{
PodName: getStringValueFromLabel(labels, types.KubernetesPodNameLabel),
PodNamespace: getStringValueFromLabel(labels, types.KubernetesPodNamespaceLabel),
PodUID: kubetypes.UID(getStringValueFromLabel(labels, types.KubernetesPodUIDLabel)),
Name: getStringValueFromLabel(labels, types.KubernetesContainerNameLabel),
Hash: getStringValueFromLabel(labels, kubernetesContainerHashLabel),
TerminationMessagePath: getStringValueFromLabel(labels, kubernetesContainerTerminationMessagePathLabel),
TerminationMessagePolicy: v1.TerminationMessagePolicy(getStringValueFromLabel(labels, kubernetesContainerTerminationMessagePolicyLabel)),
}
if containerInfo.RestartCount, err = getIntValueFromLabel(labels, kubernetesContainerRestartCountLabel); err != nil {
logError(containerInfo, kubernetesContainerRestartCountLabel, err)
}
if containerInfo.PodDeletionGracePeriod, err = getInt64PointerFromLabel(labels, kubernetesPodDeletionGracePeriodLabel); err != nil {
logError(containerInfo, kubernetesPodDeletionGracePeriodLabel, err)
}
if containerInfo.PodTerminationGracePeriod, err = getInt64PointerFromLabel(labels, kubernetesPodTerminationGracePeriodLabel); err != nil {
logError(containerInfo, kubernetesPodTerminationGracePeriodLabel, err)
}
preStopHandler := &v1.Handler{}
if found, err := getJsonObjectFromLabel(labels, kubernetesContainerPreStopHandlerLabel, preStopHandler); err != nil {
logError(containerInfo, kubernetesContainerPreStopHandlerLabel, err)
} else if found {
containerInfo.PreStopHandler = preStopHandler
}
containerPorts := []v1.ContainerPort{}
if found, err := getJsonObjectFromLabel(labels, kubernetesContainerPortsLabel, &containerPorts); err != nil {
logError(containerInfo, kubernetesContainerPortsLabel, err)
} else if found {
containerInfo.Ports = containerPorts
}
supplyContainerInfoWithOldLabel(labels, containerInfo)
return containerInfo
}
func getStringValueFromLabel(labels map[string]string, label string) string {
if value, found := labels[label]; found {
return value
}
// Do not report an error, because many old containers may not have this label.
glog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label)
// Return the empty string for these containers; the caller will obtain the value by other means.
return ""
}
func getIntValueFromLabel(labels map[string]string, label string) (int, error) {
if strValue, found := labels[label]; found {
intValue, err := strconv.Atoi(strValue)
if err != nil {
// This really should not happen. Just set the value to 0 to handle this abnormal case.
return 0, err
}
return intValue, nil
}
// Do not report an error, because many old containers may not have this label.
glog.V(3).Infof("Container doesn't have label %s, it may be an old or invalid container", label)
// Just set the value to 0
return 0, nil
}
func getInt64PointerFromLabel(labels map[string]string, label string) (*int64, error) {
if strValue, found := labels[label]; found {
int64Value, err := strconv.ParseInt(strValue, 10, 64)
if err != nil {
return nil, err
}
return &int64Value, nil
}
// Because it's normal that a container has no PodDeletionGracePeriod and PodTerminationGracePeriod label,
// don't report any error here.
return nil, nil
}
// getJsonObjectFromLabel returns a bool value indicating whether an object is found
func getJsonObjectFromLabel(labels map[string]string, label string, value interface{}) (bool, error) {
if strValue, found := labels[label]; found {
err := json.Unmarshal([]byte(strValue), value)
return found, err
}
// Because it's normal that a container has no PreStopHandler label, don't report any error here.
return false, nil
}
// The label kubernetesPodLabel was added a long time ago (#7421); it serialized the whole v1.Pod into a
// docker label. We want to remove it because it serializes too much useless information. However, the
// kubelet may still have to work with old containers that only carry this label until we completely
// deprecate it. Until then, to ensure correctness, we fall back to the old label when the newly added
// labels are not available.
// TODO(random-liu): Remove this function when we can completely remove label kubernetesPodLabel, probably after
// dropping support for v1.1.
func supplyContainerInfoWithOldLabel(labels map[string]string, containerInfo *labelledContainerInfo) {
// Get v1.Pod from old label
var pod *v1.Pod
data, found := labels[kubernetesPodLabel]
if !found {
// Don't report any error here, because it's normal for a container to have no pod label, especially
// as we gradually deprecate the old label
return
}
pod = &v1.Pod{}
if err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), []byte(data), pod); err != nil {
// If the pod label can't be parsed, we should report an error
logError(containerInfo, kubernetesPodLabel, err)
return
}
if containerInfo.PodDeletionGracePeriod == nil {
containerInfo.PodDeletionGracePeriod = pod.DeletionGracePeriodSeconds
}
if containerInfo.PodTerminationGracePeriod == nil {
containerInfo.PodTerminationGracePeriod = pod.Spec.TerminationGracePeriodSeconds
}
// Get v1.Container from v1.Pod
var container *v1.Container
for i := range pod.Spec.Containers {
if pod.Spec.Containers[i].Name == containerInfo.Name {
container = &pod.Spec.Containers[i]
break
}
}
if container == nil {
glog.Errorf("Unable to find container %q in pod %q", containerInfo.Name, format.Pod(pod))
return
}
if containerInfo.PreStopHandler == nil && container.Lifecycle != nil {
containerInfo.PreStopHandler = container.Lifecycle.PreStop
}
}
func logError(containerInfo *labelledContainerInfo, label string, err error) {
glog.Errorf("Unable to get %q for container %q of pod %q: %v", label, containerInfo.Name,
kubecontainer.BuildPodFullName(containerInfo.PodName, containerInfo.PodNamespace), err)
}

View File

@ -1,142 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockertools
import (
"reflect"
"strconv"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/util/format"
)
func TestLabels(t *testing.T) {
restartCount := 5
deletionGracePeriod := int64(10)
terminationGracePeriod := int64(10)
lifecycle := &v1.Lifecycle{
// Left PostStart as nil
PreStop: &v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"action1", "action2"},
},
HTTPGet: &v1.HTTPGetAction{
Path: "path",
Host: "host",
Port: intstr.FromInt(8080),
Scheme: "scheme",
},
TCPSocket: &v1.TCPSocketAction{
Port: intstr.FromString("80"),
},
},
}
containerPorts := []v1.ContainerPort{
{
Name: "http",
HostPort: 80,
ContainerPort: 8080,
Protocol: v1.ProtocolTCP,
},
{
Name: "https",
HostPort: 443,
ContainerPort: 6443,
Protocol: v1.ProtocolTCP,
},
}
container := &v1.Container{
Name: "test_container",
Ports: containerPorts,
TerminationMessagePath: "/somepath",
Lifecycle: lifecycle,
}
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "test_pod",
Namespace: "test_pod_namespace",
UID: "test_pod_uid",
DeletionGracePeriodSeconds: &deletionGracePeriod,
},
Spec: v1.PodSpec{
Containers: []v1.Container{*container},
TerminationGracePeriodSeconds: &terminationGracePeriod,
},
}
expected := &labelledContainerInfo{
PodName: pod.Name,
PodNamespace: pod.Namespace,
PodUID: pod.UID,
PodDeletionGracePeriod: pod.DeletionGracePeriodSeconds,
PodTerminationGracePeriod: pod.Spec.TerminationGracePeriodSeconds,
Name: container.Name,
Hash: strconv.FormatUint(kubecontainer.HashContainerLegacy(container), 16),
RestartCount: restartCount,
TerminationMessagePath: container.TerminationMessagePath,
PreStopHandler: container.Lifecycle.PreStop,
Ports: containerPorts,
}
// Test whether we can get the right information back from the labels
labels := newLabels(container, pod, restartCount, false)
containerInfo := getContainerInfoFromLabel(labels)
if !reflect.DeepEqual(containerInfo, expected) {
t.Errorf("expected %v, got %v", expected, containerInfo)
}
// Test that when DeletionGracePeriodSeconds, TerminationGracePeriodSeconds and Lifecycle are nil,
// the information retrieved from the labels is also nil
container.Lifecycle = nil
pod.DeletionGracePeriodSeconds = nil
pod.Spec.TerminationGracePeriodSeconds = nil
expected.PodDeletionGracePeriod = nil
expected.PodTerminationGracePeriod = nil
expected.PreStopHandler = nil
// Because the container has changed, the Hash should be updated
expected.Hash = strconv.FormatUint(kubecontainer.HashContainerLegacy(container), 16)
labels = newLabels(container, pod, restartCount, false)
containerInfo = getContainerInfoFromLabel(labels)
if !reflect.DeepEqual(containerInfo, expected) {
t.Errorf("expected %v, got %v", expected, containerInfo)
}
// Test that when DeletionGracePeriodSeconds, TerminationGracePeriodSeconds and Lifecycle are nil
// but the old label kubernetesPodLabel is set, the information is still retrieved from it
pod.DeletionGracePeriodSeconds = &deletionGracePeriod
pod.Spec.TerminationGracePeriodSeconds = &terminationGracePeriod
container.Lifecycle = lifecycle
data, err := runtime.Encode(testapi.Default.Codec(), pod)
if err != nil {
t.Fatalf("Failed to encode pod %q into string: %v", format.Pod(pod), err)
}
labels[kubernetesPodLabel] = string(data)
expected.PodDeletionGracePeriod = pod.DeletionGracePeriodSeconds
expected.PodTerminationGracePeriod = pod.Spec.TerminationGracePeriodSeconds
expected.PreStopHandler = container.Lifecycle.PreStop
// Do not update expected.Hash here: we reuse the labels from the previous case directly, so
// kubernetesContainerHashLabel was never changed in this test and expected.Hash should stay the same.
containerInfo = getContainerInfoFromLabel(labels)
if !reflect.DeepEqual(containerInfo, expected) {
t.Errorf("expected %v, got %v", expected, containerInfo)
}
}

View File

@ -101,7 +101,6 @@ import (
"k8s.io/kubernetes/pkg/util/mount"
nodeutil "k8s.io/kubernetes/pkg/util/node"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/util/procfs"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
)
@ -504,7 +503,6 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
return nil, err
}
procFs := procfs.NewProcFS()
imageBackOff := flowcontrol.NewBackOff(backOffPeriod, MaxContainerBackOff)
klet.livenessManager = proberesults.NewManager()
@ -541,7 +539,12 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
pluginSettings.LegacyRuntimeHost = nl
// rktnetes cannot be run with CRI.
if kubeCfg.ContainerRuntime != "rkt" && kubeCfg.EnableCRI {
// TODO(yujuhong): Remove the EnableCRI field.
if kubeCfg.ContainerRuntime != "rkt" {
kubeCfg.EnableCRI = true
}
if kubeCfg.EnableCRI {
// kubelet defers to the runtime shim to set up networking. Setting
// this to nil will prevent it from trying to invoke the plugin.
// It's easier to always probe and initialize plugins till cri
@ -621,76 +624,36 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
klet.containerRuntime = runtime
klet.runner = runtime
} else {
switch kubeCfg.ContainerRuntime {
case "docker":
runtime := dockertools.NewDockerManager(
kubeDeps.DockerClient,
kubecontainer.FilterEventRecorder(kubeDeps.Recorder),
klet.livenessManager,
containerRefManager,
klet.podManager,
machineInfo,
kubeCfg.PodInfraContainerImage,
float32(kubeCfg.RegistryPullQPS),
int(kubeCfg.RegistryBurst),
ContainerLogsDir,
kubeDeps.OSInterface,
klet.networkPlugin,
klet,
klet.httpClient,
dockerExecHandler,
kubeDeps.OOMAdjuster,
procFs,
klet.cpuCFSQuota,
imageBackOff,
kubeCfg.SerializeImagePulls,
kubeCfg.EnableCustomMetrics,
// If using "kubenet", the Kubernetes network plugin that wraps
// CNI's bridge plugin, it knows how to set the hairpin veth flag
// so we tell the container runtime to back away from setting it.
// If the kubelet is started with any other plugin we can't be
// sure it handles the hairpin case so we instruct the docker
// runtime to set the flag instead.
klet.hairpinMode == componentconfig.HairpinVeth && kubeCfg.NetworkPluginName != "kubenet",
kubeCfg.SeccompProfileRoot,
kubeDeps.ContainerRuntimeOptions...,
)
klet.containerRuntime = runtime
klet.runner = kubecontainer.DirectStreamingRunner(runtime)
case "rkt":
// TODO: Include hairpin mode settings in rkt?
conf := &rkt.Config{
Path: kubeCfg.RktPath,
Stage1Image: kubeCfg.RktStage1Image,
InsecureOptions: "image,ondisk",
}
runtime, err := rkt.New(
kubeCfg.RktAPIEndpoint,
conf,
klet,
kubeDeps.Recorder,
containerRefManager,
klet.podManager,
klet.livenessManager,
klet.httpClient,
klet.networkPlugin,
klet.hairpinMode == componentconfig.HairpinVeth,
utilexec.New(),
kubecontainer.RealOS{},
imageBackOff,
kubeCfg.SerializeImagePulls,
float32(kubeCfg.RegistryPullQPS),
int(kubeCfg.RegistryBurst),
kubeCfg.RuntimeRequestTimeout.Duration,
)
if err != nil {
return nil, err
}
klet.containerRuntime = runtime
klet.runner = kubecontainer.DirectStreamingRunner(runtime)
default:
return nil, fmt.Errorf("unsupported container runtime %q specified", kubeCfg.ContainerRuntime)
// TODO: Include hairpin mode settings in rkt?
conf := &rkt.Config{
Path: kubeCfg.RktPath,
Stage1Image: kubeCfg.RktStage1Image,
InsecureOptions: "image,ondisk",
}
runtime, err := rkt.New(
kubeCfg.RktAPIEndpoint,
conf,
klet,
kubeDeps.Recorder,
containerRefManager,
klet.podManager,
klet.livenessManager,
klet.httpClient,
klet.networkPlugin,
klet.hairpinMode == componentconfig.HairpinVeth,
utilexec.New(),
kubecontainer.RealOS{},
imageBackOff,
kubeCfg.SerializeImagePulls,
float32(kubeCfg.RegistryPullQPS),
int(kubeCfg.RegistryBurst),
kubeCfg.RuntimeRequestTimeout.Duration,
)
if err != nil {
return nil, err
}
klet.containerRuntime = runtime
klet.runner = kubecontainer.DirectStreamingRunner(runtime)
}
// TODO: Factor out "StatsProvider" from Kubelet so we don't have a cyclic dependency