Clean up dockershim in tests

Signed-off-by: Ciprian Hacman <ciprian@hakman.dev>
Author: Ciprian Hacman
Date:   2021-12-22 09:42:15 +02:00
Parent: c75d254beb
Commit: a0abe5aa33

24 changed files with 99 additions and 1006 deletions

@@ -1705,32 +1705,29 @@
 - testname: Container Runtime, TerminationMessage, from log output of succeeding container
   codename: '[sig-node] Container Runtime blackbox test on terminated container should
     report termination message as empty when pod succeeds and TerminationMessagePolicy
-    FallbackToLogsOnError is set [Excluded:WindowsDocker] [NodeConformance] [Conformance]'
-  description: 'Create a pod with an container. Container''s output is recorded in
-    log and container exits successfully without an error. When container is terminated,
-    terminationMessage MUST have no content as container succeed. [Excluded:WindowsDocker]:
-    Cannot mount files in Windows Containers created by Docker.'
+    FallbackToLogsOnError is set [NodeConformance] [Conformance]'
+  description: Create a pod with an container. Container's output is recorded in log
+    and container exits successfully without an error. When container is terminated,
+    terminationMessage MUST have no content as container succeed.
   release: v1.15
   file: test/e2e/common/node/runtime.go
 - testname: Container Runtime, TerminationMessage, from file of succeeding container
   codename: '[sig-node] Container Runtime blackbox test on terminated container should
     report termination message from file when pod succeeds and TerminationMessagePolicy
-    FallbackToLogsOnError is set [Excluded:WindowsDocker] [NodeConformance] [Conformance]'
-  description: 'Create a pod with an container. Container''s output is recorded in
-    a file and the container exits successfully without an error. When container is
-    terminated, terminationMessage MUST match with the content from file. [Excluded:WindowsDocker]:
-    Cannot mount files in Windows Containers created by Docker.'
+    FallbackToLogsOnError is set [NodeConformance] [Conformance]'
+  description: Create a pod with an container. Container's output is recorded in a
+    file and the container exits successfully without an error. When container is
+    terminated, terminationMessage MUST match with the content from file.
   release: v1.15
   file: test/e2e/common/node/runtime.go
 - testname: Container Runtime, TerminationMessage, from container's log output of
     failing container
   codename: '[sig-node] Container Runtime blackbox test on terminated container should
     report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError
-    is set [Excluded:WindowsDocker] [NodeConformance] [Conformance]'
-  description: 'Create a pod with an container. Container''s output is recorded in
-    log and container exits with an error. When container is terminated, termination
-    message MUST match the expected output recorded from container''s log. [Excluded:WindowsDocker]:
-    Cannot mount files in Windows Containers created by Docker.'
+    is set [NodeConformance] [Conformance]'
+  description: Create a pod with an container. Container's output is recorded in log
+    and container exits with an error. When container is terminated, termination message
+    MUST match the expected output recorded from container's log.
   release: v1.15
   file: test/e2e/common/node/runtime.go
 - testname: Container Runtime, Restart Policy, Pod Phases
@@ -1743,37 +1740,37 @@
     accordingly.
   release: v1.13
   file: test/e2e/common/node/runtime.go
-- testname: Docker containers, with arguments
-  codename: '[sig-node] Docker Containers should be able to override the image''s
-    default arguments (docker cmd) [NodeConformance] [Conformance]'
-  description: Default command and from the docker image entrypoint MUST be used
+- testname: Containers, with arguments
+  codename: '[sig-node] Containers should be able to override the image''s default
+    arguments (container cmd) [NodeConformance] [Conformance]'
+  description: Default command and from the container image entrypoint MUST be used
     when Pod does not specify the container command but the arguments from Pod spec
     MUST override when specified.
   release: v1.9
-  file: test/e2e/common/node/docker_containers.go
-- testname: Docker containers, with command
-  codename: '[sig-node] Docker Containers should be able to override the image''s
-    default command (docker entrypoint) [NodeConformance] [Conformance]'
-  description: Default command from the docker image entrypoint MUST NOT be used when
-    Pod specifies the container command. Command from Pod spec MUST override the
-    command in the image.
+  file: test/e2e/common/node/containers.go
+- testname: Containers, with command
+  codename: '[sig-node] Containers should be able to override the image''s default
+    command (container entrypoint) [NodeConformance] [Conformance]'
+  description: Default command from the container image entrypoint MUST NOT be used
+    when Pod specifies the container command. Command from Pod spec MUST override
+    the command in the image.
   release: v1.9
-  file: test/e2e/common/node/docker_containers.go
-- testname: Docker containers, with command and arguments
-  codename: '[sig-node] Docker Containers should be able to override the image''s
-    default command and arguments [NodeConformance] [Conformance]'
-  description: Default command and arguments from the docker image entrypoint MUST
+  file: test/e2e/common/node/containers.go
+- testname: Containers, with command and arguments
+  codename: '[sig-node] Containers should be able to override the image''s default
+    command and arguments [NodeConformance] [Conformance]'
+  description: Default command and arguments from the container image entrypoint MUST
     NOT be used when Pod specifies the container command and arguments. Command and
     arguments from Pod spec MUST override the command and arguments in the image.
   release: v1.9
-  file: test/e2e/common/node/docker_containers.go
-- testname: Docker containers, without command and arguments
-  codename: '[sig-node] Docker Containers should use the image defaults if command
-    and args are blank [NodeConformance] [Conformance]'
-  description: Default command and arguments from the docker image entrypoint MUST
+  file: test/e2e/common/node/containers.go
+- testname: Containers, without command and arguments
+  codename: '[sig-node] Containers should use the image defaults if command and args
+    are blank [NodeConformance] [Conformance]'
+  description: Default command and arguments from the container image entrypoint MUST
     be used when Pod does not specify the container command
   release: v1.9
-  file: test/e2e/common/node/docker_containers.go
+  file: test/e2e/common/node/containers.go
 - testname: DownwardAPI, environment for CPU and memory limits and requests
   codename: '[sig-node] Downward API should provide container''s limits.cpu/memory
     and requests.cpu/memory as env vars [NodeConformance] [Conformance]'
@@ -3064,46 +3061,38 @@
   file: test/e2e/common/storage/secrets_volume.go
 - testname: 'SubPath: Reading content from a configmap volume.'
   codename: '[sig-storage] Subpath Atomic writer volumes should support subpaths with
-    configmap pod [Excluded:WindowsDocker] [Conformance]'
+    configmap pod [Conformance]'
   description: Containers in a pod can read content from a configmap mounted volume
-    which was configured with a subpath. This test is marked [Excluded:WindowsDocker]
-    since Docker does not support creating individual file mounts for containers on
-    Windows.
+    which was configured with a subpath.
   release: v1.12
   file: test/e2e/storage/subpath.go
 - testname: 'SubPath: Reading content from a configmap volume.'
   codename: '[sig-storage] Subpath Atomic writer volumes should support subpaths with
-    configmap pod with mountPath of existing file [Excluded:WindowsDocker] [Conformance]'
+    configmap pod with mountPath of existing file [Conformance]'
   description: Containers in a pod can read content from a configmap mounted volume
     which was configured with a subpath and also using a mountpath that is a specific
-    file. This test is marked [Excluded:WindowsDocker] since Docker does not support
-    creating individual file mounts for containers on Windows.
+    file.
   release: v1.12
   file: test/e2e/storage/subpath.go
 - testname: 'SubPath: Reading content from a downwardAPI volume.'
   codename: '[sig-storage] Subpath Atomic writer volumes should support subpaths with
-    downward pod [Excluded:WindowsDocker] [Conformance]'
+    downward pod [Conformance]'
   description: Containers in a pod can read content from a downwardAPI mounted volume
-    which was configured with a subpath. This test is marked [Excluded:WindowsDocker]
-    since Docker does not support creating individual file mounts for containers on
-    Windows.
+    which was configured with a subpath.
   release: v1.12
   file: test/e2e/storage/subpath.go
 - testname: 'SubPath: Reading content from a projected volume.'
   codename: '[sig-storage] Subpath Atomic writer volumes should support subpaths with
-    projected pod [Excluded:WindowsDocker] [Conformance]'
+    projected pod [Conformance]'
   description: Containers in a pod can read content from a projected mounted volume
-    which was configured with a subpath. This test is marked [Excluded:WindowsDocker]
-    since Docker does not support creating individual file mounts for containers on
-    Windows.
+    which was configured with a subpath.
   release: v1.12
   file: test/e2e/storage/subpath.go
 - testname: 'SubPath: Reading content from a secret volume.'
   codename: '[sig-storage] Subpath Atomic writer volumes should support subpaths with
-    secret pod [Excluded:WindowsDocker] [Conformance]'
+    secret pod [Conformance]'
   description: Containers in a pod can read content from a secret mounted volume which
-    was configured with a subpath. This test is marked [Excluded:WindowsDocker] since
-    Docker does not support creating individual file mounts for containers on Windows.
+    was configured with a subpath.
   release: v1.12
   file: test/e2e/storage/subpath.go

@@ -168,15 +168,6 @@ func testPrivilegedPods(tester func(pod *v1.Pod)) {
 		tester(hostipc)
 	})
-	if isAppArmorSupported() && framework.TestContext.ContainerRuntime == "docker" {
-		ginkgo.By("Running a custom AppArmor profile pod", func() {
-			aa := restrictedPod("apparmor")
-			// Every node is expected to have the docker-default profile.
-			aa.Annotations[v1.AppArmorBetaContainerAnnotationKeyPrefix+"pause"] = "localhost/docker-default"
-			tester(aa)
-		})
-	}
 	ginkgo.By("Running an unconfined Seccomp pod", func() {
 		unconfined := restrictedPod("seccomp")
 		unconfined.Annotations[v1.SeccompPodAnnotationKey] = "unconfined"
@@ -372,8 +363,3 @@ func restrictedPSP(name string) *policyv1beta1.PodSecurityPolicy {
 func boolPtr(b bool) *bool {
 	return &b
 }
-// isAppArmorSupported checks whether the AppArmor is supported by the node OS distro.
-func isAppArmorSupported() bool {
-	return framework.NodeOSDistroIs(e2eskipper.AppArmorDistros...)
-}

@@ -25,13 +25,13 @@ import (
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 )
-var _ = SIGDescribe("Docker Containers", func() {
+var _ = SIGDescribe("Containers", func() {
 	f := framework.NewDefaultFramework("containers")
 	/*
 		Release: v1.9
-		Testname: Docker containers, without command and arguments
-		Description: Default command and arguments from the docker image entrypoint MUST be used when Pod does not specify the container command
+		Testname: Containers, without command and arguments
+		Description: Default command and arguments from the container image entrypoint MUST be used when Pod does not specify the container command
 	*/
 	framework.ConformanceIt("should use the image defaults if command and args are blank [NodeConformance]", func() {
 		pod := entrypointTestPod(f.Namespace.Name)
@@ -50,24 +50,24 @@ var _ = SIGDescribe("Docker Containers", func() {
 	/*
 		Release: v1.9
-		Testname: Docker containers, with arguments
-		Description: Default command and from the docker image entrypoint MUST be used when Pod does not specify the container command but the arguments from Pod spec MUST override when specified.
+		Testname: Containers, with arguments
+		Description: Default command and from the container image entrypoint MUST be used when Pod does not specify the container command but the arguments from Pod spec MUST override when specified.
 	*/
-	framework.ConformanceIt("should be able to override the image's default arguments (docker cmd) [NodeConformance]", func() {
+	framework.ConformanceIt("should be able to override the image's default arguments (container cmd) [NodeConformance]", func() {
 		pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments")
 		f.TestContainerOutput("override arguments", pod, 0, []string{
 			"[/agnhost entrypoint-tester override arguments]",
 		})
 	})
-	// Note: when you override the entrypoint, the image's arguments (docker cmd)
+	// Note: when you override the entrypoint, the image's arguments (container cmd)
 	// are ignored.
 	/*
 		Release: v1.9
-		Testname: Docker containers, with command
-		Description: Default command from the docker image entrypoint MUST NOT be used when Pod specifies the container command. Command from Pod spec MUST override the command in the image.
+		Testname: Containers, with command
+		Description: Default command from the container image entrypoint MUST NOT be used when Pod specifies the container command. Command from Pod spec MUST override the command in the image.
 	*/
-	framework.ConformanceIt("should be able to override the image's default command (docker entrypoint) [NodeConformance]", func() {
+	framework.ConformanceIt("should be able to override the image's default command (container entrypoint) [NodeConformance]", func() {
 		pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester")
 		pod.Spec.Containers[0].Command = []string{"/agnhost-2"}
@@ -78,8 +78,8 @@ var _ = SIGDescribe("Docker Containers", func() {
 	/*
 		Release: v1.9
-		Testname: Docker containers, with command and arguments
-		Description: Default command and arguments from the docker image entrypoint MUST NOT be used when Pod specifies the container command and arguments. Command and arguments from Pod spec MUST override the command and arguments in the image.
+		Testname: Containers, with command and arguments
+		Description: Default command and arguments from the container image entrypoint MUST NOT be used when Pod specifies the container command and arguments. Command and arguments from Pod spec MUST override the command and arguments in the image.
 	*/
 	framework.ConformanceIt("should be able to override the image's default command and arguments [NodeConformance]", func() {
 		pod := entrypointTestPod(f.Namespace.Name, "entrypoint-tester", "override", "arguments")

@@ -168,10 +168,7 @@ while true; do sleep 1; done
 		gomega.Expect(c.Delete()).To(gomega.Succeed())
 	}
-	ginkgo.It("should report termination message if TerminationMessagePath is set [Excluded:WindowsDocker] [NodeConformance]", func() {
-		// Cannot mount files in Windows Containers created by Docker.
-		// TODO(claudiub): Remove [Excluded:WindowsDocker] tag if Containerd becomes the only
-		// container runtime on Windows.
+	ginkgo.It("should report termination message if TerminationMessagePath is set [NodeConformance]", func() {
 		container := v1.Container{
 			Image: framework.BusyBoxImage,
 			Command: []string{"/bin/sh", "-c"},
@@ -215,9 +212,8 @@ while true; do sleep 1; done
 		Release: v1.15
 		Testname: Container Runtime, TerminationMessage, from container's log output of failing container
 		Description: Create a pod with an container. Container's output is recorded in log and container exits with an error. When container is terminated, termination message MUST match the expected output recorded from container's log.
-		[Excluded:WindowsDocker]: Cannot mount files in Windows Containers created by Docker.
 	*/
-	framework.ConformanceIt("should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [Excluded:WindowsDocker] [NodeConformance]", func() {
+	framework.ConformanceIt("should report termination message from log output if TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance]", func() {
 		container := v1.Container{
 			Image: framework.BusyBoxImage,
 			Command: []string{"/bin/sh", "-c"},
@@ -232,9 +228,8 @@ while true; do sleep 1; done
 		Release: v1.15
 		Testname: Container Runtime, TerminationMessage, from log output of succeeding container
 		Description: Create a pod with an container. Container's output is recorded in log and container exits successfully without an error. When container is terminated, terminationMessage MUST have no content as container succeed.
-		[Excluded:WindowsDocker]: Cannot mount files in Windows Containers created by Docker.
 	*/
-	framework.ConformanceIt("should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [Excluded:WindowsDocker] [NodeConformance]", func() {
+	framework.ConformanceIt("should report termination message as empty when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance]", func() {
 		container := v1.Container{
 			Image: framework.BusyBoxImage,
 			Command: []string{"/bin/sh", "-c"},
@@ -249,9 +244,8 @@ while true; do sleep 1; done
 		Release: v1.15
 		Testname: Container Runtime, TerminationMessage, from file of succeeding container
 		Description: Create a pod with an container. Container's output is recorded in a file and the container exits successfully without an error. When container is terminated, terminationMessage MUST match with the content from file.
-		[Excluded:WindowsDocker]: Cannot mount files in Windows Containers created by Docker.
 	*/
-	framework.ConformanceIt("should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [Excluded:WindowsDocker] [NodeConformance]", func() {
+	framework.ConformanceIt("should report termination message from file when pod succeeds and TerminationMessagePolicy FallbackToLogsOnError is set [NodeConformance]", func() {
 		container := v1.Container{
 			Image: framework.BusyBoxImage,
 			Command: []string{"/bin/sh", "-c"},

@@ -76,10 +76,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
 		// see https://github.com/kubernetes/kubernetes/blob/eb729620c522753bc7ae61fc2c7b7ea19d4aad2f/cluster/gce/gci/configure-helper.sh#L3069-L3076
 		e2eskipper.SkipUnlessProviderIs("gce")
-		// The built-in docker runtime does not support configuring runtime handlers.
-		handler := e2enode.PreconfiguredRuntimeClassHandler(framework.TestContext.ContainerRuntime)
-		rcName := createRuntimeClass(f, "preconfigured-handler", handler)
+		rcName := createRuntimeClass(f, "preconfigured-handler", e2enode.PreconfiguredRuntimeClassHandler)
 		defer deleteRuntimeClass(f, rcName)
 		pod := f.PodClient().Create(e2enode.NewRuntimeClassPod(rcName))
 		expectPodSuccess(f, pod)

@@ -25,16 +25,11 @@ import (
 	utilpointer "k8s.io/utils/pointer"
 )
-// PreconfiguredRuntimeClassHandler returns configured runtime handler.
-func PreconfiguredRuntimeClassHandler(handler string) string {
-	if handler == "docker" {
-		return handler
-	}
-	// test-handler is the name of the runtime handler that is expected to be
-	// preconfigured in the test environment.
-	return "test-handler"
-}
+const (
+	// PreconfiguredRuntimeClassHandler is the name of the runtime handler
+	// that is expected to be preconfigured in the test environment.
+	PreconfiguredRuntimeClassHandler = "test-handler"
+)
 // NewRuntimeClassPod returns a test pod with the given runtimeClassName
 func NewRuntimeClassPod(runtimeClassName string) *v1.Pod {
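
With the handler reduced to a constant, callers no longer derive it from the configured container runtime; they reference the fixed name directly (see the RuntimeClass call sites later in this commit). A minimal, standalone sketch of the object the tests build with it, outside the e2e framework; the package main wrapper and the local constant name are illustrative, while the "test-handler" value and the RuntimeClass shape come from the diff:

package main

import (
	"fmt"

	nodev1 "k8s.io/api/node/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// preconfiguredRuntimeClassHandler mirrors the constant introduced above.
const preconfiguredRuntimeClassHandler = "test-handler"

func main() {
	// A RuntimeClass whose handler is the preconfigured name, independent of
	// which CRI runtime backs the node.
	rc := &nodev1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: preconfiguredRuntimeClassHandler},
		Handler:    preconfiguredRuntimeClassHandler,
	}
	fmt.Printf("RuntimeClass %q uses handler %q\n", rc.Name, rc.Handler)
}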

@@ -272,16 +272,6 @@ func SkipIfAppArmorNotSupported() {
 	SkipUnlessNodeOSDistroIs(AppArmorDistros...)
 }
-// RunIfContainerRuntimeIs runs if the container runtime is included in the runtimes.
-func RunIfContainerRuntimeIs(runtimes ...string) {
-	for _, containerRuntime := range runtimes {
-		if containerRuntime == framework.TestContext.ContainerRuntime {
-			return
-		}
-	}
-	skipInternalf(1, "Skipped because container runtime %q is not in %s", framework.TestContext.ContainerRuntime, runtimes)
-}
 // RunIfSystemSpecNameIs runs if the system spec name is included in the names.
 func RunIfSystemSpecNameIs(names ...string) {
 	for _, name := range names {

@@ -308,7 +308,7 @@ func RegisterCommonFlags(flags *flag.FlagSet) {
 	flags.StringVar(&TestContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.")
 	flags.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
 	flags.Var(cliflag.NewMapStringBool(&TestContext.FeatureGates), "feature-gates", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
-	flags.StringVar(&TestContext.ContainerRuntime, "container-runtime", "docker", "The container runtime of cluster VM instances (docker/remote).")
+	flags.StringVar(&TestContext.ContainerRuntime, "container-runtime", "remote", "The container runtime of cluster VM instances (remote).")
 	flags.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "unix:///var/run/dockershim.sock", "The container runtime endpoint of cluster VM instances.")
 	flags.StringVar(&TestContext.ContainerRuntimeProcessName, "container-runtime-process-name", "dockerd", "The name of the container runtime process.")
 	flags.StringVar(&TestContext.ContainerRuntimePidFile, "container-runtime-pid-file", "/var/run/docker.pid", "The pid file of the container runtime.")
@@ -486,6 +486,17 @@ func AfterReadingAllFlags(t *TestContextType) {
 		TestContext.Provider = "skeleton"
 	}
+	// TODO: Fix tests scripts that set CONTAINER_RUNTIME="containerd"
+	if TestContext.ContainerRuntime == "containerd" {
+		klog.Warningf("The --container-runtime flag is set to 'containerd' instead of 'remote'.")
+		TestContext.ContainerRuntime = "remote"
+	}
+	// Make sure that container runtime is valid
+	if TestContext.ContainerRuntime != "remote" {
+		klog.Errorf("Unsupported CRI container runtime: %q", TestContext.ContainerRuntime)
+		os.Exit(1)
+	}
 	var err error
 	TestContext.CloudConfig.Provider, err = SetupProviderConfig(TestContext.Provider)
 	if err != nil {
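
The net effect of the flag changes above: "remote" is now the default and the only accepted value, while a legacy "containerd" setting is mapped to "remote" with a warning and anything else aborts the run. A self-contained sketch of that behavior using only the standard library; the program and its names are illustrative and not part of the Kubernetes test framework:

package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	// Default is now "remote"; "docker" is no longer an accepted value.
	runtime := flag.String("container-runtime", "remote", "The container runtime of cluster VM instances (remote).")
	flag.Parse()

	// Legacy scripts that still pass "containerd" are treated as "remote".
	if *runtime == "containerd" {
		fmt.Fprintln(os.Stderr, "warning: --container-runtime=containerd is treated as 'remote'")
		*runtime = "remote"
	}

	// Anything else (for example "docker") is rejected outright.
	if *runtime != "remote" {
		fmt.Fprintf(os.Stderr, "unsupported CRI container runtime: %q\n", *runtime)
		os.Exit(1)
	}

	fmt.Println("container runtime:", *runtime)
}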

@@ -48,7 +48,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
 			},
 		}
-		runtimeClass := newRuntimeClass(f.Namespace.Name, "conflict-runtimeclass", framework.TestContext.ContainerRuntime)
+		runtimeClass := newRuntimeClass(f.Namespace.Name, "conflict-runtimeclass")
 		runtimeClass.Scheduling = scheduling
 		rc, err := f.ClientSet.NodeV1().RuntimeClasses().Create(context.TODO(), runtimeClass, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "failed to create RuntimeClass resource")
@@ -102,7 +102,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
 		defer e2enode.RemoveTaintOffNode(f.ClientSet, nodeName, taint)
 		ginkgo.By("Trying to create runtimeclass and pod")
-		runtimeClass := newRuntimeClass(f.Namespace.Name, "non-conflict-runtimeclass", framework.TestContext.ContainerRuntime)
+		runtimeClass := newRuntimeClass(f.Namespace.Name, "non-conflict-runtimeclass")
 		runtimeClass.Scheduling = scheduling
 		rc, err := f.ClientSet.NodeV1().RuntimeClasses().Create(context.TODO(), runtimeClass, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "failed to create RuntimeClass resource")
@@ -148,7 +148,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
 		}
 		ginkgo.By("Trying to create runtimeclass and pod")
-		runtimeClass := newRuntimeClass(f.Namespace.Name, "non-conflict-runtimeclass", framework.TestContext.ContainerRuntime)
+		runtimeClass := newRuntimeClass(f.Namespace.Name, "non-conflict-runtimeclass")
 		runtimeClass.Scheduling = scheduling
 		rc, err := f.ClientSet.NodeV1().RuntimeClasses().Create(context.TODO(), runtimeClass, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "failed to create RuntimeClass resource")
@@ -170,7 +170,7 @@ var _ = SIGDescribe("RuntimeClass", func() {
 })
 // newRuntimeClass returns a test runtime class.
-func newRuntimeClass(namespace, name, handler string) *nodev1.RuntimeClass {
+func newRuntimeClass(namespace, name string) *nodev1.RuntimeClass {
 	uniqueName := fmt.Sprintf("%s-%s", namespace, name)
-	return runtimeclasstest.NewRuntimeClass(uniqueName, e2enode.PreconfiguredRuntimeClassHandler(handler))
+	return runtimeclasstest.NewRuntimeClass(uniqueName, e2enode.PreconfiguredRuntimeClassHandler)
 }

@@ -231,7 +231,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 			framework.ExpectNoError(err, "unable to apply fake resource to %v", testNodeName)
 			// Register a runtimeClass with overhead set as 25% of the available beard-seconds
-			handler = e2enode.PreconfiguredRuntimeClassHandler(framework.TestContext.ContainerRuntime)
+			handler = e2enode.PreconfiguredRuntimeClassHandler
 			rc := &nodev1.RuntimeClass{
 				ObjectMeta: metav1.ObjectMeta{Name: handler},
@@ -263,7 +263,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 			}
 			// remove RuntimeClass
-			cs.NodeV1beta1().RuntimeClasses().Delete(context.TODO(), e2enode.PreconfiguredRuntimeClassHandler(framework.TestContext.ContainerRuntime), metav1.DeleteOptions{})
+			cs.NodeV1beta1().RuntimeClasses().Delete(context.TODO(), e2enode.PreconfiguredRuntimeClassHandler, metav1.DeleteOptions{})
 		})
 		ginkgo.It("verify pod overhead is accounted for", func() {

@@ -54,10 +54,8 @@ var _ = utils.SIGDescribe("Subpath", func() {
 		Release: v1.12
 		Testname: SubPath: Reading content from a secret volume.
 		Description: Containers in a pod can read content from a secret mounted volume which was configured with a subpath.
-		This test is marked [Excluded:WindowsDocker] since Docker does not support creating individual file mounts for containers on Windows.
 	*/
-	framework.ConformanceIt("should support subpaths with secret pod [Excluded:WindowsDocker]", func() {
-		// TODO(claudiub): Remove [Excluded:WindowsDocker] tag if Containerd becomes the default container runtime on Windows.
+	framework.ConformanceIt("should support subpaths with secret pod", func() {
 		pod := testsuites.SubpathTestPod(f, "secret-key", "secret", &v1.VolumeSource{Secret: &v1.SecretVolumeSource{SecretName: "my-secret"}}, privilegedSecurityContext)
 		testsuites.TestBasicSubpath(f, "secret-value", pod)
 	})
@@ -66,10 +64,8 @@ var _ = utils.SIGDescribe("Subpath", func() {
 		Release: v1.12
 		Testname: SubPath: Reading content from a configmap volume.
 		Description: Containers in a pod can read content from a configmap mounted volume which was configured with a subpath.
-		This test is marked [Excluded:WindowsDocker] since Docker does not support creating individual file mounts for containers on Windows.
 	*/
-	framework.ConformanceIt("should support subpaths with configmap pod [Excluded:WindowsDocker]", func() {
-		// TODO(claudiub): Remove [Excluded:WindowsDocker] tag if Containerd becomes the only container runtime on Windows.
+	framework.ConformanceIt("should support subpaths with configmap pod", func() {
 		pod := testsuites.SubpathTestPod(f, "configmap-key", "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"}}}, privilegedSecurityContext)
 		testsuites.TestBasicSubpath(f, "configmap-value", pod)
 	})
@@ -78,10 +74,8 @@ var _ = utils.SIGDescribe("Subpath", func() {
 		Release: v1.12
 		Testname: SubPath: Reading content from a configmap volume.
 		Description: Containers in a pod can read content from a configmap mounted volume which was configured with a subpath and also using a mountpath that is a specific file.
-		This test is marked [Excluded:WindowsDocker] since Docker does not support creating individual file mounts for containers on Windows.
 	*/
-	framework.ConformanceIt("should support subpaths with configmap pod with mountPath of existing file [Excluded:WindowsDocker]", func() {
-		// TODO(claudiub): Remove [Excluded:WindowsDocker] tag if Containerd becomes the default container runtime on Windows.
+	framework.ConformanceIt("should support subpaths with configmap pod with mountPath of existing file", func() {
 		pod := testsuites.SubpathTestPod(f, "configmap-key", "configmap", &v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"}}}, privilegedSecurityContext)
 		file := "/etc/resolv.conf"
 		pod.Spec.Containers[0].VolumeMounts[0].MountPath = file
@@ -92,10 +86,8 @@ var _ = utils.SIGDescribe("Subpath", func() {
 		Release: v1.12
 		Testname: SubPath: Reading content from a downwardAPI volume.
 		Description: Containers in a pod can read content from a downwardAPI mounted volume which was configured with a subpath.
-		This test is marked [Excluded:WindowsDocker] since Docker does not support creating individual file mounts for containers on Windows.
 	*/
-	framework.ConformanceIt("should support subpaths with downward pod [Excluded:WindowsDocker]", func() {
-		// TODO(claudiub): Remove [Excluded:WindowsDocker] tag if Containerd becomes the default container runtime on Windows.
+	framework.ConformanceIt("should support subpaths with downward pod", func() {
 		pod := testsuites.SubpathTestPod(f, "downward/podname", "downwardAPI", &v1.VolumeSource{
 			DownwardAPI: &v1.DownwardAPIVolumeSource{
 				Items: []v1.DownwardAPIVolumeFile{{Path: "downward/podname", FieldRef: &v1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"}}},
@@ -108,10 +100,8 @@ var _ = utils.SIGDescribe("Subpath", func() {
 		Release: v1.12
 		Testname: SubPath: Reading content from a projected volume.
 		Description: Containers in a pod can read content from a projected mounted volume which was configured with a subpath.
-		This test is marked [Excluded:WindowsDocker] since Docker does not support creating individual file mounts for containers on Windows.
 	*/
-	framework.ConformanceIt("should support subpaths with projected pod [Excluded:WindowsDocker]", func() {
-		// TODO(claudiub): Remove [Excluded:WindowsDocker] tag once Containerd becomes the default container runtime on Windows.
+	framework.ConformanceIt("should support subpaths with projected pod", func() {
 		pod := testsuites.SubpathTestPod(f, "projected/configmap-key", "projected", &v1.VolumeSource{
 			Projected: &v1.ProjectedVolumeSource{
 				Sources: []v1.VolumeProjection{

@@ -76,7 +76,7 @@ var (
 	User_NTAuthoritySystem = "NT AUTHORITY\\SYSTEM"
 )
-var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [Excluded:WindowsDocker] [MinimumKubeletVersion:1.22] HostProcess containers", func() {
+var _ = SIGDescribe("[Feature:WindowsHostProcessContainers] [MinimumKubeletVersion:1.22] HostProcess containers", func() {
 	ginkgo.BeforeEach(func() {
 		e2eskipper.SkipUnlessNodeOSDistroIs("windows")
 	})

@@ -24,9 +24,7 @@ import (
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	kubelogs "k8s.io/kubernetes/pkg/kubelet/logs"
-	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"github.com/onsi/ginkgo"
 	"github.com/onsi/gomega"
@@ -43,12 +41,6 @@ const (
 var _ = SIGDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive]", func() {
 	f := framework.NewDefaultFramework("container-log-rotation-test")
 	ginkgo.Context("when a container generates a lot of log", func() {
-		ginkgo.BeforeEach(func() {
-			if framework.TestContext.ContainerRuntime != kubetypes.RemoteContainerRuntime {
-				e2eskipper.Skipf("Skipping ContainerLogRotation test since the container runtime is not remote")
-			}
-		})
 		tempSetCurrentKubeletConfig(f, func(initialConfig *kubeletconfig.KubeletConfiguration) {
 			initialConfig.ContainerLogMaxFiles = testContainerLogMaxFiles
 			initialConfig.ContainerLogMaxSize = testContainerLogMaxSize

@@ -1,140 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2enode
import (
"context"
"fmt"
"strings"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
var _ = SIGDescribe("Docker features [Feature:Docker][Legacy:Docker]", func() {
f := framework.NewDefaultFramework("docker-feature-test")
ginkgo.BeforeEach(func() {
e2eskipper.RunIfContainerRuntimeIs("docker")
})
ginkgo.Context("when live-restore is enabled [Serial] [Slow] [Disruptive]", func() {
ginkgo.It("containers should not be disrupted when the daemon shuts down and restarts", func() {
const (
podName = "live-restore-test-pod"
containerName = "live-restore-test-container"
)
isSupported, err := isDockerLiveRestoreSupported()
framework.ExpectNoError(err)
if !isSupported {
e2eskipper.Skipf("Docker live-restore is not supported.")
}
isEnabled, err := isDockerLiveRestoreEnabled()
framework.ExpectNoError(err)
if !isEnabled {
e2eskipper.Skipf("Docker live-restore is not enabled.")
}
ginkgo.By("Create the test pod.")
pod := f.PodClient().CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: podName},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: containerName,
Image: imageutils.GetE2EImage(imageutils.Nginx),
}},
},
})
ginkgo.By("Ensure that the container is running before Docker is down.")
gomega.Eventually(func() bool {
return isContainerRunning(pod.Status.PodIP)
}).Should(gomega.BeTrue())
startTime1, err := getContainerStartTime(f, podName, containerName)
framework.ExpectNoError(err)
ginkgo.By("Stop Docker daemon.")
framework.ExpectNoError(stopDockerDaemon())
isDockerDown := true
defer func() {
if isDockerDown {
ginkgo.By("Start Docker daemon.")
framework.ExpectNoError(startDockerDaemon())
}
}()
ginkgo.By("Ensure that the container is running after Docker is down.")
gomega.Consistently(func() bool {
return isContainerRunning(pod.Status.PodIP)
}).Should(gomega.BeTrue())
ginkgo.By("Start Docker daemon.")
framework.ExpectNoError(startDockerDaemon())
isDockerDown = false
ginkgo.By("Ensure that the container is running after Docker has restarted.")
gomega.Consistently(func() bool {
return isContainerRunning(pod.Status.PodIP)
}).Should(gomega.BeTrue())
ginkgo.By("Ensure that the container has not been restarted after Docker is restarted.")
gomega.Consistently(func() bool {
startTime2, err := getContainerStartTime(f, podName, containerName)
framework.ExpectNoError(err)
return startTime1 == startTime2
}, 3*time.Second, time.Second).Should(gomega.BeTrue())
})
})
})
// isContainerRunning returns true if the container is running by checking
// whether the server is responding, and false otherwise.
func isContainerRunning(podIP string) bool {
output, err := runCommand("curl", podIP)
if err != nil {
return false
}
return strings.Contains(output, "Welcome to nginx!")
}
// getContainerStartTime returns the start time of the container with the
// containerName of the pod having the podName.
func getContainerStartTime(f *framework.Framework, podName, containerName string) (time.Time, error) {
pod, err := f.PodClient().Get(context.TODO(), podName, metav1.GetOptions{})
if err != nil {
return time.Time{}, fmt.Errorf("failed to get pod %q: %v", podName, err)
}
for _, status := range pod.Status.ContainerStatuses {
if status.Name != containerName {
continue
}
if status.State.Running == nil {
return time.Time{}, fmt.Errorf("%v/%v is not running", podName, containerName)
}
return status.State.Running.StartedAt.Time, nil
}
return time.Time{}, fmt.Errorf("failed to find %v/%v", podName, containerName)
}

@@ -1,116 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2enode
import (
"fmt"
"strings"
"github.com/blang/semver"
systemdutil "github.com/coreos/go-systemd/v22/util"
)
// getDockerAPIVersion returns the Docker's API version.
func getDockerAPIVersion() (semver.Version, error) {
output, err := runCommand("docker", "version", "-f", "{{.Server.APIVersion}}")
if err != nil {
return semver.Version{}, fmt.Errorf("failed to get docker server version: %v", err)
}
return semver.MustParse(strings.TrimSpace(output) + ".0"), nil
}
// isSharedPIDNamespaceSupported returns true if the Docker version is 1.13.1+
// (API version 1.26+), and false otherwise.
func isSharedPIDNamespaceSupported() (bool, error) {
version, err := getDockerAPIVersion()
if err != nil {
return false, err
}
return version.GTE(semver.MustParse("1.26.0")), nil
}
// isDockerLiveRestoreSupported returns true if live-restore is supported in
// the current Docker version.
func isDockerLiveRestoreSupported() (bool, error) {
version, err := getDockerAPIVersion()
if err != nil {
return false, err
}
return version.GTE(semver.MustParse("1.26.0")), nil
}
// getDockerInfo returns the Info struct for the running Docker daemon.
func getDockerInfo(key string) (string, error) {
output, err := runCommand("docker", "info", "-f", "{{."+key+"}}")
if err != nil {
return "", fmt.Errorf("failed to get docker info: %v", err)
}
return strings.TrimSpace(output), nil
}
// isDockerLiveRestoreEnabled returns true if live-restore is enabled in the
// Docker.
func isDockerLiveRestoreEnabled() (bool, error) {
info, err := getDockerInfo("LiveRestoreEnabled")
if err != nil {
return false, err
}
return info == "true", nil
}
// getDockerLoggingDriver returns the name of the logging driver.
func getDockerLoggingDriver() (string, error) {
info, err := getDockerInfo("LoggingDriver")
if err != nil {
return "", err
}
return info, nil
}
// isDockerSELinuxSupportEnabled checks whether the Docker daemon was started
// with SELinux support enabled.
func isDockerSELinuxSupportEnabled() (bool, error) {
info, err := getDockerInfo("SecurityOptions")
if err != nil {
return false, err
}
return strings.Contains(info, "name=selinux"), nil
}
// startDockerDaemon starts the Docker daemon.
func startDockerDaemon() error {
switch {
case systemdutil.IsRunningSystemd():
_, err := runCommand("systemctl", "start", "docker")
return err
default:
_, err := runCommand("service", "docker", "start")
return err
}
}
// stopDockerDaemon stops the Docker daemon.
func stopDockerDaemon() error {
switch {
case systemdutil.IsRunningSystemd():
_, err := runCommand("systemctl", "stop", "docker")
return err
default:
_, err := runCommand("service", "docker", "stop")
return err
}
}

@@ -1,231 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2enode
import (
"crypto/md5"
"fmt"
"os"
"os/exec"
"path"
"regexp"
"strings"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
const (
testCheckpoint = "checkpoint-test"
// Container GC Period is 1 minute
gcTimeout = 3 * time.Minute
testCheckpointContent = `{"version":"v1","name":"fluentd-gcp-v2.0-vmnqx","namespace":"kube-system","data":{},"checksum":1799154314}`
)
var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Docker]", func() {
f := framework.NewDefaultFramework("dockerhism-checkpoint-test")
ginkgo.BeforeEach(func() {
e2eskipper.RunIfContainerRuntimeIs("docker")
})
ginkgo.It("should clean up pod sandbox checkpoint after pod deletion", func() {
podName := "pod-checkpoint-no-disrupt"
runPodCheckpointTest(f, podName, func() {
checkpoints := findCheckpoints(podName)
if len(checkpoints) == 0 {
framework.Failf("No checkpoint for the pod was found")
}
})
})
ginkgo.It("should remove dangling checkpoint file", func() {
filename := fmt.Sprintf("%x", md5.Sum([]byte(fmt.Sprintf("%s/%s", testCheckpoint, f.Namespace.Name))))
fullpath := path.Join(framework.TestContext.DockershimCheckpointDir, filename)
ginkgo.By(fmt.Sprintf("Write a file at %q", fullpath))
err := writeFileAndSync(fullpath, []byte(testCheckpointContent))
framework.ExpectNoError(err, "Failed to create file %q", fullpath)
ginkgo.By("Check if file is removed")
gomega.Eventually(func() bool {
if _, err := os.Stat(fullpath); os.IsNotExist(err) {
return true
}
return false
}, gcTimeout, 10*time.Second).Should(gomega.BeTrue())
})
ginkgo.Context("When pod sandbox checkpoint is missing", func() {
ginkgo.It("should complete pod sandbox clean up", func() {
podName := "pod-checkpoint-missing"
runPodCheckpointTest(f, podName, func() {
checkpoints := findCheckpoints(podName)
if len(checkpoints) == 0 {
framework.Failf("No checkpoint for the pod was found")
}
ginkgo.By("Removing checkpoint of test pod")
for _, filename := range checkpoints {
if len(filename) == 0 {
continue
}
framework.Logf("Removing checkpoint %q", filename)
_, err := exec.Command("sudo", "rm", filename).CombinedOutput()
framework.ExpectNoError(err, "Failed to remove checkpoint file %q: %v", string(filename), err)
}
})
})
})
ginkgo.Context("When all containers in pod are missing", func() {
ginkgo.It("should complete pod sandbox clean up based on the information in sandbox checkpoint", func() {
runPodCheckpointTest(f, "pod-containers-missing", func() {
ginkgo.By("Gathering pod container ids")
stdout, err := exec.Command("sudo", "docker", "ps", "-q", "-f",
fmt.Sprintf("name=%s", f.Namespace.Name)).CombinedOutput()
framework.ExpectNoError(err, "Failed to run docker ps: %v", err)
lines := strings.Split(string(stdout), "\n")
ids := []string{}
for _, id := range lines {
id = cleanString(id)
if len(id) > 0 {
ids = append(ids, id)
}
}
ginkgo.By("Stop and remove pod containers")
dockerStopCmd := append([]string{"docker", "stop"}, ids...)
_, err = exec.Command("sudo", dockerStopCmd...).CombinedOutput()
framework.ExpectNoError(err, "Failed to run command %v: %v", dockerStopCmd, err)
dockerRmCmd := append([]string{"docker", "rm"}, ids...)
_, err = exec.Command("sudo", dockerRmCmd...).CombinedOutput()
framework.ExpectNoError(err, "Failed to run command %v: %v", dockerRmCmd, err)
})
})
})
ginkgo.Context("When checkpoint file is corrupted", func() {
ginkgo.It("should complete pod sandbox clean up", func() {
podName := "pod-checkpoint-corrupted"
runPodCheckpointTest(f, podName, func() {
ginkgo.By("Corrupt checkpoint file")
checkpoints := findCheckpoints(podName)
if len(checkpoints) == 0 {
framework.Failf("No checkpoint for the pod was found")
}
for _, file := range checkpoints {
f, err := os.OpenFile(file, os.O_WRONLY|os.O_APPEND, 0644)
framework.ExpectNoError(err, "Failed to open file %q", file)
_, err = f.WriteString("blabblab")
framework.ExpectNoError(err, "Failed to write to file %q", file)
f.Sync()
f.Close()
}
})
})
})
})
func runPodCheckpointTest(f *framework.Framework, podName string, twist func()) {
podName = podName + string(uuid.NewUUID())
ginkgo.By(fmt.Sprintf("Creating test pod: %s", podName))
f.PodClient().CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: podName},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: imageutils.GetPauseImageName(),
Name: "pause-container",
},
},
},
})
ginkgo.By("Performing disruptive operations")
twist()
ginkgo.By("Remove test pod")
f.PodClient().DeleteSync(podName, metav1.DeleteOptions{}, framework.DefaultPodDeletionTimeout)
ginkgo.By("Waiting for checkpoint to be removed")
if err := wait.PollImmediate(10*time.Second, gcTimeout, func() (bool, error) {
checkpoints := findCheckpoints(podName)
if len(checkpoints) == 0 {
return true, nil
}
framework.Logf("Checkpoint of %q still exists: %v", podName, checkpoints)
return false, nil
}); err != nil {
framework.Failf("Failed to observe checkpoint being removed within timeout: %v", err)
}
}
// cleanString cleans up any trailing spaces and new line character for the input string
func cleanString(output string) string {
processed := strings.TrimSpace(string(output))
regex := regexp.MustCompile(`\r?\n`)
processed = regex.ReplaceAllString(processed, "")
return processed
}
func writeFileAndSync(path string, data []byte) error {
f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
if err != nil {
return err
}
_, err = f.Write(data)
if err != nil {
return err
}
f.Sync()
if err1 := f.Close(); err == nil {
err = err1
}
return err
}
// findCheckpoints returns all checkpoint files containing input string
func findCheckpoints(match string) []string {
ginkgo.By(fmt.Sprintf("Search checkpoints containing %q", match))
checkpoints := []string{}
stdout, err := exec.Command("sudo", "grep", "-rl", match, framework.TestContext.DockershimCheckpointDir).CombinedOutput()
if err != nil {
framework.Logf("grep from dockershim checkpoint directory returns error: %v", err)
}
if stdout == nil {
return checkpoints
}
files := strings.Split(string(stdout), "\n")
for _, file := range files {
cleaned := cleanString(file)
if len(cleaned) == 0 {
continue
}
checkpoints = append(checkpoints, cleaned)
}
return checkpoints
}
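findCheckpoints shells out to sudo grep -rl, presumably because the dockershim checkpoint files are written by the kubelet and may not be readable by the test user. If the test process itself runs as root, a pure-Go scan would be roughly equivalent; the sketch below makes that assumption, and the directory path is illustrative.

package main

import (
    "fmt"
    "os"
    "path/filepath"
    "strings"
)

// findCheckpointsNative walks checkpointDir and returns the files whose content
// contains match. Sketch only; it assumes the caller can read every file.
func findCheckpointsNative(checkpointDir, match string) ([]string, error) {
    var found []string
    err := filepath.Walk(checkpointDir, func(path string, info os.FileInfo, walkErr error) error {
        if walkErr != nil || info.IsDir() {
            return walkErr
        }
        data, err := os.ReadFile(path)
        if err != nil {
            return err
        }
        if strings.Contains(string(data), match) {
            found = append(found, path)
        }
        return nil
    })
    return found, err
}

func main() {
    files, err := findCheckpointsNative("/var/lib/dockershim/sandbox", "pod-checkpoint-corrupted")
    fmt.Println(files, err)
}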

View File

@ -29,8 +29,6 @@ import (
"errors" "errors"
"os" "os"
"k8s.io/kubernetes/pkg/kubelet/cadvisor"
) )
const success = "\033[0;32mSUCESS\033[0m" const success = "\033[0;32mSUCESS\033[0m"
@ -66,12 +64,9 @@ func check(options ...string) []error {
switch c { switch c {
case "all": case "all":
errs = appendNotNil(errs, kernel()) errs = appendNotNil(errs, kernel())
errs = appendNotNil(errs, containerRuntime())
errs = appendNotNil(errs, daemons()) errs = appendNotNil(errs, daemons())
errs = appendNotNil(errs, firewall()) errs = appendNotNil(errs, firewall())
errs = appendNotNil(errs, dns()) errs = appendNotNil(errs, dns())
case "containerruntime":
errs = appendNotNil(errs, containerRuntime())
case "daemons": case "daemons":
errs = appendNotNil(errs, daemons()) errs = appendNotNil(errs, daemons())
case "dns": case "dns":
@ -88,37 +83,6 @@ func check(options ...string) []error {
return errs return errs
} }
const dockerVersionRegex = `1\.[7-9]\.[0-9]+`
// containerRuntime checks that a suitable container runtime is installed and recognized by cadvisor: docker 1.7-1.9
func containerRuntime() error {
dockerRegex, err := regexp.Compile(dockerVersionRegex)
if err != nil {
// This should never happen and can only be fixed by changing the code
panic(err)
}
// Setup cadvisor to check the container environment
c, err := cadvisor.New(cadvisor.NewImageFsInfoProvider("docker", ""), "/var/lib/kubelet", []string{"/"}, false)
if err != nil {
return printError("Container Runtime Check: %s Could not start cadvisor %v", failed, err)
}
vi, err := c.VersionInfo()
if err != nil {
return printError("Container Runtime Check: %s Could not get VersionInfo %v", failed, err)
}
d := vi.DockerVersion
if !dockerRegex.Match([]byte(d)) {
return printError(
"Container Runtime Check: %s Docker version %s does not matching %s. You may need to run as root or the "+
"user the kubelet will run under.", failed, d, dockerVersionRegex)
}
return printSuccess("Container Runtime Check: %s", success)
}
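The gate above is a plain substring match of cadvisor's reported Docker version against the 1.7-1.9 pattern. A small, self-contained illustration of what that regex accepts; the sample version strings are made up.

package main

import (
    "fmt"
    "regexp"
)

func main() {
    dockerRegex := regexp.MustCompile(`1\.[7-9]\.[0-9]+`)
    for _, v := range []string{"1.7.1", "1.9.1", "1.13.1", "19.03.8"} {
        // Only 1.7.x through 1.9.x match; anything newer failed the removed check.
        fmt.Printf("%-8s matches: %v\n", v, dockerRegex.MatchString(v))
    }
}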
const kubeletClusterDNSRegexStr = `\/kubelet.*--cluster-dns=(\S+) ` const kubeletClusterDNSRegexStr = `\/kubelet.*--cluster-dns=(\S+) `
const kubeletClusterDomainRegexStr = `\/kubelet.*--cluster-domain=(\S+)` const kubeletClusterDomainRegexStr = `\/kubelet.*--cluster-domain=(\S+)`

View File

@ -22,16 +22,12 @@ import (
"io/ioutil" "io/ioutil"
"os" "os"
"os/exec" "os/exec"
"regexp"
"strconv" "strconv"
"strings" "strings"
"github.com/onsi/ginkgo"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper" e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/blang/semver"
"github.com/onsi/ginkgo"
) )
// checkProcess checks whether there's a process whose command line contains // checkProcess checks whether there's a process whose command line contains
@ -111,215 +107,6 @@ func checkPublicGCR() error {
return nil return nil
} }
// checkDockerConfig runs docker's check-config.sh script and ensures that all
// expected kernel configs are enabled.
func checkDockerConfig() error {
var (
re = regexp.MustCompile("\x1B\\[([0-9]{1,2}(;[0-9]{1,2})?)?[mGK]")
bins = []string{
"/usr/share/docker.io/contrib/check-config.sh",
"/usr/share/docker/contrib/check-config.sh",
}
allowlist = map[string]bool{
"CONFIG_MEMCG_SWAP_ENABLED": true,
"CONFIG_RT_GROUP_SCHED": true,
"CONFIG_EXT3_FS": true,
"CONFIG_EXT3_FS_XATTR": true,
"CONFIG_EXT3_FS_POSIX_ACL": true,
"CONFIG_EXT3_FS_SECURITY": true,
"/dev/zfs": true,
"zfs command": true,
"zpool command": true,
}
missing = map[string]bool{}
)
// Allowlists CONFIG_DEVPTS_MULTIPLE_INSTANCES (meaning allowing it to be
// absent) if the kernel version is >= 4.8, because this option has been
// removed from the 4.8 kernel.
kernelVersion, err := getKernelVersion()
if err != nil {
return err
}
if kernelVersion.GTE(semver.MustParse("4.8.0")) {
allowlist["CONFIG_DEVPTS_MULTIPLE_INSTANCES"] = true
}
for _, bin := range bins {
if _, err := os.Stat(bin); os.IsNotExist(err) {
continue
}
// We don't check the return code because it's OK if the script returns
// a non-zero exit code just because the configs in the allowlist are
// missing.
output, _ := runCommand(bin)
for _, line := range strings.Split(output, "\n") {
if !strings.Contains(line, "missing") {
continue
}
line = re.ReplaceAllString(line, "")
fields := strings.Split(line, ":")
if len(fields) != 2 {
continue
}
key := strings.TrimFunc(fields[0], func(c rune) bool {
return c == ' ' || c == '-'
})
if _, found := allowlist[key]; !found {
missing[key] = true
}
}
if len(missing) != 0 {
return fmt.Errorf("missing docker config: %v", missing)
}
break
}
return nil
}
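The core of the removed checkDockerConfig is the output parsing: strip ANSI color codes, keep only the lines that report a config as missing, and trim the leading bullet. A standalone illustration on a fabricated line of check-config.sh output:

package main

import (
    "fmt"
    "regexp"
    "strings"
)

func main() {
    // Fabricated sample line; check-config.sh colorizes its "missing" verdicts.
    line := "- CONFIG_RT_GROUP_SCHED: \x1b[1;31mmissing\x1b[0m"

    re := regexp.MustCompile("\x1B\\[([0-9]{1,2}(;[0-9]{1,2})?)?[mGK]")
    plain := re.ReplaceAllString(line, "")
    fields := strings.Split(plain, ":")
    key := strings.TrimFunc(fields[0], func(c rune) bool { return c == ' ' || c == '-' })

    // Prints: key="CONFIG_RT_GROUP_SCHED" verdict="missing"
    fmt.Printf("key=%q verdict=%q\n", key, strings.TrimSpace(fields[1]))
}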
// checkDockerNetworkClient checks client networking by pinging an external IP
// address from a container.
func checkDockerNetworkClient() error {
imageName := imageutils.GetE2EImage(imageutils.BusyBox)
output, err := runCommand("docker", "run", "--rm", imageName, "sh", "-c", "ping -w 5 -q google.com")
if err != nil {
return err
}
if !strings.Contains(output, `0% packet loss`) {
return fmt.Errorf("failed to ping from container: %s", output)
}
return nil
}
// checkDockerNetworkServer checks server networking by running an echo server
// within a container and accessing it from outside.
func checkDockerNetworkServer() error {
const (
imageName = "k8s.gcr.io/nginx:1.7.9"
hostAddr = "127.0.0.1"
hostPort = "8088"
containerPort = "80"
containerID = "nginx"
message = "Welcome to nginx!"
)
var (
portMapping = fmt.Sprintf("%s:%s", hostPort, containerPort)
host = fmt.Sprintf("http://%s:%s", hostAddr, hostPort)
)
runCommand("docker", "rm", "-f", containerID)
if _, err := runCommand("docker", "run", "-d", "--name", containerID, "-p", portMapping, imageName); err != nil {
return err
}
output, err := runCommand("curl", host)
if err != nil {
return err
}
if !strings.Contains(output, message) {
return fmt.Errorf("failed to connect to container")
}
// Clean up
if _, err = runCommand("docker", "rm", "-f", containerID); err != nil {
return err
}
if _, err = runCommand("docker", "rmi", imageName); err != nil {
return err
}
return nil
}
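The removed server check drives everything through the docker and curl binaries. Replacing just the curl step with net/http is straightforward; the sketch below assumes the nginx container is already running and published on the host address and port defined above.

package main

import (
    "fmt"
    "io"
    "net/http"
    "strings"
)

// checkHTTPBanner fetches url and verifies that the response body contains want.
func checkHTTPBanner(url, want string) error {
    resp, err := http.Get(url)
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return err
    }
    if !strings.Contains(string(body), want) {
        return fmt.Errorf("response did not contain %q", want)
    }
    return nil
}

func main() {
    fmt.Println(checkHTTPBanner("http://127.0.0.1:8088", "Welcome to nginx!"))
}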
// checkDockerAppArmor checks whether AppArmor is enabled and has the
// "docker-default" profile.
func checkDockerAppArmor() error {
buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
if err != nil {
return err
}
if string(buf) != "Y\n" {
return fmt.Errorf("apparmor module is not loaded")
}
// Checks that the "docker-default" profile is loaded and enforced.
buf, err = ioutil.ReadFile("/sys/kernel/security/apparmor/profiles")
if err != nil {
return err
}
if !strings.Contains(string(buf), "docker-default (enforce)") {
return fmt.Errorf("'docker-default' profile is not loaded and enforced")
}
// Checks that the `apparmor_parser` binary is present.
_, err = exec.LookPath("apparmor_parser")
if err != nil {
return fmt.Errorf("'apparmor_parser' is not in directories named by the PATH env")
}
return nil
}
// checkDockerSeccomp checks whether the Docker supports seccomp.
func checkDockerSeccomp() error {
const (
seccompProfileFileName = "/tmp/no_mkdir.json"
seccompProfile = `{
"defaultAction": "SCMP_ACT_ALLOW",
"syscalls": [
{
"name": "mkdir",
"action": "SCMP_ACT_ERRNO"
}
]}`
image = "gcr.io/google-appengine/debian8:2017-06-07-171918"
)
if err := ioutil.WriteFile(seccompProfileFileName, []byte(seccompProfile), 0644); err != nil {
return err
}
// Starts a container with no seccomp profile and ensures that unshare
// succeeds.
_, err := runCommand("docker", "run", "--rm", "-i", "--security-opt", "seccomp=unconfined", image, "unshare", "-r", "whoami")
if err != nil {
return err
}
// Starts a container with the default seccomp profile and ensures that
// unshare (a denylisted system call in the default profile) fails.
cmd := []string{"docker", "run", "--rm", "-i", image, "unshare", "-r", "whoami"}
_, err = runCommand(cmd...)
if err == nil {
return fmt.Errorf("%q did not fail as expected", strings.Join(cmd, " "))
}
// Starts a container with a custom seccomp profile that denylists mkdir
// and ensures that unshare succeeds.
_, err = runCommand("docker", "run", "--rm", "-i", "--security-opt", fmt.Sprintf("seccomp=%s", seccompProfileFileName), image, "unshare", "-r", "whoami")
if err != nil {
return err
}
// Starts a container with a custom seccomp profile that denylists mkdir
// and ensures that mkdir fails.
cmd = []string{"docker", "run", "--rm", "-i", "--security-opt", fmt.Sprintf("seccomp=%s", seccompProfileFileName), image, "mkdir", "-p", "/tmp/foo"}
_, err = runCommand(cmd...)
if err == nil {
return fmt.Errorf("%q did not fail as expected", strings.Join(cmd, " "))
}
return nil
}
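Illustration only: the inline profile above can equally be produced with encoding/json, which avoids quoting mistakes if the profile ever grows. The struct below mirrors just the fields used in that profile and is not the full seccomp schema.

package main

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
)

type seccompProfile struct {
    DefaultAction string        `json:"defaultAction"`
    Syscalls      []syscallRule `json:"syscalls"`
}

type syscallRule struct {
    Name   string `json:"name"`
    Action string `json:"action"`
}

func main() {
    p := seccompProfile{
        DefaultAction: "SCMP_ACT_ALLOW",
        Syscalls:      []syscallRule{{Name: "mkdir", Action: "SCMP_ACT_ERRNO"}},
    }
    data, err := json.MarshalIndent(p, "", "  ")
    if err != nil {
        panic(err)
    }
    // Same destination file the removed check used.
    fmt.Println(ioutil.WriteFile("/tmp/no_mkdir.json", data, 0644))
}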
// checkDockerStorageDriver checks whether the current storage driver used by
// Docker is overlay.
func checkDockerStorageDriver() error {
output, err := runCommand("docker", "info")
if err != nil {
return err
}
for _, line := range strings.Split(string(output), "\n") {
if !strings.Contains(line, "Storage Driver:") {
continue
}
if !strings.Contains(line, "overlay") {
return fmt.Errorf("storage driver is not 'overlay': %s", line)
}
return nil
}
return fmt.Errorf("failed to find storage driver")
}
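The removed check greps the human-readable docker info output for the storage driver line. The docker CLI also accepts a Go-template --format flag, which would make the same probe less brittle; this sketch assumes a CLI version that supports it and that the storage driver is exposed as .Driver.

package main

import (
    "fmt"
    "os/exec"
    "strings"
)

func main() {
    out, err := exec.Command("docker", "info", "--format", "{{.Driver}}").CombinedOutput()
    if err != nil {
        fmt.Println("docker info failed:", err)
        return
    }
    driver := strings.TrimSpace(string(out))
    if !strings.Contains(driver, "overlay") {
        fmt.Printf("storage driver is not 'overlay': %s\n", driver)
        return
    }
    fmt.Println("storage driver:", driver)
}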
var _ = SIGDescribe("GKE system requirements [NodeConformance][Feature:GKEEnv][NodeFeature:GKEEnv]", func() { var _ = SIGDescribe("GKE system requirements [NodeConformance][Feature:GKEEnv][NodeFeature:GKEEnv]", func() {
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
e2eskipper.RunIfSystemSpecNameIs("gke") e2eskipper.RunIfSystemSpecNameIs("gke")
@ -345,23 +132,6 @@ var _ = SIGDescribe("GKE system requirements [NodeConformance][Feature:GKEEnv][N
ginkgo.It("The GCR is accessible", func() { ginkgo.It("The GCR is accessible", func() {
framework.ExpectNoError(checkPublicGCR()) framework.ExpectNoError(checkPublicGCR())
}) })
ginkgo.It("The docker configuration validation should pass", func() {
e2eskipper.RunIfContainerRuntimeIs("docker")
framework.ExpectNoError(checkDockerConfig())
})
ginkgo.It("The docker container network should work", func() {
e2eskipper.RunIfContainerRuntimeIs("docker")
framework.ExpectNoError(checkDockerNetworkServer())
framework.ExpectNoError(checkDockerNetworkClient())
})
ginkgo.It("The docker daemon should support AppArmor and seccomp", func() {
e2eskipper.RunIfContainerRuntimeIs("docker")
framework.ExpectNoError(checkDockerAppArmor())
framework.ExpectNoError(checkDockerSeccomp())
})
ginkgo.It("The docker storage driver should work", func() {
framework.ExpectNoError(checkDockerStorageDriver())
})
}) })
// getPPID returns the PPID for the pid. // getPPID returns the PPID for the pid.
@ -423,21 +193,6 @@ func getCmdToProcessMap() (map[string][]process, error) {
return result, nil return result, nil
} }
// getKernelVersion returns the kernel version in the semantic version format.
func getKernelVersion() (*semver.Version, error) {
output, err := runCommand("uname", "-r")
if err != nil {
return nil, err
}
// An example 'output' could be "4.13.0-1001-gke".
v := strings.TrimSpace(strings.Split(output, "-")[0])
kernelVersion, err := semver.Make(v)
if err != nil {
return nil, fmt.Errorf("failed to convert %q to semantic version: %s", v, err)
}
return &kernelVersion, nil
}
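getKernelVersion keeps only the numeric prefix of uname -r before handing it to blang/semver. A worked example using the sample value from the comment above; the 4.8.0 comparison is the one the removed checkDockerConfig relied on.

package main

import (
    "fmt"
    "strings"

    "github.com/blang/semver"
)

func main() {
    output := "4.13.0-1001-gke" // sample value from the comment above
    v := strings.TrimSpace(strings.Split(output, "-")[0])

    kernelVersion, err := semver.Make(v)
    if err != nil {
        fmt.Println("failed to convert to semantic version:", err)
        return
    }
    // Prints: 4.13.0 >= 4.8.0: true
    fmt.Println(kernelVersion, ">= 4.8.0:", kernelVersion.GTE(semver.MustParse("4.8.0")))
}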
// runCommand runs the cmd and returns the combined stdout and stderr, or an // runCommand runs the cmd and returns the combined stdout and stderr, or an
// error if the command failed. // error if the command failed.
func runCommand(cmd ...string) (string, error) { func runCommand(cmd ...string) (string, error) {

View File

@ -20,7 +20,6 @@ import (
"context" "context"
"fmt" "fmt"
"os" "os"
"os/exec"
"os/user" "os/user"
"sync" "sync"
"time" "time"
@ -117,21 +116,6 @@ type puller interface {
Name() string Name() string
} }
type dockerPuller struct {
}
func (dp *dockerPuller) Name() string {
return "docker"
}
func (dp *dockerPuller) Pull(image string) ([]byte, error) {
// TODO(random-liu): Use docker client to get rid of docker binary dependency.
if exec.Command("docker", "inspect", "--type=image", image).Run() != nil {
return exec.Command("docker", "pull", image).CombinedOutput()
}
return nil, nil
}
type remotePuller struct { type remotePuller struct {
imageService internalapi.ImageManagerService imageService internalapi.ImageManagerService
} }
@ -150,20 +134,13 @@ func (rp *remotePuller) Pull(image string) ([]byte, error) {
} }
func getPuller() (puller, error) { func getPuller() (puller, error) {
runtime := framework.TestContext.ContainerRuntime _, is, err := getCRIClient()
switch runtime { if err != nil {
case "docker": return nil, err
return &dockerPuller{}, nil
case "remote":
_, is, err := getCRIClient()
if err != nil {
return nil, err
}
return &remotePuller{
imageService: is,
}, nil
} }
return nil, fmt.Errorf("can't prepull images, unknown container runtime %q", runtime) return &remotePuller{
imageService: is,
}, nil
} }
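With the docker branch gone, getPuller always returns the CRI-backed remotePuller. Purely for illustration, a hypothetical crictl-backed implementation of the same puller interface is sketched below; it is not what the tests use, and it would need the os/exec import that this commit removes from the file.

// Hypothetical alternative implementation of the puller interface above.
type crictlPuller struct{}

func (cp *crictlPuller) Name() string {
    return "crictl"
}

func (cp *crictlPuller) Pull(image string) ([]byte, error) {
    // `crictl pull` talks to whichever CRI endpoint crictl is configured for.
    return exec.Command("crictl", "pull", image).CombinedOutput()
}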
// PrePullAllImages pre-fetches all images tests depend on so that we don't fail in an actual test. // PrePullAllImages pre-fetches all images tests depend on so that we don't fail in an actual test.

View File

@ -26,7 +26,6 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
) )
@ -116,29 +115,6 @@ var _ = SIGDescribe("ContainerLogPath [NodeConformance]", func() {
var logPodName string var logPodName string
ginkgo.BeforeEach(func() { ginkgo.BeforeEach(func() {
if framework.TestContext.ContainerRuntime == "docker" {
// Container Log Path support requires JSON logging driver.
// It does not work when Docker daemon is logging to journald.
d, err := getDockerLoggingDriver()
framework.ExpectNoError(err)
if d != "json-file" {
e2eskipper.Skipf("Skipping because Docker daemon is using a logging driver other than \"json-file\": %s", d)
}
// Even if JSON logging is in use, this test fails if SELinux support
// is enabled, since the isolation provided by the SELinux policy
// prevents processes running inside Docker containers (under SELinux
// type svirt_lxc_net_t) from accessing the log files which are owned
// by Docker (and labeled with the container_var_lib_t type.)
//
// Therefore, let's also skip this test when running with SELinux
// support enabled.
e, err := isDockerSELinuxSupportEnabled()
framework.ExpectNoError(err)
if e {
e2eskipper.Skipf("Skipping because Docker daemon is running with SELinux support enabled")
}
}
podClient = f.PodClient() podClient = f.PodClient()
logPodName = "log-pod-" + string(uuid.NewUUID()) logPodName = "log-pod-" + string(uuid.NewUUID())
err := createAndWaitPod(makeLogPod(logPodName, logString)) err := createAndWaitPod(makeLogPod(logPodName, logString))

View File

@ -84,10 +84,6 @@ var _ = SIGDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
ginkgo.Context("Container Runtime", func() { ginkgo.Context("Container Runtime", func() {
ginkgo.Context("Network", func() { ginkgo.Context("Network", func() {
ginkgo.It("should recover from ip leak", func() { ginkgo.It("should recover from ip leak", func() {
if framework.TestContext.ContainerRuntime == "docker" {
ginkgo.Skip("Test fails with in-tree docker. Skipping test.")
}
pods := newTestPods(podCount, false, imageutils.GetPauseImageName(), "restart-container-runtime-test") pods := newTestPods(podCount, false, imageutils.GetPauseImageName(), "restart-container-runtime-test")
ginkgo.By(fmt.Sprintf("Trying to create %d pods on node", len(pods))) ginkgo.By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
createBatchPodWithRateControl(f, pods, podCreationInterval) createBatchPodWithRateControl(f, pods, podCreationInterval)

View File

@ -104,7 +104,7 @@ var _ = SIGDescribe("Kubelet PodOverhead handling [LinuxOnly]", func() {
handler string handler string
) )
ginkgo.By("Creating a RuntimeClass with Overhead definied", func() { ginkgo.By("Creating a RuntimeClass with Overhead definied", func() {
handler = e2enode.PreconfiguredRuntimeClassHandler(framework.TestContext.ContainerRuntime) handler = e2enode.PreconfiguredRuntimeClassHandler
rc := &nodev1.RuntimeClass{ rc := &nodev1.RuntimeClass{
ObjectMeta: metav1.ObjectMeta{Name: handler}, ObjectMeta: metav1.ObjectMeta{Name: handler},
Handler: handler, Handler: handler,

View File

@ -28,7 +28,6 @@ import (
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
@ -72,13 +71,6 @@ var _ = SIGDescribe("Security Context", func() {
}) })
ginkgo.It("processes in containers sharing a pod namespace should be able to see each other", func() { ginkgo.It("processes in containers sharing a pod namespace should be able to see each other", func() {
ginkgo.By("Check whether shared PID namespace is supported.")
isEnabled, err := isSharedPIDNamespaceSupported()
framework.ExpectNoError(err)
if !isEnabled {
e2eskipper.Skipf("Skipped because shared PID namespace is not supported by this docker version.")
}
ginkgo.By("Create a pod with shared PID namespace.") ginkgo.By("Create a pod with shared PID namespace.")
f.PodClient().CreateSync(&v1.Pod{ f.PodClient().CreateSync(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "shared-pid-ns-test-pod"}, ObjectMeta: metav1.ObjectMeta{Name: "shared-pid-ns-test-pod"},

View File

@ -19,7 +19,6 @@ package e2enode
import ( import (
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"os/exec"
"strings" "strings"
"time" "time"
@ -136,29 +135,6 @@ var _ = SIGDescribe("Summary API [NodeConformance]", func() {
"MajorPageFaults": bounded(0, expectedMajorPageFaultsUpperBound), "MajorPageFaults": bounded(0, expectedMajorPageFaultsUpperBound),
}) })
runtimeContExpectations := sysContExpectations().(*gstruct.FieldsMatcher) runtimeContExpectations := sysContExpectations().(*gstruct.FieldsMatcher)
if systemdutil.IsRunningSystemd() && framework.TestContext.ContainerRuntime == "docker" {
// Some Linux distributions still ship a docker.service that is missing
// a `Delegate=yes` setting (or equivalent CPUAccounting= and MemoryAccounting=)
// that allows us to monitor the container runtime resource usage through
// the "cpu" and "memory" cgroups.
//
// Make an exception here for those distros, only for Docker, so that they
// can pass the full node e2e tests even in that case.
//
// For newer container runtimes (using CRI) and even distros that still
// ship Docker, we should encourage them to always set `Delegate=yes` in
// order to make monitoring of the runtime possible.
stdout, err := exec.Command("systemctl", "show", "-p", "Delegate", "docker.service").CombinedOutput()
if err == nil && strings.TrimSpace(string(stdout)) == "Delegate=no" {
// Only make these optional if we can successfully confirm that
// Delegate is set to "no" (in other words, unset.) If we fail
// to check that, default to requiring it, which might cause
// false positives, but that should be the safer approach.
ginkgo.By("Making runtime container expectations optional, since systemd was not configured to Delegate=yes the cgroups")
runtimeContExpectations.Fields["Memory"] = gomega.Or(gomega.BeNil(), runtimeContExpectations.Fields["Memory"])
runtimeContExpectations.Fields["CPU"] = gomega.Or(gomega.BeNil(), runtimeContExpectations.Fields["CPU"])
}
}
systemContainers := gstruct.Elements{ systemContainers := gstruct.Elements{
"kubelet": sysContExpectations(), "kubelet": sysContExpectations(),
"runtime": runtimeContExpectations, "runtime": runtimeContExpectations,