Mirror of https://github.com/k3s-io/kubernetes.git
Remove unnecessary docker specific logic in node e2e test.
commit e05a5b9f7a
parent f64c508e2e
@@ -120,13 +120,14 @@ go_test(
         "//pkg/apis/core:go_default_library",
         "//pkg/features:go_default_library",
         "//pkg/kubelet:go_default_library",
+        "//pkg/kubelet/apis/cri:go_default_library",
+        "//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library",
         "//pkg/kubelet/apis/kubeletconfig:go_default_library",
         "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
         "//pkg/kubelet/cm:go_default_library",
         "//pkg/kubelet/cm/cpumanager:go_default_library",
         "//pkg/kubelet/cm/cpuset:go_default_library",
         "//pkg/kubelet/container:go_default_library",
-        "//pkg/kubelet/dockershim/libdocker:go_default_library",
         "//pkg/kubelet/images:go_default_library",
         "//pkg/kubelet/kubeletconfig:go_default_library",
         "//pkg/kubelet/kubeletconfig/status:go_default_library",
@@ -31,6 +31,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
+	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 	"k8s.io/kubernetes/test/e2e/framework"
 
 	. "github.com/onsi/ginkgo"
@@ -76,10 +77,10 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 	f := framework.NewDefaultFramework("kubelet-container-manager")
 	Describe("Validate OOM score adjustments", func() {
 		Context("once the node is setup", func() {
-			It("docker daemon's oom-score-adj should be -999", func() {
-				dockerPids, err := getPidsForProcess(dockerProcessName, dockerPidFile)
-				Expect(err).To(BeNil(), "failed to get list of docker daemon pids")
-				for _, pid := range dockerPids {
+			It("container runtime's oom-score-adj should be -999", func() {
+				runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
+				Expect(err).To(BeNil(), "failed to get list of container runtime pids")
+				for _, pid := range runtimePids {
					Eventually(func() error {
						return validateOOMScoreAdjSetting(pid, -999)
					}, 5*time.Minute, 30*time.Second).Should(BeNil())
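
For context: getPidsForProcess and validateOOMScoreAdjSetting are existing node e2e helpers that do not appear in this diff. A minimal, stdlib-only sketch of the check the test performs, reading /proc/<pid>/oom_score_adj and comparing it with the expected score, could look like this (the function name checkOOMScoreAdj and the error wording are illustrative, not the suite's actual helper):

package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
)

// checkOOMScoreAdj reads /proc/<pid>/oom_score_adj and compares it to the
// expected value, mirroring what validateOOMScoreAdjSetting is expected to do.
func checkOOMScoreAdj(pid, expected int) error {
	path := fmt.Sprintf("/proc/%d/oom_score_adj", pid)
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return fmt.Errorf("failed to read %q: %v", path, err)
	}
	actual, err := strconv.Atoi(strings.TrimSpace(string(data)))
	if err != nil {
		return fmt.Errorf("unexpected content in %q: %q", path, data)
	}
	if actual != expected {
		return fmt.Errorf("expected oom_score_adj %d for pid %d, got %d", expected, pid, actual)
	}
	return nil
}

func main() {
	// Pid 1 is only an example; the test derives pids from the runtime's pid file.
	fmt.Println(checkOOMScoreAdj(1, -999))
}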
@@ -148,14 +149,22 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
					return validateOOMScoreAdjSetting(shPids[0], 1000)
				}, 2*time.Minute, time.Second*4).Should(BeNil())
			})
-			// Log the running containers here to help debugging. Use `docker ps`
-			// directly for now because the test is already docker specific.
+			// Log the running containers here to help debugging.
			AfterEach(func() {
				if CurrentGinkgoTestDescription().Failed {
-					By("Dump all running docker containers")
-					output, err := exec.Command("docker", "ps").CombinedOutput()
+					By("Dump all running containers")
+					runtime, _, err := getCRIClient()
					Expect(err).NotTo(HaveOccurred())
-					framework.Logf("Running docker containers:\n%s", string(output))
+					containers, err := runtime.ListContainers(&runtimeapi.ContainerFilter{
+						State: &runtimeapi.ContainerStateValue{
+							State: runtimeapi.ContainerState_CONTAINER_RUNNING,
+						},
+					})
+					Expect(err).NotTo(HaveOccurred())
+					framework.Logf("Running containers:\n")
+					for _, c := range containers {
+						framework.Logf("%+v\n", c)
+					}
				}
			})
		})
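
getCRIClient, used here and in the CPU manager and garbage collection changes below, is not part of this diff. A plausible sketch, assuming it simply dials the configured CRI endpoint with the remote runtime and image service clients from pkg/kubelet/remote, is shown below; the constructor signatures and the dockershim socket default are assumptions about that release, not code from this commit:

package main

import (
	"fmt"
	"time"

	internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
	"k8s.io/kubernetes/pkg/kubelet/remote"
)

// getCRIClientSketch is a sketch of what getCRIClient presumably does: dial the
// configured CRI socket and return the runtime and image service clients.
// NewRemoteRuntimeService/NewRemoteImageService are assumed to be the wiring used.
func getCRIClientSketch(endpoint string, timeout time.Duration) (internalapi.RuntimeService, internalapi.ImageManagerService, error) {
	rs, err := remote.NewRemoteRuntimeService(endpoint, timeout)
	if err != nil {
		return nil, nil, err
	}
	is, err := remote.NewRemoteImageService(endpoint, timeout)
	if err != nil {
		return nil, nil, err
	}
	return rs, is, nil
}

func main() {
	// The dockershim socket is used only as an example default here.
	rs, _, err := getCRIClientSketch("unix:///var/run/dockershim.sock", 2*time.Minute)
	fmt.Println(rs != nil, err)
}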
@@ -27,9 +27,11 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/pkg/features"
+	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
 	"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
 	"k8s.io/kubernetes/pkg/kubelet/cm/cpuset"
+	"k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/test/e2e/framework"
 
 	. "github.com/onsi/ginkgo"
@@ -101,14 +103,21 @@ func getLocalNodeCPUDetails(f *framework.Framework) (cpuCapVal int64, cpuAllocVa
 	return cpuCap.Value(), (cpuCap.Value() - cpuRes.Value()), cpuRes.Value()
 }
 
-// TODO(balajismaniam): Make this func generic to all container runtimes.
-func waitForContainerRemoval(ctnPartName string) {
+func waitForContainerRemoval(containerName, podName, podNS string) {
+	rs, _, err := getCRIClient()
+	Expect(err).NotTo(HaveOccurred())
 	Eventually(func() bool {
-		err := exec.Command("/bin/sh", "-c", fmt.Sprintf("if [ -n \"$(docker ps -a | grep -i %s)\" ]; then exit 1; fi", ctnPartName)).Run()
+		containers, err := rs.ListContainers(&runtimeapi.ContainerFilter{
+			LabelSelector: map[string]string{
+				types.KubernetesPodNameLabel:       podName,
+				types.KubernetesPodNamespaceLabel:  podNS,
+				types.KubernetesContainerNameLabel: containerName,
+			},
+		})
		if err != nil {
			return false
		}
-		return true
+		return len(containers) == 0
 	}, 2*time.Minute, 1*time.Second).Should(BeTrue())
 }
 
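
The new waitForContainerRemoval identifies containers through the labels the kubelet puts on everything it creates (pod name, pod namespace, container name; see pkg/kubelet/types). A self-contained sketch of the same poll-until-gone logic is below; the container struct and the list callback stand in for the CRI client, which keeps the sketch runnable without a CRI socket:

package main

import (
	"fmt"
	"time"
)

// Label keys the kubelet sets on every container it creates; these match the
// constants in pkg/kubelet/types (KubernetesPodNameLabel and friends).
const (
	podNameLabel       = "io.kubernetes.pod.name"
	podNamespaceLabel  = "io.kubernetes.pod.namespace"
	containerNameLabel = "io.kubernetes.container.name"
)

// container is a stand-in for the CRI Container message: only labels matter here.
type container struct {
	Labels map[string]string
}

// waitForRemoval polls list() until no container carrying the pod/container
// labels remains, or the timeout expires. list is a stand-in for
// RuntimeService.ListContainers with a label selector.
func waitForRemoval(list func() ([]container, error), containerName, podName, podNS string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		containers, err := list()
		if err == nil {
			left := 0
			for _, c := range containers {
				if c.Labels[podNameLabel] == podName &&
					c.Labels[podNamespaceLabel] == podNS &&
					c.Labels[containerNameLabel] == containerName {
					left++
				}
			}
			if left == 0 {
				return nil
			}
		}
		time.Sleep(time.Second)
	}
	return fmt.Errorf("container %q in pod %s/%s was not removed within %v", containerName, podNS, podName, timeout)
}

func main() {
	empty := func() ([]container, error) { return nil, nil }
	fmt.Println(waitForRemoval(empty, "nginx", "gu-pod", "default", 5*time.Second)) // <nil>
}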
@@ -135,9 +144,8 @@ func setOldKubeletConfig(f *framework.Framework, oldCfg *kubeletconfig.KubeletCo
 }
 
 func enableCPUManagerInKubelet(f *framework.Framework) (oldCfg *kubeletconfig.KubeletConfiguration) {
-	// Run only if the container runtime is Docker.
-	// TODO(balajismaniam): Make this test generic to all container runtimes.
-	framework.RunIfContainerRuntimeIs("docker")
+	// Run only if the container runtime is docker or remote (not rkt).
+	framework.RunIfContainerRuntimeIs("docker", "remote")
 
 	// Enable CPU Manager in Kubelet with static policy.
 	oldCfg, err := getCurrentKubeletConfig()
@@ -219,7 +227,7 @@ func runCPUManagerTests(f *framework.Framework) {
 
		By("by deleting the pods and waiting for container removal")
		deletePods(f, []string{pod.Name})
-		waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[0].Name, pod.Name))
+		waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
 
		By("running a Gu pod")
		ctnAttrs = []ctnAttribute{
@@ -245,7 +253,7 @@ func runCPUManagerTests(f *framework.Framework) {
 
		By("by deleting the pods and waiting for container removal")
		deletePods(f, []string{pod.Name})
-		waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[0].Name, pod.Name))
+		waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
 
		By("running multiple Gu and non-Gu pods")
		ctnAttrs = []ctnAttribute{
@@ -291,8 +299,8 @@ func runCPUManagerTests(f *framework.Framework) {
 
		By("by deleting the pods and waiting for container removal")
		deletePods(f, []string{pod1.Name, pod2.Name})
-		waitForContainerRemoval(fmt.Sprintf("%s_%s", pod1.Spec.Containers[0].Name, pod1.Name))
-		waitForContainerRemoval(fmt.Sprintf("%s_%s", pod2.Spec.Containers[0].Name, pod2.Name))
+		waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace)
+		waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace)
 
		// Skip rest of the tests if CPU capacity < 3.
		if cpuCap < 3 {
@@ -327,7 +335,7 @@ func runCPUManagerTests(f *framework.Framework) {
 
		By("by deleting the pods and waiting for container removal")
		deletePods(f, []string{pod.Name})
-		waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[0].Name, pod.Name))
+		waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
 
		By("running a Gu pod with multiple containers requesting integer CPUs")
		ctnAttrs = []ctnAttribute{
@@ -365,8 +373,8 @@ func runCPUManagerTests(f *framework.Framework) {
 
		By("by deleting the pods and waiting for container removal")
		deletePods(f, []string{pod.Name})
-		waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[0].Name, pod.Name))
-		waitForContainerRemoval(fmt.Sprintf("%s_%s", pod.Spec.Containers[1].Name, pod.Name))
+		waitForContainerRemoval(pod.Spec.Containers[0].Name, pod.Name, pod.Namespace)
+		waitForContainerRemoval(pod.Spec.Containers[1].Name, pod.Name, pod.Namespace)
 
		By("running multiple Gu pods")
		ctnAttrs = []ctnAttribute{
@@ -410,15 +418,15 @@ func runCPUManagerTests(f *framework.Framework) {
 
		By("by deleting the pods and waiting for container removal")
		deletePods(f, []string{pod1.Name, pod2.Name})
-		waitForContainerRemoval(fmt.Sprintf("%s_%s", pod1.Spec.Containers[0].Name, pod1.Name))
-		waitForContainerRemoval(fmt.Sprintf("%s_%s", pod2.Spec.Containers[0].Name, pod2.Name))
+		waitForContainerRemoval(pod1.Spec.Containers[0].Name, pod1.Name, pod1.Namespace)
+		waitForContainerRemoval(pod2.Spec.Containers[0].Name, pod2.Name, pod2.Namespace)
 
		setOldKubeletConfig(f, oldCfg)
	})
 }
 
 // Serial because the test updates kubelet configuration.
-var _ = SIGDescribe("CPU Manager [Feature:CPUManager]", func() {
+var _ = SIGDescribe("CPU Manager [Serial] [Feature:CPUManager]", func() {
 	f := framework.NewDefaultFramework("cpu-manager-test")
 
 	Context("With kubeconfig updated with static CPU Manager policy run the CPU Manager tests", func() {
@@ -45,6 +45,10 @@ const (
 var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker]", func() {
 	f := framework.NewDefaultFramework("dockerhism-checkpoint-test")
 
+	BeforeEach(func() {
+		framework.RunIfContainerRuntimeIs("docker")
+	})
+
 	It("should clean up pod sandbox checkpoint after pod deletion", func() {
		podName := "pod-checkpoint-no-disrupt"
		runPodCheckpointTest(f, podName, func() {
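
The added BeforeEach keeps the Dockershim checkpoint test docker-only. framework.RunIfContainerRuntimeIs is not shown in this diff; its gating boils down to a membership test on the configured runtime name, roughly as sketched here (shouldRun is a made-up stand-in that returns a bool instead of skipping the spec):

package main

import "fmt"

// shouldRun reports whether the configured container runtime is one of the
// accepted names. This is a sketch of the check behind
// framework.RunIfContainerRuntimeIs; the real helper skips the spec instead.
func shouldRun(configured string, accepted ...string) bool {
	for _, r := range accepted {
		if r == configured {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(shouldRun("docker", "docker"))           // true: checkpoint test runs
	fmt.Println(shouldRun("remote", "docker"))           // false: checkpoint test is skipped
	fmt.Println(shouldRun("remote", "docker", "remote")) // true: CPU manager test runs
}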
@@ -19,12 +19,13 @@ package e2e_node
 import (
 	"fmt"
 	"strconv"
-	"strings"
 	"time"
 
 	"k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
+	internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
+	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
+	"k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/test/e2e/framework"
 
 	. "github.com/onsi/ginkgo"
@@ -130,8 +131,7 @@ var _ = framework.KubeDescribe("GarbageCollect [Serial]", func() {
			},
		}
		for _, test := range tests {
-			// TODO (dashpole): Once the Container Runtime Interface (CRI) is complete, generalize run on other runtimes (other than docker)
-			dockerContainerGCTest(f, test)
+			containerGCTest(f, test)
		}
	})
 
@@ -142,6 +142,32 @@ var _ = framework.KubeDescribe("GarbageCollect [Serial]", func() {
 // while containers are running, if not constrained by maxPerPodContainer or maxTotalContainers, keep an extra copy of each container
 // once pods are killed, all containers are eventually cleaned up
 func containerGCTest(f *framework.Framework, test testRun) {
+	var runtime internalapi.RuntimeService
+	BeforeEach(func() {
+		var err error
+		runtime, _, err = getCRIClient()
+		Expect(err).NotTo(HaveOccurred())
+	})
+	for _, pod := range test.testPods {
+		// Initialize the getContainerNames function to use CRI runtime client.
+		pod.getContainerNames = func() ([]string, error) {
+			relevantContainers := []string{}
+			containers, err := runtime.ListContainers(&runtimeapi.ContainerFilter{
+				LabelSelector: map[string]string{
+					types.KubernetesPodNameLabel:      pod.podName,
+					types.KubernetesPodNamespaceLabel: f.Namespace.Name,
+				},
+			})
+			if err != nil {
+				return relevantContainers, err
+			}
+			for _, container := range containers {
+				relevantContainers = append(relevantContainers, container.Labels[types.KubernetesContainerNameLabel])
+			}
+			return relevantContainers, nil
+		}
+	}
+
 	Context(fmt.Sprintf("Garbage Collection Test: %s", test.testName), func() {
		BeforeEach(func() {
			realPods := getPods(test.testPods)
@@ -175,7 +201,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
				for i := 0; i < pod.numContainers; i++ {
					containerCount := 0
					for _, containerName := range containerNames {
-						if strings.Contains(containerName, pod.getContainerName(i)) {
+						if containerName == pod.getContainerName(i) {
							containerCount += 1
						}
					}
@@ -203,7 +229,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
				for i := 0; i < pod.numContainers; i++ {
					containerCount := 0
					for _, containerName := range containerNames {
-						if strings.Contains(containerName, pod.getContainerName(i)) {
+						if containerName == pod.getContainerName(i) {
							containerCount += 1
						}
					}
@@ -245,39 +271,6 @@ func containerGCTest(f *framework.Framework, test testRun) {
	})
 }
 
-// Runs containerGCTest using the docker runtime.
-func dockerContainerGCTest(f *framework.Framework, test testRun) {
-	var runtime libdocker.Interface
-	BeforeEach(func() {
-		runtime = libdocker.ConnectToDockerOrDie(
-			defaultDockerEndpoint,
-			defaultRuntimeRequestTimeoutDuration,
-			defaultImagePullProgressDeadline,
-			false,
-			false,
-		)
-	})
-	for _, pod := range test.testPods {
-		// Initialize the getContainerNames function to use the libdocker api
-		thisPrefix := pod.containerPrefix
-		pod.getContainerNames = func() ([]string, error) {
-			relevantContainers := []string{}
-			dockerContainers, err := libdocker.GetKubeletDockerContainers(runtime, true)
-			if err != nil {
-				return relevantContainers, err
-			}
-			for _, container := range dockerContainers {
-				// only look for containers from this testspec
-				if strings.Contains(container.Names[0], thisPrefix) {
-					relevantContainers = append(relevantContainers, container.Names[0])
-				}
-			}
-			return relevantContainers, nil
-		}
-	}
-	containerGCTest(f, test)
-}
-
 func getPods(specs []*testPodSpec) (pods []*v1.Pod) {
 	for _, spec := range specs {
		By(fmt.Sprintf("Creating %v containers with restartCount: %v", spec.numContainers, spec.restartCount))
@@ -28,6 +28,7 @@ import (
 	"os/exec"
 
 	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
 	"k8s.io/api/core/v1"
 	testutils "k8s.io/kubernetes/test/utils"
 )
@@ -75,11 +76,11 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
 	)
 
 	f := framework.NewDefaultFramework("restart-test")
-	Context("Docker Daemon", func() {
+	Context("Container Runtime", func() {
		Context("Network", func() {
			It("should recover from ip leak", func() {
 
-				pods := newTestPods(podCount, false, framework.GetPauseImageNameForHostArch(), "restart-docker-test")
+				pods := newTestPods(podCount, false, framework.GetPauseImageNameForHostArch(), "restart-container-runtime-test")
				By(fmt.Sprintf("Trying to create %d pods on node", len(pods)))
				createBatchPodWithRateControl(f, pods, podCreationInterval)
				defer deletePodsSync(f, pods)
@@ -88,34 +89,47 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive]", func() {
				// startTimeout fit on the node and the node is now saturated.
				runningPods := waitForPods(f, podCount, startTimeout)
				if len(runningPods) < minPods {
-					framework.Failf("Failed to start %d pods, cannot test that restarting docker doesn't leak IPs", minPods)
+					framework.Failf("Failed to start %d pods, cannot test that restarting container runtime doesn't leak IPs", minPods)
				}
 
				for i := 0; i < restartCount; i += 1 {
-					By(fmt.Sprintf("Restarting Docker Daemon iteration %d", i))
-					// TODO: Find a uniform way to deal with systemctl/initctl/service operations. #34494
-					if stdout, err := exec.Command("sudo", "systemctl", "restart", "docker").CombinedOutput(); err != nil {
-						framework.Logf("Failed to trigger docker restart with systemd/systemctl: %v, stdout: %q", err, string(stdout))
-						if stdout, err = exec.Command("sudo", "service", "docker", "restart").CombinedOutput(); err != nil {
-							framework.Failf("Failed to trigger docker restart with upstart/service: %v, stdout: %q", err, string(stdout))
+					By(fmt.Sprintf("Killing container runtime iteration %d", i))
+					// Wait for container runtime to be running
+					var pid int
+					Eventually(func() error {
+						runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
+						if err != nil {
+							return err
						}
+						if len(runtimePids) != 1 {
+							return fmt.Errorf("unexpected container runtime pid list: %+v", runtimePids)
+						}
+						// Make sure the container runtime is running, pid got from pid file may not be running.
+						pid = runtimePids[0]
+						if _, err := exec.Command("sudo", "ps", "-p", fmt.Sprintf("%d", pid)).CombinedOutput(); err != nil {
+							return err
+						}
+						return nil
+					}, 1*time.Minute, 2*time.Second).Should(BeNil())
+					if stdout, err := exec.Command("sudo", "kill", fmt.Sprintf("%d", pid)).CombinedOutput(); err != nil {
+						framework.Failf("Failed to kill container runtime (pid=%d): %v, stdout: %q", pid, err, string(stdout))
					}
+					// Assume that container runtime will be restarted by systemd/supervisord etc.
					time.Sleep(20 * time.Second)
				}
 
				By("Checking currently Running/Ready pods")
				postRestartRunningPods := waitForPods(f, len(runningPods), recoverTimeout)
				if len(postRestartRunningPods) == 0 {
-					framework.Failf("Failed to start *any* pods after docker restart, this might indicate an IP leak")
+					framework.Failf("Failed to start *any* pods after container runtime restart, this might indicate an IP leak")
				}
				By("Confirm no containers have terminated")
				for _, pod := range postRestartRunningPods {
					if c := testutils.TerminatedContainers(pod); len(c) != 0 {
-						framework.Failf("Pod %q has failed containers %+v after docker restart, this might indicate an IP leak", pod.Name, c)
+						framework.Failf("Pod %q has failed containers %+v after container runtime restart, this might indicate an IP leak", pod.Name, c)
					}
				}
-				By(fmt.Sprintf("Docker restart test passed with %d pods", len(postRestartRunningPods)))
+				By(fmt.Sprintf("Container runtime restart test passed with %d pods", len(postRestartRunningPods)))
			})
		})
	})
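
getPidsForProcess, framework.TestContext.ContainerRuntimeProcessName and framework.TestContext.ContainerRuntimePidFile are defined outside this diff. A rough stdlib sketch of that lookup and of the `ps -p` liveness check used above follows; the pid-file-then-pgrep fallback and the docker defaults in main are assumptions, not the helper's actual behavior:

package main

import (
	"fmt"
	"io/ioutil"
	"os/exec"
	"strconv"
	"strings"
)

// pidsForProcess is a sketch of how the test might resolve runtime pids:
// prefer the pid file if one is configured, otherwise fall back to pgrep.
// The real getPidsForProcess helper may differ.
func pidsForProcess(name, pidFile string) ([]int, error) {
	if pidFile != "" {
		data, err := ioutil.ReadFile(pidFile)
		if err == nil {
			if pid, err := strconv.Atoi(strings.TrimSpace(string(data))); err == nil {
				return []int{pid}, nil
			}
		}
	}
	out, err := exec.Command("pgrep", "-x", name).Output()
	if err != nil {
		return nil, fmt.Errorf("failed to find process %q: %v", name, err)
	}
	pids := []int{}
	for _, field := range strings.Fields(string(out)) {
		pid, err := strconv.Atoi(field)
		if err != nil {
			continue
		}
		pids = append(pids, pid)
	}
	return pids, nil
}

// alive mirrors the `sudo ps -p <pid>` check in the diff: a pid read from a
// stale pid file may no longer correspond to a running process.
func alive(pid int) bool {
	return exec.Command("ps", "-p", strconv.Itoa(pid)).Run() == nil
}

func main() {
	// Process name and pid file are example values for a docker node.
	pids, err := pidsForProcess("dockerd", "/var/run/docker.pid")
	fmt.Println(pids, err)
	if len(pids) == 1 {
		fmt.Println("running:", alive(pids[0]))
	}
}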