feat: update multiple files in e2e node with framework helpers

draveness 2019-07-31 15:59:40 +08:00
parent 377104418d
commit aeadd793cb
4 changed files with 32 additions and 33 deletions
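
The diff below replaces direct gomega assertions with the e2e framework's expect helpers (framework.ExpectEqual, framework.ExpectNotEqual, framework.ExpectNoError). As a rough sketch of the calling convention these call sites assume (signatures inferred from how the helpers are invoked in this diff, not copied from the framework source), the helpers behave like thin wrappers over gomega:

// Sketch only: approximate shape of the helpers as used below.
// The real definitions live in k8s.io/kubernetes/test/e2e/framework;
// this assumes a gomega fail handler is already registered (the e2e
// framework does that via ginkgo).
package framework

import "github.com/onsi/gomega"

// ExpectEqual asserts that actual equals extra; explain adds failure context.
func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) {
	gomega.Expect(actual).To(gomega.Equal(extra), explain...)
}

// ExpectNotEqual asserts that actual does not equal extra.
func ExpectNotEqual(actual interface{}, extra interface{}, explain ...interface{}) {
	gomega.Expect(actual).NotTo(gomega.Equal(extra), explain...)
}

// ExpectNoError fails the test if err is non-nil.
func ExpectNoError(err error, explain ...interface{}) {
	gomega.Expect(err).NotTo(gomega.HaveOccurred(), explain...)
}

Call sites then read as framework.ExpectEqual(got, want, "message", args...), which is the shape the replacements below rely on.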


@@ -582,7 +582,7 @@ func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) er
}
}
gomega.Expect(priorityPod).NotTo(gomega.BeNil())
-gomega.Expect(priorityPod.Status.Phase).NotTo(gomega.Equal(v1.PodSucceeded),
+framework.ExpectNotEqual(priorityPod.Status.Phase, v1.PodSucceeded,
fmt.Sprintf("pod: %s succeeded unexpectedly", priorityPod.Name))
// Check eviction ordering.
@@ -597,20 +597,20 @@ func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) er
}
gomega.Expect(lowPriorityPod).NotTo(gomega.BeNil())
if priorityPodSpec.evictionPriority < lowPriorityPodSpec.evictionPriority && lowPriorityPod.Status.Phase == v1.PodRunning {
-gomega.Expect(priorityPod.Status.Phase).NotTo(gomega.Equal(v1.PodFailed),
+framework.ExpectNotEqual(priorityPod.Status.Phase, v1.PodFailed,
fmt.Sprintf("priority %d pod: %s failed before priority %d pod: %s",
priorityPodSpec.evictionPriority, priorityPodSpec.pod.Name, lowPriorityPodSpec.evictionPriority, lowPriorityPodSpec.pod.Name))
}
}
if priorityPod.Status.Phase == v1.PodFailed {
-gomega.Expect(priorityPod.Status.Reason, eviction.Reason, "pod %s failed; expected Status.Reason to be %s, but got %s",
+framework.ExpectEqual(priorityPod.Status.Reason, eviction.Reason, "pod %s failed; expected Status.Reason to be %s, but got %s",
priorityPod.Name, eviction.Reason, priorityPod.Status.Reason)
}
// EvictionPriority 0 pods should not fail
if priorityPodSpec.evictionPriority == 0 {
-gomega.Expect(priorityPod.Status.Phase).NotTo(gomega.Equal(v1.PodFailed),
+framework.ExpectNotEqual(priorityPod.Status.Phase, v1.PodFailed,
fmt.Sprintf("priority 0 pod: %s failed", priorityPod.Name))
}
@@ -637,7 +637,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
}.AsSelector().String()
podEvictEvents, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(metav1.ListOptions{FieldSelector: selector})
gomega.Expect(err).To(gomega.BeNil(), "Unexpected error getting events during eviction test: %v", err)
-gomega.Expect(len(podEvictEvents.Items)).To(gomega.Equal(1), "Expected to find 1 eviction event for pod %s, got %d", pod.Name, len(podEvictEvents.Items))
+framework.ExpectEqual(len(podEvictEvents.Items), 1, "Expected to find 1 eviction event for pod %s, got %d", pod.Name, len(podEvictEvents.Items))
event := podEvictEvents.Items[0]
if expectedStarvedResource != noStarvedResource {
@@ -646,7 +646,7 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the starved resource %s, but it was not found",
pod.Name, expectedStarvedResource)
starvedResource := v1.ResourceName(starved)
-gomega.Expect(starvedResource).To(gomega.Equal(expectedStarvedResource), "Expected to the starved_resource annotation on pod %s to contain %s, but got %s instead",
+framework.ExpectEqual(starvedResource, expectedStarvedResource, "Expected to the starved_resource annotation on pod %s to contain %s, but got %s instead",
pod.Name, expectedStarvedResource, starvedResource)
// We only check these keys for memory, because ephemeral storage evictions may be due to volume usage, in which case these values are not present
@@ -656,22 +656,22 @@ func verifyEvictionEvents(f *framework.Framework, testSpecs []podEvictSpec, expe
gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers, but it was not found",
pod.Name)
offendingContainers := strings.Split(offendersString, ",")
-gomega.Expect(len(offendingContainers)).To(gomega.Equal(1), "Expected to find the offending container's usage in the %s annotation, but no container was found",
+framework.ExpectEqual(len(offendingContainers), 1, "Expected to find the offending container's usage in the %s annotation, but no container was found",
eviction.OffendingContainersKey)
-gomega.Expect(offendingContainers[0]).To(gomega.Equal(pod.Spec.Containers[0].Name), "Expected to find the offending container: %s's usage in the %s annotation, but found %s instead",
+framework.ExpectEqual(offendingContainers[0], pod.Spec.Containers[0].Name, "Expected to find the offending container: %s's usage in the %s annotation, but found %s instead",
pod.Spec.Containers[0].Name, eviction.OffendingContainersKey, offendingContainers[0])
// Check the eviction.OffendingContainersUsageKey
offendingUsageString, found := event.Annotations[eviction.OffendingContainersUsageKey]
-gomega.Expect(found).To(gomega.BeTrue(), "Expected to find an annotation on the eviction event for pod %s containing the offending containers' usage, but it was not found",
+framework.ExpectEqual(found, true, "Expected to find an annotation on the eviction event for pod %s containing the offending containers' usage, but it was not found",
pod.Name)
offendingContainersUsage := strings.Split(offendingUsageString, ",")
-gomega.Expect(len(offendingContainersUsage)).To(gomega.Equal(1), "Expected to find the offending container's usage in the %s annotation, but found %+v",
+framework.ExpectEqual(len(offendingContainersUsage), 1, "Expected to find the offending container's usage in the %s annotation, but found %+v",
eviction.OffendingContainersUsageKey, offendingContainersUsage)
usageQuantity, err := resource.ParseQuantity(offendingContainersUsage[0])
gomega.Expect(err).To(gomega.BeNil(), "Expected to be able to parse pod %s's %s annotation as a quantity, but got err: %v", pod.Name, eviction.OffendingContainersUsageKey, err)
request := pod.Spec.Containers[0].Resources.Requests[starvedResource]
-gomega.Expect(usageQuantity.Cmp(request)).To(gomega.Equal(1), "Expected usage of offending container: %s in pod %s to exceed its request %s",
+framework.ExpectEqual(usageQuantity.Cmp(request), 1, "Expected usage of offending container: %s in pod %s to exceed its request %s",
usageQuantity.String(), pod.Name, request.String())
}
}


@@ -90,7 +90,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
ginkgo.By("Confirming that after a kubelet and pod restart, GPU assignment is kept")
ensurePodContainerRestart(f, p1.Name, p1.Name)
devIdRestart1 := parseLog(f, p1.Name, p1.Name, deviceIDRE)
-gomega.Expect(devIdRestart1).To(gomega.Equal(devId1))
+framework.ExpectEqual(devIdRestart1, devId1)
ginkgo.By("Restarting Kubelet and creating another pod")
restartKubelet()
@@ -103,7 +103,7 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
ginkgo.By("Checking that pods got a different GPU")
devId2 := parseLog(f, p2.Name, p2.Name, deviceIDRE)
-gomega.Expect(devId1).To(gomega.Not(gomega.Equal(devId2)))
+framework.ExpectNotEqual(devId1, devId2)
ginkgo.By("Deleting device plugin.")
f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Delete(devicePluginPod.Name, &metav1.DeleteOptions{})
@@ -116,20 +116,20 @@ var _ = framework.KubeDescribe("NVIDIA GPU Device Plugin [Feature:GPUDevicePlugi
ginkgo.By("Checking that scheduled pods can continue to run even after we delete device plugin.")
ensurePodContainerRestart(f, p1.Name, p1.Name)
devIdRestart1 = parseLog(f, p1.Name, p1.Name, deviceIDRE)
-gomega.Expect(devIdRestart1).To(gomega.Equal(devId1))
+framework.ExpectEqual(devIdRestart1, devId1)
ensurePodContainerRestart(f, p2.Name, p2.Name)
devIdRestart2 := parseLog(f, p2.Name, p2.Name, deviceIDRE)
-gomega.Expect(devIdRestart2).To(gomega.Equal(devId2))
+framework.ExpectEqual(devIdRestart2, devId2)
ginkgo.By("Restarting Kubelet.")
restartKubelet()
ginkgo.By("Checking that scheduled pods can continue to run even after we delete device plugin and restart Kubelet.")
ensurePodContainerRestart(f, p1.Name, p1.Name)
devIdRestart1 = parseLog(f, p1.Name, p1.Name, deviceIDRE)
-gomega.Expect(devIdRestart1).To(gomega.Equal(devId1))
+framework.ExpectEqual(devIdRestart1, devId1)
ensurePodContainerRestart(f, p2.Name, p2.Name)
devIdRestart2 = parseLog(f, p2.Name, p2.Name, deviceIDRE)
-gomega.Expect(devIdRestart2).To(gomega.Equal(devId2))
+framework.ExpectEqual(devIdRestart2, devId2)
logDevicePluginMetrics()
// Cleanup


@@ -50,7 +50,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
ginkgo.By("create the static pod")
err := createStaticPod(podPath, staticPodName, ns,
imageutils.GetE2EImage(imageutils.Nginx), v1.RestartPolicyAlways)
-gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("wait for the mirror pod to be running")
gomega.Eventually(func() error {
@@ -65,13 +65,13 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
ginkgo.It("should be updated when static pod updated [NodeConformance]", func() {
ginkgo.By("get mirror pod uid")
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
-gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
uid := pod.UID
ginkgo.By("update the static pod container image")
image := imageutils.GetPauseImageName()
err = createStaticPod(podPath, staticPodName, ns, image, v1.RestartPolicyAlways)
-gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("wait for the mirror pod to be updated")
gomega.Eventually(func() error {
@@ -80,9 +80,9 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
ginkgo.By("check the mirror pod container image is updated")
pod, err = f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
-gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
-gomega.Expect(len(pod.Spec.Containers)).Should(gomega.Equal(1))
-gomega.Expect(pod.Spec.Containers[0].Image).Should(gomega.Equal(image))
+framework.ExpectNoError(err)
+framework.ExpectEqual(len(pod.Spec.Containers), 1)
+framework.ExpectEqual(pod.Spec.Containers[0].Image, image)
})
/*
Release : v1.9
@@ -92,12 +92,12 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
ginkgo.It("should be recreated when mirror pod gracefully deleted [NodeConformance]", func() {
ginkgo.By("get mirror pod uid")
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
-gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
uid := pod.UID
ginkgo.By("delete the mirror pod with grace period 30s")
err = f.ClientSet.CoreV1().Pods(ns).Delete(mirrorPodName, metav1.NewDeleteOptions(30))
-gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("wait for the mirror pod to be recreated")
gomega.Eventually(func() error {
@@ -112,12 +112,12 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
ginkgo.It("should be recreated when mirror pod forcibly deleted [NodeConformance]", func() {
ginkgo.By("get mirror pod uid")
pod, err := f.ClientSet.CoreV1().Pods(ns).Get(mirrorPodName, metav1.GetOptions{})
-gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
uid := pod.UID
ginkgo.By("delete the mirror pod with grace period 0s")
err = f.ClientSet.CoreV1().Pods(ns).Delete(mirrorPodName, metav1.NewDeleteOptions(0))
-gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("wait for the mirror pod to be recreated")
gomega.Eventually(func() error {
@@ -127,7 +127,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
ginkgo.AfterEach(func() {
ginkgo.By("delete the static pod")
err := deleteStaticPod(podPath, staticPodName, ns)
-gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
+framework.ExpectNoError(err)
ginkgo.By("wait for the mirror pod to disappear")
gomega.Eventually(func() error {


@@ -140,7 +140,7 @@ func tempSetCurrentKubeletConfig(f *framework.Framework, updateFunction func(ini
ginkgo.BeforeEach(func() {
configEnabled, err := isKubeletConfigEnabled(f)
framework.ExpectNoError(err)
-gomega.Expect(configEnabled).To(gomega.BeTrue(), "The Dynamic Kubelet Configuration feature is not enabled.\n"+
+framework.ExpectEqual(configEnabled, true, "The Dynamic Kubelet Configuration feature is not enabled.\n"+
"Pass --feature-gates=DynamicKubeletConfig=true to the Kubelet to enable this feature.\n"+
"For `make test-e2e-node`, you can set `TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'`.")
oldCfg, err = getCurrentKubeletConfig()
@@ -347,7 +347,7 @@ func logNodeEvents(f *framework.Framework) {
func getLocalNode(f *framework.Framework) *v1.Node {
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-gomega.Expect(len(nodeList.Items)).To(gomega.Equal(1), "Unexpected number of node objects for node e2e. Expects only one node.")
+framework.ExpectEqual(len(nodeList.Items), 1, "Unexpected number of node objects for node e2e. Expects only one node.")
return &nodeList.Items[0]
}
@@ -423,7 +423,7 @@ func restartKubelet() {
framework.ExpectNoError(err)
regex := regexp.MustCompile("(kubelet-\\w+)")
matches := regex.FindStringSubmatch(string(stdout))
-gomega.Expect(len(matches)).NotTo(gomega.BeZero())
+framework.ExpectNotEqual(len(matches), 0)
kube := matches[0]
e2elog.Logf("Get running kubelet with systemctl: %v, %v", string(stdout), kube)
stdout, err = exec.Command("sudo", "systemctl", "restart", kube).CombinedOutput()
@@ -433,9 +433,8 @@ func toCgroupFsName(cgroupName cm.CgroupName) string {
func toCgroupFsName(cgroupName cm.CgroupName) string {
if framework.TestContext.KubeletConfig.CgroupDriver == "systemd" {
return cgroupName.ToSystemd()
-	} else {
-		return cgroupName.ToCgroupfs()
	}
+	return cgroupName.ToCgroupfs()
}
// reduceAllocatableMemoryUsage uses memory.force_empty (https://lwn.net/Articles/432224/)