Merge pull request #80790 from k-toyoda-pi/use_expectequal_e2e_node_a-d

Use framework.ExpectEqual() in test/e2e_node/[a-d]
Kubernetes Prow Robot 2019-08-06 23:57:03 -07:00 committed by GitHub
commit ee8ed53e0e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
5 changed files with 19 additions and 19 deletions
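
A note on the replacement pattern: framework.ExpectEqual(actual, expected, explain...) is a small helper in the e2e framework that asserts equality through gomega's Equal matcher, so each one-line swap below keeps the same assertion semantics while shortening the call site. The Go sketch below illustrates the pattern with a stand-alone helper; the names expectEqual and TestExpectEqualSketch and the sample values are illustrative only, not part of this commit, and the helper merely approximates what framework.ExpectEqual is assumed to do.

package example

import (
	"testing"

	"github.com/onsi/gomega"
)

// expectEqual approximates the behaviour assumed for the e2e framework helper:
// assert equality via gomega's Equal matcher, forwarding optional explanation args.
func expectEqual(t *testing.T, actual, expected interface{}, explain ...interface{}) {
	g := gomega.NewGomegaWithT(t)
	g.Expect(actual).To(gomega.Equal(expected), explain...)
}

func TestExpectEqualSketch(t *testing.T) {
	phase := "Pending"
	// Old style: gomega.Expect(phase).To(gomega.Equal("Pending"), "PodStatus: %+v", phase)
	// New style used throughout this commit:
	expectEqual(t, phase, "Pending", "PodStatus: %+v", phase)
}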


@@ -199,10 +199,10 @@ func createPodWithAppArmor(f *framework.Framework, profile string) *v1.Pod {
 func expectSoftRejection(status v1.PodStatus) {
 args := []interface{}{"PodStatus: %+v", status}
-gomega.Expect(status.Phase).To(gomega.Equal(v1.PodPending), args...)
-gomega.Expect(status.Reason).To(gomega.Equal("AppArmor"), args...)
+framework.ExpectEqual(status.Phase, v1.PodPending, args...)
+framework.ExpectEqual(status.Reason, "AppArmor", args...)
 gomega.Expect(status.Message).To(gomega.ContainSubstring("AppArmor"), args...)
-gomega.Expect(status.ContainerStatuses[0].State.Waiting.Reason).To(gomega.Equal("Blocked"), args...)
+framework.ExpectEqual(status.ContainerStatuses[0].State.Waiting.Reason, "Blocked", args...)
 }
 func isAppArmorEnabled() bool {


@@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive
 }
 pod = f.PodClient().CreateSync(pod)
 ginkgo.By("get container log path")
-gomega.Expect(len(pod.Status.ContainerStatuses)).To(gomega.Equal(1))
+framework.ExpectEqual(len(pod.Status.ContainerStatuses), 1)
 id := kubecontainer.ParseContainerID(pod.Status.ContainerStatuses[0].ContainerID).ID
 r, _, err := getCRIClient()
 framework.ExpectNoError(err)


@@ -90,7 +90,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 ginkgo.It("Kubelet's oom-score-adj should be -999", func() {
 kubeletPids, err := getPidsForProcess(kubeletProcessName, "")
 gomega.Expect(err).To(gomega.BeNil(), "failed to get list of kubelet pids")
-gomega.Expect(len(kubeletPids)).To(gomega.Equal(1), "expected only one kubelet process; found %d", len(kubeletPids))
+framework.ExpectEqual(len(kubeletPids), 1, "expected only one kubelet process; found %d", len(kubeletPids))
 gomega.Eventually(func() error {
 return validateOOMScoreAdjSetting(kubeletPids[0], -999)
 }, 5*time.Minute, 30*time.Second).Should(gomega.BeNil())


@@ -93,7 +93,7 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
 if p.Name == nonCriticalBestEffort.Name {
 gomega.Expect(p.Status.Phase).NotTo(gomega.Equal(v1.PodFailed), fmt.Sprintf("pod: %v should be preempted", p.Name))
 } else {
-gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodFailed), fmt.Sprintf("pod: %v should not be preempted", p.Name))
+framework.ExpectEqual(p.Status.Phase, v1.PodFailed, fmt.Sprintf("pod: %v should not be preempted", p.Name))
 }
 }
 })
@@ -115,7 +115,7 @@ func getNodeCPUAndMemoryCapacity(f *framework.Framework) v1.ResourceList {
 nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
 framework.ExpectNoError(err)
 // Assuming that there is only one node, because this is a node e2e test.
-gomega.Expect(len(nodeList.Items)).To(gomega.Equal(1))
+framework.ExpectEqual(len(nodeList.Items), 1)
 capacity := nodeList.Items[0].Status.Allocatable
 return v1.ResourceList{
 v1.ResourceCPU: capacity[v1.ResourceCPU],


@@ -105,7 +105,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 var resourcesForOurPod *kubeletpodresourcesv1alpha1.PodResources
 e2elog.Logf("pod resources %v", podResources)
 gomega.Expect(err).To(gomega.BeNil())
-gomega.Expect(len(podResources.PodResources)).To(gomega.Equal(2))
+framework.ExpectEqual(len(podResources.PodResources), 2)
 for _, res := range podResources.GetPodResources() {
 if res.Name == pod1.Name {
 resourcesForOurPod = res
@@ -113,13 +113,13 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 }
 e2elog.Logf("resourcesForOurPod %v", resourcesForOurPod)
 gomega.Expect(resourcesForOurPod).NotTo(gomega.BeNil())
-gomega.Expect(resourcesForOurPod.Name).To(gomega.Equal(pod1.Name))
-gomega.Expect(resourcesForOurPod.Namespace).To(gomega.Equal(pod1.Namespace))
-gomega.Expect(len(resourcesForOurPod.Containers)).To(gomega.Equal(1))
-gomega.Expect(resourcesForOurPod.Containers[0].Name).To(gomega.Equal(pod1.Spec.Containers[0].Name))
-gomega.Expect(len(resourcesForOurPod.Containers[0].Devices)).To(gomega.Equal(1))
-gomega.Expect(resourcesForOurPod.Containers[0].Devices[0].ResourceName).To(gomega.Equal(resourceName))
-gomega.Expect(len(resourcesForOurPod.Containers[0].Devices[0].DeviceIds)).To(gomega.Equal(1))
+framework.ExpectEqual(resourcesForOurPod.Name, pod1.Name)
+framework.ExpectEqual(resourcesForOurPod.Namespace, pod1.Namespace)
+framework.ExpectEqual(len(resourcesForOurPod.Containers), 1)
+framework.ExpectEqual(resourcesForOurPod.Containers[0].Name, pod1.Spec.Containers[0].Name)
+framework.ExpectEqual(len(resourcesForOurPod.Containers[0].Devices), 1)
+framework.ExpectEqual(resourcesForOurPod.Containers[0].Devices[0].ResourceName, resourceName)
+framework.ExpectEqual(len(resourcesForOurPod.Containers[0].Devices[0].DeviceIds), 1)
 pod1, err = f.PodClient().Get(pod1.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)
@@ -128,7 +128,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 ginkgo.By("Confirming that device assignment persists even after container restart")
 devIdAfterRestart := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
-gomega.Expect(devIdAfterRestart).To(gomega.Equal(devId1))
+framework.ExpectEqual(devIdAfterRestart, devId1)
 restartTime := time.Now()
 ginkgo.By("Restarting Kubelet")
@@ -167,7 +167,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 ensurePodContainerRestart(f, pod1.Name, pod1.Name)
 ginkgo.By("Confirming that after a kubelet restart, fake-device assignement is kept")
 devIdRestart1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
-gomega.Expect(devIdRestart1).To(gomega.Equal(devId1))
+framework.ExpectEqual(devIdRestart1, devId1)
 ginkgo.By("Waiting for resource to become available on the local node after re-registration")
 gomega.Eventually(func() bool {
@@ -200,11 +200,11 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 ginkgo.By("Checking that scheduled pods can continue to run even after we delete device plugin.")
 ensurePodContainerRestart(f, pod1.Name, pod1.Name)
 devIdRestart1 = parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
-gomega.Expect(devIdRestart1).To(gomega.Equal(devId1))
+framework.ExpectEqual(devIdRestart1, devId1)
 ensurePodContainerRestart(f, pod2.Name, pod2.Name)
 devIdRestart2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)
-gomega.Expect(devIdRestart2).To(gomega.Equal(devId2))
+framework.ExpectEqual(devIdRestart2, devId2)
 ginkgo.By("Re-register resources")
 devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(dp)