Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-22 11:21:47 +00:00)
Merge pull request #80790 from k-toyoda-pi/use_expectequal_e2e_node_a-d
Use framework.ExpectEqual() in test/e2e_node/[a-d]
commit ee8ed53e0e
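For context on the refactor: framework.ExpectEqual() collapses the two-call gomega form used in these tests into a single assertion. Below is a minimal Go sketch of the pattern, assuming (as the call sites in the hunks suggest) that ExpectEqual simply wraps gomega.Equal and forwards the optional explanation arguments; the helper name expectEqualSketch and the main() registration boilerplate are illustrative, not the framework's actual code.

package main

import (
	"fmt"

	"github.com/onsi/gomega"
)

// expectEqualSketch mirrors what framework.ExpectEqual is assumed to do:
// wrap gomega.Expect(actual).To(gomega.Equal(expected), explain...) so each
// call site becomes one line. ExpectWithOffset(1, ...) keeps failure output
// pointing at the caller rather than at this helper.
func expectEqualSketch(actual interface{}, expected interface{}, explain ...interface{}) {
	gomega.ExpectWithOffset(1, actual).To(gomega.Equal(expected), explain...)
}

func main() {
	// Outside a Ginkgo suite a gomega fail handler must be registered explicitly.
	gomega.RegisterFailHandler(func(message string, callerSkip ...int) {
		fmt.Println("assertion failed:", message)
	})

	// Old style: gomega.Expect(len(items)).To(gomega.Equal(1))
	// New style: framework.ExpectEqual(len(items), 1)
	items := []string{"only-one"}
	expectEqualSketch(len(items), 1, "expected exactly one item")
}

Every hunk below applies the same mechanical substitution; assertions that are not plain equality checks (ContainSubstring, BeNil, NotTo, Eventually) are left on gomega.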
@@ -199,10 +199,10 @@ func createPodWithAppArmor(f *framework.Framework, profile string) *v1.Pod {
 
 func expectSoftRejection(status v1.PodStatus) {
 args := []interface{}{"PodStatus: %+v", status}
-gomega.Expect(status.Phase).To(gomega.Equal(v1.PodPending), args...)
-gomega.Expect(status.Reason).To(gomega.Equal("AppArmor"), args...)
+framework.ExpectEqual(status.Phase, v1.PodPending, args...)
+framework.ExpectEqual(status.Reason, "AppArmor", args...)
 gomega.Expect(status.Message).To(gomega.ContainSubstring("AppArmor"), args...)
-gomega.Expect(status.ContainerStatuses[0].State.Waiting.Reason).To(gomega.Equal("Blocked"), args...)
+framework.ExpectEqual(status.ContainerStatuses[0].State.Waiting.Reason, "Blocked", args...)
 }
 
 func isAppArmorEnabled() bool {
@@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive
 }
 pod = f.PodClient().CreateSync(pod)
 ginkgo.By("get container log path")
-gomega.Expect(len(pod.Status.ContainerStatuses)).To(gomega.Equal(1))
+framework.ExpectEqual(len(pod.Status.ContainerStatuses), 1)
 id := kubecontainer.ParseContainerID(pod.Status.ContainerStatuses[0].ContainerID).ID
 r, _, err := getCRIClient()
 framework.ExpectNoError(err)
@@ -90,7 +90,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 ginkgo.It("Kubelet's oom-score-adj should be -999", func() {
 kubeletPids, err := getPidsForProcess(kubeletProcessName, "")
 gomega.Expect(err).To(gomega.BeNil(), "failed to get list of kubelet pids")
-gomega.Expect(len(kubeletPids)).To(gomega.Equal(1), "expected only one kubelet process; found %d", len(kubeletPids))
+framework.ExpectEqual(len(kubeletPids), 1, "expected only one kubelet process; found %d", len(kubeletPids))
 gomega.Eventually(func() error {
 return validateOOMScoreAdjSetting(kubeletPids[0], -999)
 }, 5*time.Minute, 30*time.Second).Should(gomega.BeNil())
@@ -93,7 +93,7 @@ var _ = framework.KubeDescribe("CriticalPod [Serial] [Disruptive] [NodeFeature:C
 if p.Name == nonCriticalBestEffort.Name {
 gomega.Expect(p.Status.Phase).NotTo(gomega.Equal(v1.PodFailed), fmt.Sprintf("pod: %v should be preempted", p.Name))
 } else {
-gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodFailed), fmt.Sprintf("pod: %v should not be preempted", p.Name))
+framework.ExpectEqual(p.Status.Phase, v1.PodFailed, fmt.Sprintf("pod: %v should not be preempted", p.Name))
 }
 }
 })
@@ -115,7 +115,7 @@ func getNodeCPUAndMemoryCapacity(f *framework.Framework) v1.ResourceList {
 nodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{})
 framework.ExpectNoError(err)
 // Assuming that there is only one node, because this is a node e2e test.
-gomega.Expect(len(nodeList.Items)).To(gomega.Equal(1))
+framework.ExpectEqual(len(nodeList.Items), 1)
 capacity := nodeList.Items[0].Status.Allocatable
 return v1.ResourceList{
 v1.ResourceCPU: capacity[v1.ResourceCPU],
@@ -105,7 +105,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 var resourcesForOurPod *kubeletpodresourcesv1alpha1.PodResources
 e2elog.Logf("pod resources %v", podResources)
 gomega.Expect(err).To(gomega.BeNil())
-gomega.Expect(len(podResources.PodResources)).To(gomega.Equal(2))
+framework.ExpectEqual(len(podResources.PodResources), 2)
 for _, res := range podResources.GetPodResources() {
 if res.Name == pod1.Name {
 resourcesForOurPod = res
@@ -113,13 +113,13 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 }
 e2elog.Logf("resourcesForOurPod %v", resourcesForOurPod)
 gomega.Expect(resourcesForOurPod).NotTo(gomega.BeNil())
-gomega.Expect(resourcesForOurPod.Name).To(gomega.Equal(pod1.Name))
-gomega.Expect(resourcesForOurPod.Namespace).To(gomega.Equal(pod1.Namespace))
-gomega.Expect(len(resourcesForOurPod.Containers)).To(gomega.Equal(1))
-gomega.Expect(resourcesForOurPod.Containers[0].Name).To(gomega.Equal(pod1.Spec.Containers[0].Name))
-gomega.Expect(len(resourcesForOurPod.Containers[0].Devices)).To(gomega.Equal(1))
-gomega.Expect(resourcesForOurPod.Containers[0].Devices[0].ResourceName).To(gomega.Equal(resourceName))
-gomega.Expect(len(resourcesForOurPod.Containers[0].Devices[0].DeviceIds)).To(gomega.Equal(1))
+framework.ExpectEqual(resourcesForOurPod.Name, pod1.Name)
+framework.ExpectEqual(resourcesForOurPod.Namespace, pod1.Namespace)
+framework.ExpectEqual(len(resourcesForOurPod.Containers), 1)
+framework.ExpectEqual(resourcesForOurPod.Containers[0].Name, pod1.Spec.Containers[0].Name)
+framework.ExpectEqual(len(resourcesForOurPod.Containers[0].Devices), 1)
+framework.ExpectEqual(resourcesForOurPod.Containers[0].Devices[0].ResourceName, resourceName)
+framework.ExpectEqual(len(resourcesForOurPod.Containers[0].Devices[0].DeviceIds), 1)
 
 pod1, err = f.PodClient().Get(pod1.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)
@@ -128,7 +128,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 
 ginkgo.By("Confirming that device assignment persists even after container restart")
 devIdAfterRestart := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
-gomega.Expect(devIdAfterRestart).To(gomega.Equal(devId1))
+framework.ExpectEqual(devIdAfterRestart, devId1)
 
 restartTime := time.Now()
 ginkgo.By("Restarting Kubelet")
@@ -167,7 +167,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 ensurePodContainerRestart(f, pod1.Name, pod1.Name)
 ginkgo.By("Confirming that after a kubelet restart, fake-device assignement is kept")
 devIdRestart1 := parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
-gomega.Expect(devIdRestart1).To(gomega.Equal(devId1))
+framework.ExpectEqual(devIdRestart1, devId1)
 
 ginkgo.By("Waiting for resource to become available on the local node after re-registration")
 gomega.Eventually(func() bool {
@@ -200,11 +200,11 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 ginkgo.By("Checking that scheduled pods can continue to run even after we delete device plugin.")
 ensurePodContainerRestart(f, pod1.Name, pod1.Name)
 devIdRestart1 = parseLog(f, pod1.Name, pod1.Name, deviceIDRE)
-gomega.Expect(devIdRestart1).To(gomega.Equal(devId1))
+framework.ExpectEqual(devIdRestart1, devId1)
 
 ensurePodContainerRestart(f, pod2.Name, pod2.Name)
 devIdRestart2 := parseLog(f, pod2.Name, pod2.Name, deviceIDRE)
-gomega.Expect(devIdRestart2).To(gomega.Equal(devId2))
+framework.ExpectEqual(devIdRestart2, devId2)
 
 ginkgo.By("Re-register resources")
 devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(dp)