Cleanup e2e_node test style

Remove the use of Expect(err).NotTo(HaveOccurred()), and switch to using
framework.ExpectNoError(err)
Odin Ugedal 2019-06-08 19:44:08 +02:00
parent 11abb58a5b
commit 6bb8bb0227
No known key found for this signature in database
GPG Key ID: AFF9C8242CF7A7AF
16 changed files with 41 additions and 44 deletions
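
For reference, a minimal before/after sketch of the pattern applied across all sixteen files. The spec body and the doSomething helper are hypothetical (not from any file in this diff), and the variadic explanation arguments are assumed from the framework helper's signature at the time, ExpectNoError(err error, explain ...interface{}):

// Illustrative sketch only; not a file from this commit.
package example_test

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega" // droppable once the last Expect call is gone

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = framework.KubeDescribe("Example", func() {
	It("reports unexpected errors", func() {
		err := doSomething() // hypothetical helper returning an error

		// Old style: gomega matchers via the dot import.
		Expect(err).NotTo(HaveOccurred(), "doSomething failed")

		// New style: one call, same optional explanation arguments,
		// reported through the e2e framework rather than raw gomega.
		framework.ExpectNoError(err, "doSomething failed")
	})
})

Calls with no message simply become framework.ExpectNoError(err); files whose only gomega use was this pattern also drop the . "github.com/onsi/gomega" dot import, as several hunks below show.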

View File

@@ -32,8 +32,6 @@ import (
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/perftype"
nodeperftype "k8s.io/kubernetes/test/e2e_node/perftype"
. "github.com/onsi/gomega"
)
const (
@@ -156,7 +154,7 @@ func getThroughputPerfData(batchLag time.Duration, e2eLags []framework.PodLatenc
func getTestNodeInfo(f *framework.Framework, testName, testDesc string) map[string]string {
nodeName := framework.TestContext.NodeName
node, err := f.ClientSet.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
cpu, ok := node.Status.Capacity[v1.ResourceCPU]
if !ok {

View File

@@ -82,9 +82,9 @@ var _ = framework.KubeDescribe("ContainerLogRotation [Slow] [Serial] [Disruptive
Expect(len(pod.Status.ContainerStatuses)).To(Equal(1))
id := kubecontainer.ParseContainerID(pod.Status.ContainerStatuses[0].ContainerID).ID
r, _, err := getCRIClient()
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
status, err := r.ContainerStatus(id)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
logPath := status.GetLogPath()
By("wait for container log being rotated to max file limit")
Eventually(func() (int, error) {

View File

@@ -155,13 +155,13 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
if CurrentGinkgoTestDescription().Failed {
By("Dump all running containers")
runtime, _, err := getCRIClient()
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
containers, err := runtime.ListContainers(&runtimeapi.ContainerFilter{
State: &runtimeapi.ContainerStateValue{
State: runtimeapi.ContainerState_CONTAINER_RUNNING,
},
})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
e2elog.Logf("Running containers:")
for _, c := range containers {
e2elog.Logf("%+v", c)

View File

@@ -105,7 +105,7 @@ func getLocalNodeCPUDetails(f *framework.Framework) (cpuCapVal int64, cpuAllocVa
func waitForContainerRemoval(containerName, podName, podNS string) {
rs, _, err := getCRIClient()
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
Eventually(func() bool {
containers, err := rs.ListContainers(&runtimeapi.ContainerFilter{
LabelSelector: map[string]string{

View File

@@ -456,7 +456,7 @@ func createBatchPodWithRateControl(f *framework.Framework, pods []*v1.Pod, inter
func getPodStartLatency(node string) (framework.KubeletLatencyMetrics, error) {
latencyMetrics := framework.KubeletLatencyMetrics{}
ms, err := metrics.GrabKubeletMetricsWithoutProxy(node, "/metrics")
-Expect(err).NotTo(HaveOccurred(), "Failed to get kubelet metrics without proxy in node %s", node)
+framework.ExpectNoError(err, "Failed to get kubelet metrics without proxy in node %s", node)
for _, samples := range ms {
for _, sample := range samples {

View File

@@ -216,7 +216,7 @@ func maskLocksmithdOnCoreos() {
}
if bytes.Contains(data, []byte("ID=coreos")) {
output, err := exec.Command("systemctl", "mask", "--now", "locksmithd").CombinedOutput()
-Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("should be able to mask locksmithd - output: %q", string(output)))
+framework.ExpectNoError(err, fmt.Sprintf("should be able to mask locksmithd - output: %q", string(output)))
klog.Infof("Locksmithd is masked successfully")
}
}
@@ -229,7 +229,7 @@ func waitForNodeReady() {
nodeReadyPollInterval = 1 * time.Second
)
client, err := getAPIServerClient()
-Expect(err).NotTo(HaveOccurred(), "should be able to get apiserver client.")
+framework.ExpectNoError(err, "should be able to get apiserver client.")
Eventually(func() error {
node, err := getNode(client)
if err != nil {
@@ -273,7 +273,7 @@ func updateTestContext() error {
// getNode gets node object from the apiserver.
func getNode(c *clientset.Clientset) (*v1.Node, error) {
nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
-Expect(err).NotTo(HaveOccurred(), "should be able to list nodes.")
+framework.ExpectNoError(err, "should be able to list nodes.")
if nodes == nil {
return nil, fmt.Errorf("the node list is nil.")
}

View File

@@ -307,7 +307,7 @@ var _ = framework.KubeDescribe("PriorityMemoryEvictionOrdering [Slow] [Serial] [
})
AfterEach(func() {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
specs := []podEvictSpec{
{
@@ -364,7 +364,7 @@ var _ = framework.KubeDescribe("PriorityLocalStorageEvictionOrdering [Slow] [Ser
})
AfterEach(func() {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
specs := []podEvictSpec{
{
@@ -417,7 +417,7 @@ var _ = framework.KubeDescribe("PriorityPidEvictionOrdering [Slow] [Serial] [Dis
})
AfterEach(func() {
err := f.ClientSet.SchedulingV1().PriorityClasses().Delete(highPriorityClassName, &metav1.DeleteOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
specs := []podEvictSpec{
{

View File

@@ -144,7 +144,7 @@ func containerGCTest(f *framework.Framework, test testRun) {
BeforeEach(func() {
var err error
runtime, _, err = getCRIClient()
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
for _, pod := range test.testPods {
// Initialize the getContainerNames function to use CRI runtime client.

View File

@@ -189,7 +189,7 @@ func runHugePagesTests(f *framework.Framework) {
verifyPod := makePodToVerifyHugePages("pod"+podUID, resource.MustParse("50Mi"))
f.PodClient().Create(verifyPod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
}

View File

@@ -161,13 +161,13 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
}.AsSelector().String()
eventListOptions = metav1.ListOptions{FieldSelector: selector}
By("Create the test log file")
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
By("Create config map for the node problem detector")
_, err = c.CoreV1().ConfigMaps(ns).Create(&v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: configName},
Data: map[string]string{path.Base(configFile): config},
})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
By("Create the node problem detector")
hostPathType := new(v1.HostPathType)
*hostPathType = v1.HostPathType(string(v1.HostPathFileOrCreate))
@@ -238,7 +238,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
},
})
pod, err := f.PodClient().Get(name, metav1.GetOptions{})
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
// TODO: remove hardcoded kubelet volume directory path
// framework.TestContext.KubeVolumeDir is currently not populated for node e2e
hostLogFile = "/var/lib/kubelet/pods/" + string(pod.UID) + "/volumes/kubernetes.io~empty-dir" + logFile
@@ -340,7 +340,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
if test.messageNum > 0 {
By(fmt.Sprintf("Inject %d logs: %q", test.messageNum, test.message))
err := injectLog(hostLogFile, test.timestamp, test.message, test.messageNum)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
}
By(fmt.Sprintf("Wait for %d temp events generated", test.tempEvents))

View File

@@ -132,7 +132,7 @@ func runPodPidsLimitTests(f *framework.Framework) {
verifyPod := makePodToVerifyPids("pod"+podUID, resource.MustParse("1024"))
f.PodClient().Create(verifyPod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, verifyPod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
}

View File

@@ -29,7 +29,6 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/klog"
)
@@ -161,7 +160,7 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
pod := makePodToVerifyCgroups(cgroupsToVerify)
f.PodClient().Create(pod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
})
})
@@ -199,15 +198,16 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
pod := makePodToVerifyCgroups(cgroupsToVerify)
f.PodClient().Create(pod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
By("Checking if the pod cgroup was deleted", func() {
gp := int64(1)
-Expect(f.PodClient().Delete(guaranteedPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
+err := f.PodClient().Delete(guaranteedPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
+framework.ExpectNoError(err)
pod := makePodToVerifyCgroupRemoved("pod" + podUID)
f.PodClient().Create(pod)
-err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
+framework.ExpectNoError(err)
})
})
})
@@ -243,15 +243,16 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
pod := makePodToVerifyCgroups(cgroupsToVerify)
f.PodClient().Create(pod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
By("Checking if the pod cgroup was deleted", func() {
gp := int64(1)
-Expect(f.PodClient().Delete(bestEffortPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
+err := f.PodClient().Delete(bestEffortPod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
+framework.ExpectNoError(err)
pod := makePodToVerifyCgroupRemoved("besteffort/pod" + podUID)
f.PodClient().Create(pod)
-err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
+framework.ExpectNoError(err)
})
})
})
@@ -287,15 +288,16 @@ var _ = framework.KubeDescribe("Kubelet Cgroup Manager", func() {
pod := makePodToVerifyCgroups(cgroupsToVerify)
f.PodClient().Create(pod)
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
By("Checking if the pod cgroup was deleted", func() {
gp := int64(1)
-Expect(f.PodClient().Delete(burstablePod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})).NotTo(HaveOccurred())
+err := f.PodClient().Delete(burstablePod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &gp})
+framework.ExpectNoError(err)
pod := makePodToVerifyCgroupRemoved("burstable/pod" + podUID)
f.PodClient().Create(pod)
-err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+err = e2epod.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
+framework.ExpectNoError(err)
})
})
})

View File

@@ -374,7 +374,7 @@ func deletePodsSync(f *framework.Framework, pods []*v1.Pod) {
defer wg.Done()
err := f.PodClient().Delete(pod.ObjectMeta.Name, metav1.NewDeleteOptions(30))
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
Expect(e2epod.WaitForPodToDisappear(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
30*time.Second, 10*time.Minute)).NotTo(HaveOccurred())

View File

@@ -30,7 +30,6 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
@@ -190,7 +189,7 @@ func logAndVerifyResource(f *framework.Framework, rc *ResourceCollector, cpuLimi
// Obtain memory PerfData
usagePerContainer, err := rc.GetLatest()
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
e2elog.Logf("%s", formatResourceUsageStats(usagePerContainer))
usagePerNode := make(framework.ResourceUsagePerNode)

View File

@@ -31,7 +31,6 @@ import (
"k8s.io/kubernetes/test/e2e_node/services"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
@@ -84,7 +83,7 @@ var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
configFile := filepath.Join(services.KubeletRootDirectory, "config.json")
err := ioutil.WriteFile(configFile, []byte(auth), 0644)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
defer os.Remove(configFile)
// checkContainerStatus checks whether the container status matches expectation.

View File

@@ -28,7 +28,6 @@ import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
@@ -73,7 +72,7 @@ var _ = framework.KubeDescribe("Kubelet Volume Manager", func() {
},
})
err := e2epod.WaitForPodSuccessInNamespace(f.ClientSet, memoryBackedPod.Name, f.Namespace.Name)
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
By("Verifying the memory backed volume was removed from node", func() {
volumePath := fmt.Sprintf("/tmp/%s/volumes/kubernetes.io~empty-dir/%s", string(memoryBackedPod.UID), volumeName)
@@ -121,7 +120,7 @@ }
}
<-time.After(10 * time.Second)
}
-Expect(err).NotTo(HaveOccurred())
+framework.ExpectNoError(err)
})
})
})