mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 10:51:29 +00:00
Convert ExpectEqual(err, nil) to ExpectNoError(err)
This commit is contained in:
parent 389c40540d
commit c0db5b2562
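The conversion in isolation: a minimal sketch of the before/after pattern, assuming the e2e framework helpers as they existed around this commit; the assertHealth wrapper is hypothetical, for illustration only.

package e2e

import (
	"k8s.io/kubernetes/test/e2e/framework"
)

// assertHealth is a hypothetical helper showing both forms side by side.
func assertHealth(err error) {
	// Before: a literal equality assertion against nil. On failure the
	// report only says the two values were not equal.
	framework.ExpectEqual(err, nil)

	// After: the dedicated helper, which fails the spec with the error
	// attached, so the root cause shows up directly in the test output.
	framework.ExpectNoError(err)
}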
@@ -43,7 +43,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 		c = f.ClientSet
 		ns = f.Namespace.Name
 		systemPods, err := e2epod.GetPodsInNamespace(c, ns, map[string]string{})
-		framework.ExpectEqual(err, nil)
+		framework.ExpectNoError(err)
 		systemPodsNo = int32(len(systemPods))
 		if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
 			framework.Failf("Test does not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
@@ -94,13 +94,13 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 		// the cluster is restored to health.
 		ginkgo.By("waiting for system pods to successfully restart")
 		err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
-		framework.ExpectEqual(err, nil)
+		framework.ExpectNoError(err)
 	})

 	ginkgo.It("node lease should be deleted when corresponding node is deleted", func() {
 		leaseClient := c.CoordinationV1().Leases(v1.NamespaceNodeLease)
 		err := e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute)
-		framework.ExpectEqual(err, nil)
+		framework.ExpectNoError(err)

 		ginkgo.By("verify node lease exists for every node")
 		originalNodes, err := e2enode.GetReadySchedulableNodes(c)
@@ -124,11 +124,11 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 		targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes - 1)
 		ginkgo.By(fmt.Sprintf("decreasing cluster size to %d", targetNumNodes))
 		err = framework.ResizeGroup(group, targetNumNodes)
-		framework.ExpectEqual(err, nil)
+		framework.ExpectNoError(err)
 		err = framework.WaitForGroupSize(group, targetNumNodes)
-		framework.ExpectEqual(err, nil)
+		framework.ExpectNoError(err)
 		err = e2enode.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes-1, 10*time.Minute)
-		framework.ExpectEqual(err, nil)
+		framework.ExpectNoError(err)
 		targetNodes, err := e2enode.GetReadySchedulableNodes(c)
 		framework.ExpectNoError(err)
 		framework.ExpectEqual(len(targetNodes.Items), int(targetNumNodes))
@@ -61,7 +61,7 @@ var _ = SIGDescribe("[Feature:CloudProvider][Disruptive] Nodes", func() {
 		}

 		newNodes, err := e2enode.CheckReady(c, len(origNodes.Items)-1, 5*time.Minute)
-		framework.ExpectEqual(err, nil)
+		framework.ExpectNoError(err)
 		framework.ExpectEqual(len(newNodes), len(origNodes.Items)-1)

 		_, err = c.CoreV1().Nodes().Get(nodeToDelete.Name, metav1.GetOptions{})
@@ -79,7 +79,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 	ginkgo.Context("once the node is setup", func() {
 		ginkgo.It("container runtime's oom-score-adj should be -999", func() {
 			runtimePids, err := getPidsForProcess(framework.TestContext.ContainerRuntimeProcessName, framework.TestContext.ContainerRuntimePidFile)
-			gomega.Expect(err).To(gomega.BeNil(), "failed to get list of container runtime pids")
+			framework.ExpectNoError(err, "failed to get list of container runtime pids")
 			for _, pid := range runtimePids {
 				gomega.Eventually(func() error {
 					return validateOOMScoreAdjSetting(pid, -999)
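The hunks in this file convert the gomega-style nil assertion rather than ExpectEqual; the explain string carries over unchanged. A minimal sketch of that variant, assuming gomega and the framework package are imported as in these tests; checkPids is a hypothetical wrapper.

package e2e

import (
	"github.com/onsi/gomega"

	"k8s.io/kubernetes/test/e2e/framework"
)

// checkPids is a hypothetical helper showing the gomega-to-framework swap.
func checkPids(err error) {
	// Before: gomega equality against nil; the message goes to the matcher.
	gomega.Expect(err).To(gomega.BeNil(), "failed to get list of container runtime pids")

	// After: framework.ExpectNoError accepts the same explain arguments.
	framework.ExpectNoError(err, "failed to get list of container runtime pids")
}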
@@ -88,7 +88,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 		})
 		ginkgo.It("Kubelet's oom-score-adj should be -999", func() {
 			kubeletPids, err := getPidsForProcess(kubeletProcessName, "")
-			gomega.Expect(err).To(gomega.BeNil(), "failed to get list of kubelet pids")
+			framework.ExpectNoError(err, "failed to get list of kubelet pids")
 			framework.ExpectEqual(len(kubeletPids), 1, "expected only one kubelet process; found %d", len(kubeletPids))
 			gomega.Eventually(func() error {
 				return validateOOMScoreAdjSetting(kubeletPids[0], -999)
@@ -100,7 +100,7 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
 			// created before this test, and may not be infra
 			// containers. They should be excluded from the test.
 			existingPausePIDs, err := getPidsForProcess("pause", "")
-			gomega.Expect(err).To(gomega.BeNil(), "failed to list all pause processes on the node")
+			framework.ExpectNoError(err, "failed to list all pause processes on the node")
 			existingPausePIDSet := sets.NewInt(existingPausePIDs...)

 			podClient := f.PodClient()
@@ -102,7 +102,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 		podResources, err := getNodeDevices()
 		var resourcesForOurPod *kubeletpodresourcesv1alpha1.PodResources
 		framework.Logf("pod resources %v", podResources)
-		gomega.Expect(err).To(gomega.BeNil())
+		framework.ExpectNoError(err)
 		framework.ExpectEqual(len(podResources.PodResources), 2)
 		for _, res := range podResources.GetPodResources() {
 			if res.Name == pod1.Name {
@@ -104,7 +104,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete

 		nodeTime = time.Now()
 		bootTime, err = util.GetBootTime()
-		gomega.Expect(err).To(gomega.BeNil())
+		framework.ExpectNoError(err)

 		// Set lookback duration longer than node up time.
 		// Assume the test won't take more than 1 hour, in fact it usually only takes 90 seconds.
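For reference, framework.ExpectNoError around this time was a thin wrapper over gomega's HaveOccurred matcher, roughly as sketched below; this is an approximation from the framework of that era, not the exact source.

// Approximate shape of the helper being adopted by this commit.
func ExpectNoError(err error, explain ...interface{}) {
	ExpectNoErrorWithOffset(1, err, explain...)
}

// ExpectNoErrorWithOffset reports the failure at the caller's line
// rather than inside the helper, via gomega's offset mechanism.
func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
	gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}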