Merge pull request #121888 from SD-13/e2e-gomega-be-true-or-false

Enhance boolean assertions when they fail
Kubernetes Prow Robot 2024-08-20 04:24:42 -07:00 committed by GitHub
commit d770dd695a
35 changed files with 135 additions and 161 deletions

View File

@ -26,7 +26,7 @@ Usage: $0 [-r <revision>|-a] [-s] [-c none|<config>] [-- <golangci-lint run flag
-a: automatically select the common base of origin/master and HEAD -a: automatically select the common base of origin/master and HEAD
as revision as revision
-s: select a strict configuration for new code -s: select a strict configuration for new code
-n: in addition to strict checking, also enable hints (aka nits) that may are may not -n: in addition to strict checking, also enable hints (aka nits) that may or may not
be useful be useful
-g <github action file>: also write results with --out-format=github-actions -g <github action file>: also write results with --out-format=github-actions
to a separate file to a separate file

View File

@ -308,7 +308,7 @@ var _ = SIGDescribe("Discovery", func() {
break break
} }
} }
gomega.Expect(foundResource).To(gomega.BeTrue(), "Resource %q was not found inside of resourceList\n%#v", t.validResource, resourceList.APIResources) gomega.Expect(foundResource).To(gomega.BeTrueBecause("Resource %q was not found inside of resourceList\n%#v", t.validResource, resourceList.APIResources))
} }
}) })
}) })
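The change above is the core pattern of this PR: an assertion that previously passed its explanation as gomega's optional annotation argument now embeds it in the matcher via gomega.BeTrueBecause, which takes a printf-style reason. A minimal sketch outside the Kubernetes tree (the hasResource helper and the resource list are made up for illustration):

package assertions_test

import (
	"testing"

	"github.com/onsi/gomega"
)

// hasResource is a hypothetical stand-in for the e2e discovery lookup above.
func hasResource(resources []string, want string) bool {
	for _, r := range resources {
		if r == want {
			return true
		}
	}
	return false
}

func TestBeTrueBecause(t *testing.T) {
	g := gomega.NewWithT(t)
	resources := []string{"pods", "services"}

	// Old style: plain BeTrue with the explanation as the optional description argument.
	g.Expect(hasResource(resources, "pods")).To(gomega.BeTrue(), "resource %q was not found in %v", "pods", resources)

	// New style: BeTrueBecause carries the printf-style reason inside the matcher itself.
	g.Expect(hasResource(resources, "pods")).To(gomega.BeTrueBecause("resource %q was not found in %v", "pods", resources))
}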

View File

@ -72,7 +72,7 @@ var _ = SIGDescribe("client-go should negotiate", func() {
defer w.Stop() defer w.Stop()
evt, ok := <-w.ResultChan() evt, ok := <-w.ResultChan()
o.Expect(ok).To(o.BeTrue()) o.Expect(ok).To(o.BeTrueBecause("unexpected watch event: %v, %#v", evt.Type, evt.Object))
switch evt.Type { switch evt.Type {
case watch.Added, watch.Modified: case watch.Added, watch.Modified:
// this is allowed // this is allowed

View File

@ -825,7 +825,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
_, err := createValidatingWebhookConfiguration(ctx, f, validatingWebhookConfiguration) _, err := createValidatingWebhookConfiguration(ctx, f, validatingWebhookConfiguration)
gomega.Expect(err).To(gomega.HaveOccurred(), "create validatingwebhookconfiguration should have been denied by the api-server") gomega.Expect(err).To(gomega.HaveOccurred(), "create validatingwebhookconfiguration should have been denied by the api-server")
expectedErrMsg := "compilation failed" expectedErrMsg := "compilation failed"
gomega.Expect(strings.Contains(err.Error(), expectedErrMsg)).To(gomega.BeTrue()) gomega.Expect(err).To(gomega.MatchError(gomega.ContainSubstring(expectedErrMsg)))
}) })
/* /*
@ -849,7 +849,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
_, err := createMutatingWebhookConfiguration(ctx, f, mutatingWebhookConfiguration) _, err := createMutatingWebhookConfiguration(ctx, f, mutatingWebhookConfiguration)
gomega.Expect(err).To(gomega.HaveOccurred(), "create mutatingwebhookconfiguration should have been denied by the api-server") gomega.Expect(err).To(gomega.HaveOccurred(), "create mutatingwebhookconfiguration should have been denied by the api-server")
expectedErrMsg := "compilation failed" expectedErrMsg := "compilation failed"
gomega.Expect(strings.Contains(err.Error(), expectedErrMsg)).To(gomega.BeTrue()) gomega.Expect(err).To(gomega.MatchError(gomega.ContainSubstring(expectedErrMsg)))
}) })
/* /*
@ -908,7 +908,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
"mutation-start": "yes", "mutation-start": "yes",
"mutation-stage-1": "yes", "mutation-stage-1": "yes",
} }
gomega.Expect(reflect.DeepEqual(expectedConfigMapData, mutatedCM.Data)).To(gomega.BeTrue()) gomega.Expect(mutatedCM.Data).Should(gomega.Equal(expectedConfigMapData))
ginkgo.By("create the configmap with 'skip-me' name") ginkgo.By("create the configmap with 'skip-me' name")
@ -918,7 +918,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
expectedConfigMapData = map[string]string{ expectedConfigMapData = map[string]string{
"mutation-start": "yes", "mutation-start": "yes",
} }
gomega.Expect(reflect.DeepEqual(expectedConfigMapData, skippedCM.Data)).To(gomega.BeTrue()) gomega.Expect(skippedCM.Data).Should(gomega.Equal(expectedConfigMapData))
}) })
}) })
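The webhook hunks above also replace two boolean reductions with richer matchers: strings.Contains(err.Error(), ...) becomes gomega.MatchError(gomega.ContainSubstring(...)), and reflect.DeepEqual becomes gomega.Equal, so a failure reports the actual error text or a diff instead of "expected true". A small sketch with made-up values:

package assertions_test

import (
	"errors"
	"testing"

	"github.com/onsi/gomega"
)

func TestErrorAndDeepEqualMatchers(t *testing.T) {
	g := gomega.NewWithT(t)

	// MatchError can wrap another matcher, so the full error text appears in the failure output.
	err := errors.New("admission webhook denied the request: compilation failed for expression")
	g.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("compilation failed")))

	// Equal performs a deep comparison, so there is no need to collapse it into a
	// boolean with reflect.DeepEqual first.
	expected := map[string]string{"mutation-start": "yes", "mutation-stage-1": "yes"}
	actual := map[string]string{"mutation-start": "yes", "mutation-stage-1": "yes"}
	g.Expect(actual).To(gomega.Equal(expected))
}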

View File

@ -209,8 +209,8 @@ var _ = SIGDescribe("DisruptionController", func() {
return false, err return false, err
} }
return isPDBErroring(pdb), nil return isPDBErroring(pdb), nil
}, 1*time.Minute, 1*time.Second).ShouldNot(gomega.BeTrue(), "pod shouldn't error for "+ }, 1*time.Minute, 1*time.Second).ShouldNot(gomega.BeTrueBecause("pod shouldn't error for " +
"unmanaged pod") "unmanaged pod"))
}) })
evictionCases := []struct { evictionCases := []struct {
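The PodDisruptionBudget hunk above keeps the negated assertion but moves the annotation into the matcher. Roughly, the polling shape looks like the sketch below; isErroring is a hypothetical stand-in for the real PDB status check, with shortened intervals:

package assertions_test

import (
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestEventuallyShouldNotBeTrue(t *testing.T) {
	g := gomega.NewWithT(t)

	// Hypothetical condition; the e2e test inspects the PDB status here.
	isErroring := func() (bool, error) { return false, nil }

	// The reason string now lives inside the matcher; the assertion itself stays negated.
	g.Eventually(func() (bool, error) {
		return isErroring()
	}, 2*time.Second, 100*time.Millisecond).ShouldNot(gomega.BeTrueBecause("pod shouldn't error for unmanaged pod"))
}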

View File

@ -113,7 +113,7 @@ var _ = SIGDescribe("Probing container", func() {
return false, err return false, err
} }
return podutil.IsPodReady(p), nil return podutil.IsPodReady(p), nil
}, 1*time.Minute, 1*time.Second).ShouldNot(gomega.BeTrue(), "pod should not be ready") }, 1*time.Minute, 1*time.Second).ShouldNot(gomega.BeTrueBecause("pod should not be ready"))
p, err := podClient.Get(ctx, p.Name, metav1.GetOptions{}) p, err := podClient.Get(ctx, p.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -726,7 +726,7 @@ done
} }
} }
return false, nil return false, nil
}, 1*time.Minute, framework.Poll).ShouldNot(gomega.BeTrue(), "should not see liveness probes") }, 1*time.Minute, framework.Poll).ShouldNot(gomega.BeTrueBecause("should not see liveness probes"))
}) })
}) })
@ -792,7 +792,7 @@ var _ = SIGDescribe(nodefeature.SidecarContainers, feature.SidecarContainers, "P
return false, err return false, err
} }
return podutil.IsPodReady(p), nil return podutil.IsPodReady(p), nil
}, 1*time.Minute, 1*time.Second).ShouldNot(gomega.BeTrue(), "pod should not be ready") }, 1*time.Minute, 1*time.Second).ShouldNot(gomega.BeTrueBecause("pod should not be ready"))
p, err := podClient.Get(ctx, p.Name, metav1.GetOptions{}) p, err := podClient.Get(ctx, p.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -1484,7 +1484,7 @@ done
} }
} }
return false, nil return false, nil
}, 1*time.Minute, framework.Poll).ShouldNot(gomega.BeTrue(), "should not see liveness probes") }, 1*time.Minute, framework.Poll).ShouldNot(gomega.BeTrueBecause("should not see liveness probes"))
}) })
}) })

View File

@ -50,7 +50,7 @@ var _ = ginkgo.Describe("log", func() {
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
framework.Logf("after") framework.Logf("after")
gomega.Expect(true).To(gomega.BeFalse(), "true is never false either") gomega.Expect(true).To(gomega.BeFalseBecause("artificial assertion failure"))
}) })
ginkgo.It("fails", func() { ginkgo.It("fails", func() {
func() { func() {
@ -58,7 +58,7 @@ var _ = ginkgo.Describe("log", func() {
}() }()
}) })
ginkgo.It("asserts", func() { ginkgo.It("asserts", func() {
gomega.Expect(false).To(gomega.BeTrue(), "false is never true") gomega.Expect(false).To(gomega.BeTrueBecause("artificial assertion failure"))
}) })
ginkgo.It("error", func() { ginkgo.It("error", func() {
err := errors.New("an error with a long, useless description") err := errors.New("an error with a long, useless description")
@ -106,10 +106,7 @@ In [It] at: log_test.go:57 <time>
< Exit [It] fails - log_test.go:55 <time> < Exit [It] fails - log_test.go:55 <time>
> Enter [AfterEach] log - log_test.go:51 <time> > Enter [AfterEach] log - log_test.go:51 <time>
<klog> log_test.go:52] after <klog> log_test.go:52] after
[FAILED] true is never false either [FAILED] artificial assertion failure
Expected
<bool>: true
to be false
In [AfterEach] at: log_test.go:53 <time> In [AfterEach] at: log_test.go:53 <time>
< Exit [AfterEach] log - log_test.go:51 <time> < Exit [AfterEach] log - log_test.go:51 <time>
`, `,
@ -119,10 +116,7 @@ In [AfterEach] at: log_test.go:53 <time>
Status: "failed", Status: "failed",
Failure: &reporters.JUnitFailure{ Failure: &reporters.JUnitFailure{
Type: "failed", Type: "failed",
Description: `[FAILED] false is never true Description: `[FAILED] artificial assertion failure
Expected
<bool>: false
to be true
In [It] at: log_test.go:61 <time> In [It] at: log_test.go:61 <time>
There were additional failures detected after the initial failure. These are visible in the timeline There were additional failures detected after the initial failure. These are visible in the timeline
@ -132,18 +126,12 @@ There were additional failures detected after the initial failure. These are vis
<klog> log_test.go:49] before <klog> log_test.go:49] before
< Exit [BeforeEach] log - log_test.go:48 <time> < Exit [BeforeEach] log - log_test.go:48 <time>
> Enter [It] asserts - log_test.go:60 <time> > Enter [It] asserts - log_test.go:60 <time>
[FAILED] false is never true [FAILED] artificial assertion failure
Expected
<bool>: false
to be true
In [It] at: log_test.go:61 <time> In [It] at: log_test.go:61 <time>
< Exit [It] asserts - log_test.go:60 <time> < Exit [It] asserts - log_test.go:60 <time>
> Enter [AfterEach] log - log_test.go:51 <time> > Enter [AfterEach] log - log_test.go:51 <time>
<klog> log_test.go:52] after <klog> log_test.go:52] after
[FAILED] true is never false either [FAILED] artificial assertion failure
Expected
<bool>: true
to be false
In [AfterEach] at: log_test.go:53 <time> In [AfterEach] at: log_test.go:53 <time>
< Exit [AfterEach] log - log_test.go:51 <time> < Exit [AfterEach] log - log_test.go:51 <time>
`, `,
@ -174,10 +162,7 @@ In [It] at: log_test.go:65 <time>
< Exit [It] error - log_test.go:63 <time> < Exit [It] error - log_test.go:63 <time>
> Enter [AfterEach] log - log_test.go:51 <time> > Enter [AfterEach] log - log_test.go:51 <time>
<klog> log_test.go:52] after <klog> log_test.go:52] after
[FAILED] true is never false either [FAILED] artificial assertion failure
Expected
<bool>: true
to be false
In [AfterEach] at: log_test.go:53 <time> In [AfterEach] at: log_test.go:53 <time>
< Exit [AfterEach] log - log_test.go:51 <time> < Exit [AfterEach] log - log_test.go:51 <time>
`, `,
@ -210,10 +195,7 @@ In [It] at: log_test.go:68 <time>
< Exit [It] equal - log_test.go:67 <time> < Exit [It] equal - log_test.go:67 <time>
> Enter [AfterEach] log - log_test.go:51 <time> > Enter [AfterEach] log - log_test.go:51 <time>
<klog> log_test.go:52] after <klog> log_test.go:52] after
[FAILED] true is never false either [FAILED] artificial assertion failure
Expected
<bool>: true
to be false
In [AfterEach] at: log_test.go:53 <time> In [AfterEach] at: log_test.go:53 <time>
< Exit [AfterEach] log - log_test.go:51 <time> < Exit [AfterEach] log - log_test.go:51 <time>
`, `,
@ -238,10 +220,7 @@ In [It] at: log_test.go:44 <time>
< Exit [It] fails with helper - log_test.go:70 <time> < Exit [It] fails with helper - log_test.go:70 <time>
> Enter [AfterEach] log - log_test.go:51 <time> > Enter [AfterEach] log - log_test.go:51 <time>
<klog> log_test.go:52] after <klog> log_test.go:52] after
[FAILED] true is never false either [FAILED] artificial assertion failure
Expected
<bool>: true
to be false
In [AfterEach] at: log_test.go:53 <time> In [AfterEach] at: log_test.go:53 <time>
< Exit [AfterEach] log - log_test.go:51 <time> < Exit [AfterEach] log - log_test.go:51 <time>
`, `,
@ -251,10 +230,7 @@ In [AfterEach] at: log_test.go:53 <time>
Status: "failed", Status: "failed",
Failure: &reporters.JUnitFailure{ Failure: &reporters.JUnitFailure{
Type: "failed", Type: "failed",
Description: `[FAILED] true is never false either Description: `[FAILED] artificial assertion failure
Expected
<bool>: true
to be false
In [AfterEach] at: log_test.go:53 <time> In [AfterEach] at: log_test.go:53 <time>
`, `,
}, },
@ -267,10 +243,7 @@ In [AfterEach] at: log_test.go:53 <time>
< Exit [It] redirects klog - log_test.go:73 <time> < Exit [It] redirects klog - log_test.go:73 <time>
> Enter [AfterEach] log - log_test.go:51 <time> > Enter [AfterEach] log - log_test.go:51 <time>
<klog> log_test.go:52] after <klog> log_test.go:52] after
[FAILED] true is never false either [FAILED] artificial assertion failure
Expected
<bool>: true
to be false
In [AfterEach] at: log_test.go:53 <time> In [AfterEach] at: log_test.go:53 <time>
< Exit [AfterEach] log - log_test.go:51 <time> < Exit [AfterEach] log - log_test.go:51 <time>
`, `,
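The expected output updated above shows the practical effect of the switch: with a plain BeTrue plus annotation, gomega appends its generic "Expected <bool>: ... to be ..." block after the annotation, whereas BeTrueBecause reports only the reason. A small illustration (skipped on purpose, since both assertions fail by design):

package assertions_test

import (
	"testing"

	"github.com/onsi/gomega"
)

func TestFailureMessageShape(t *testing.T) {
	t.Skip("illustration only; both assertions below intentionally fail")
	g := gomega.NewWithT(t)

	// Old style: the annotation is printed, followed by gomega's generic block, roughly:
	//   false is never true
	//   Expected
	//       <bool>: false
	//   to be true
	g.Expect(false).To(gomega.BeTrue(), "false is never true")

	// New style: only the reason is printed:
	//   artificial assertion failure
	g.Expect(false).To(gomega.BeTrueBecause("artificial assertion failure"))
}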

View File

@ -266,7 +266,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
} }
// If the image policy is not PullAlways, the image must be in the pre-pull list and // If the image policy is not PullAlways, the image must be in the pre-pull list and
// pre-pulled. // pre-pulled.
gomega.Expect(ImagePrePullList.Has(c.Image)).To(gomega.BeTrue(), "Image %q is not in the pre-pull list, consider adding it to PrePulledImages in test/e2e/common/util.go or NodePrePullImageList in test/e2e_node/image_list.go", c.Image) gomega.Expect(ImagePrePullList.Has(c.Image)).To(gomega.BeTrueBecause("Image %q is not in the pre-pull list, consider adding it to PrePulledImages in test/e2e/common/util.go or NodePrePullImageList in test/e2e_node/image_list.go", c.Image))
// Do not pull images during the tests because the images in pre-pull list should have // Do not pull images during the tests because the images in pre-pull list should have
// been prepulled. // been prepulled.
c.ImagePullPolicy = v1.PullNever c.ImagePullPolicy = v1.PullNever

View File

@ -178,7 +178,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,
gomega.Expect(l.resource.Sc.AllowVolumeExpansion).NotTo(gomega.BeNil()) gomega.Expect(l.resource.Sc.AllowVolumeExpansion).NotTo(gomega.BeNil())
allowVolumeExpansion := *l.resource.Sc.AllowVolumeExpansion allowVolumeExpansion := *l.resource.Sc.AllowVolumeExpansion
gomega.Expect(allowVolumeExpansion).To(gomega.BeFalse()) gomega.Expect(allowVolumeExpansion).To(gomega.BeFalseBecause("expected AllowVolumeExpansion value to be false"))
ginkgo.By("Expanding non-expandable pvc") ginkgo.By("Expanding non-expandable pvc")
currentPvcSize := l.resource.Pvc.Spec.Resources.Requests[v1.ResourceStorage] currentPvcSize := l.resource.Pvc.Spec.Resources.Requests[v1.ResourceStorage]
newSize := currentPvcSize.DeepCopy() newSize := currentPvcSize.DeepCopy()

View File

@ -116,7 +116,7 @@ func runDensityBatchTest(ctx context.Context, f *framework.Framework, testArg de
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return len(watchTimes) == testArg.podsNr return len(watchTimes) == testArg.podsNr
}, 10*time.Minute, 10*time.Second).Should(gomega.BeTrue()) }, 10*time.Minute, 10*time.Second).Should(gomega.BeTrueBecause("All pods should be observed by the watch."))
if len(watchTimes) < testArg.podsNr { if len(watchTimes) < testArg.podsNr {
framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.") framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.")

View File

@ -141,11 +141,10 @@ var _ = sigDescribe(feature.Windows, "GMSA Full", framework.WithSerial(), framew
// and authenticating with it. // and authenticating with it.
ginkgo.By("checking that nltest /QUERY returns successfully") ginkgo.By("checking that nltest /QUERY returns successfully")
var output string var output string
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() error {
output, err = runKubectlExecInNamespace(f.Namespace.Name, podName, "nltest", "/QUERY") output, err = runKubectlExecInNamespace(f.Namespace.Name, podName, "nltest", "/QUERY")
if err != nil { if err != nil {
framework.Logf("unable to run command in container via exec: %s", err) return fmt.Errorf("unable to run command in container via exec: %w", err)
return false
} }
if !isValidOutput(output) { if !isValidOutput(output) {
@ -153,14 +152,12 @@ var _ = sigDescribe(feature.Windows, "GMSA Full", framework.WithSerial(), framew
// https://kubernetes.io/docs/tasks/configure-pod-container/configure-gmsa/#troubleshooting // https://kubernetes.io/docs/tasks/configure-pod-container/configure-gmsa/#troubleshooting
output, err = runKubectlExecInNamespace(f.Namespace.Name, podName, "nltest", fmt.Sprintf("/sc_reset:%s", gmsaDomain)) output, err = runKubectlExecInNamespace(f.Namespace.Name, podName, "nltest", fmt.Sprintf("/sc_reset:%s", gmsaDomain))
if err != nil { if err != nil {
framework.Logf("unable to run command in container via exec: %s", err) return fmt.Errorf("unable to run command in container via exec: %w", err)
return false
} }
framework.Logf("failed to connect to domain; tried resetting the domain, output:\n%s", string(output)) return fmt.Errorf("failed to connect to domain; tried resetting the domain, output:\n%v", string(output))
return false
} }
return true return nil
}, 1*time.Minute, 1*time.Second).Should(gomega.BeTrue()) }, 1*time.Minute, 1*time.Second).Should(gomega.Succeed())
}) })
ginkgo.It("can read and write file to remote SMB folder", func(ctx context.Context) { ginkgo.It("can read and write file to remote SMB folder", func(ctx context.Context) {
@ -208,16 +205,16 @@ var _ = sigDescribe(feature.Windows, "GMSA Full", framework.WithSerial(), framew
ginkgo.By("checking that file can be read and write from the remote folder successfully") ginkgo.By("checking that file can be read and write from the remote folder successfully")
filePath := fmt.Sprintf("\\\\%s\\%s\\write-test-%s.txt", gmsaDomainIP, gmsaSharedFolder, string(uuid.NewUUID())[0:4]) filePath := fmt.Sprintf("\\\\%s\\%s\\write-test-%s.txt", gmsaDomainIP, gmsaSharedFolder, string(uuid.NewUUID())[0:4])
gomega.Eventually(ctx, func() bool {
gomega.Eventually(ctx, func() error {
// The filePath is a remote folder, do not change the format of it // The filePath is a remote folder, do not change the format of it
_, _ = runKubectlExecInNamespace(f.Namespace.Name, podName, "--", "powershell.exe", "-Command", "echo 'This is a test file.' > "+filePath) _, _ = runKubectlExecInNamespace(f.Namespace.Name, podName, "--", "powershell.exe", "-Command", "echo 'This is a test file.' > "+filePath)
output, err := runKubectlExecInNamespace(f.Namespace.Name, podName, "powershell.exe", "--", "cat", filePath) _, err := runKubectlExecInNamespace(f.Namespace.Name, podName, "powershell.exe", "--", "cat", filePath)
if err != nil { if err != nil {
framework.Logf("unable to get file from AD server: %s", err) return err
return false
} }
return strings.Contains(output, "This is a test file.") return nil
}, 1*time.Minute, 1*time.Second).Should(gomega.BeTrue()) }, 1*time.Minute, 1*time.Second).Should(gomega.Succeed())
}) })
}) })
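The GMSA hunks above switch the polled callback from returning a bool to returning an error, matched with gomega.Succeed(), so a timeout reports the last underlying error instead of "expected true". A sketch with a made-up runQuery helper in place of runKubectlExecInNamespace, with shortened intervals:

package assertions_test

import (
	"fmt"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestEventuallySucceed(t *testing.T) {
	g := gomega.NewWithT(t)

	// Hypothetical stand-in for runKubectlExecInNamespace.
	runQuery := func() (string, error) { return "connection status = 0 0x0 NERR_Success", nil }

	g.Eventually(func() error {
		output, err := runQuery()
		if err != nil {
			// Wrapping the error keeps the underlying cause in the timeout report.
			return fmt.Errorf("unable to run command in container via exec: %w", err)
		}
		if output == "" {
			return fmt.Errorf("empty output from nltest")
		}
		return nil
	}, 2*time.Second, 100*time.Millisecond).Should(gomega.Succeed())
}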

View File

@ -113,10 +113,13 @@ var _ = sigDescribe(feature.Windows, "GMSA Kubelet", framework.WithSlow(), skipU
// even for bogus creds, `nltest /PARENTDOMAIN` simply returns the AD domain, which is enough for our purpose here. // even for bogus creds, `nltest /PARENTDOMAIN` simply returns the AD domain, which is enough for our purpose here.
// note that the "eventually" part seems to be needed to account for the fact that powershell containers // note that the "eventually" part seems to be needed to account for the fact that powershell containers
// are a bit slow to become responsive, even when docker reports them as running. // are a bit slow to become responsive, even when docker reports them as running.
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() error {
output, err = e2ekubectl.RunKubectl(f.Namespace.Name, "exec", namespaceOption, podName, containerOption, "--", "nltest", "/PARENTDOMAIN") output, err = e2ekubectl.RunKubectl(f.Namespace.Name, "exec", namespaceOption, podName, containerOption, "--", "nltest", "/PARENTDOMAIN")
return err == nil if err != nil {
}, 1*time.Minute, 1*time.Second).Should(gomega.BeTrue()) return err
}
return nil
}, 1*time.Minute, 1*time.Second).Should(gomega.Succeed())
if !strings.HasPrefix(output, domain) { if !strings.HasPrefix(output, domain) {
framework.Failf("Expected %q to start with %q", output, domain) framework.Failf("Expected %q to start with %q", output, domain)

View File

@ -173,7 +173,7 @@ func overrideAllocatableMemoryTest(ctx context.Context, f *framework.Framework,
} }
} }
return false return false
}, 3*time.Minute, 10*time.Second).Should(gomega.BeTrue()) }, 3*time.Minute, 10*time.Second).Should(gomega.BeTrueBecause("Expected %s pod to be failed scheduling", podName))
} }
func getNodeMemory(ctx context.Context, f *framework.Framework, node v1.Node) nodeMemory { func getNodeMemory(ctx context.Context, f *framework.Framework, node v1.Node) nodeMemory {

View File

@ -102,7 +102,7 @@ var _ = sigDescribe(feature.Windows, "SecurityContext", skipUnlessWindows(func()
} }
return false return false
}, framework.PodStartTimeout, 1*time.Second).Should(gomega.BeTrue()) }, framework.PodStartTimeout, 1*time.Second).Should(gomega.BeTrueBecause("expected pod to be terminated"))
}) })
ginkgo.It("should not be able to create pods with unknown usernames at Container level", func(ctx context.Context) { ginkgo.It("should not be able to create pods with unknown usernames at Container level", func(ctx context.Context) {

View File

@ -69,7 +69,7 @@ var _ = Describe("kubeadm-certs [copy-certs]", func() {
gomega.Expect(s.OwnerReferences).To(gomega.HaveLen(1), "%s should have one owner reference", kubeadmCertsSecretName) gomega.Expect(s.OwnerReferences).To(gomega.HaveLen(1), "%s should have one owner reference", kubeadmCertsSecretName)
ownRef := s.OwnerReferences[0] ownRef := s.OwnerReferences[0]
gomega.Expect(ownRef.Kind).To(gomega.Equal("Secret"), "%s should be owned by a secret", kubeadmCertsSecretName) gomega.Expect(ownRef.Kind).To(gomega.Equal("Secret"), "%s should be owned by a secret", kubeadmCertsSecretName)
gomega.Expect(*ownRef.BlockOwnerDeletion).To(gomega.BeTrue(), "%s should be deleted on owner deletion", kubeadmCertsSecretName) gomega.Expect(*ownRef.BlockOwnerDeletion).To(gomega.BeTrueBecause("%s should be deleted on owner deletion", kubeadmCertsSecretName))
o := GetSecret(f.ClientSet, kubeSystemNamespace, ownRef.Name) o := GetSecret(f.ClientSet, kubeSystemNamespace, ownRef.Name)
gomega.Expect(o.Type).To(gomega.Equal(corev1.SecretTypeBootstrapToken), "%s should have an owner reference that refers to a bootstrap-token", kubeadmCertsSecretName) gomega.Expect(o.Type).To(gomega.Equal(corev1.SecretTypeBootstrapToken), "%s should have an owner reference that refers to a bootstrap-token", kubeadmCertsSecretName)

View File

@ -175,7 +175,7 @@ func ExpectSubjectHasAccessToResource(c clientset.Interface, subjectKind, subjec
s, err := c.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{}) s, err := c.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{})
framework.ExpectNoError(err, "error getting SubjectAccessReview for %s %s to resource %+v", subjectKind, subject, *sar.Spec.ResourceAttributes) framework.ExpectNoError(err, "error getting SubjectAccessReview for %s %s to resource %+v", subjectKind, subject, *sar.Spec.ResourceAttributes)
gomega.Expect(s.Status.Allowed).Should(gomega.BeTrue(), "%s %s has no access to resource %+v", subjectKind, subject, *sar.Spec.ResourceAttributes) gomega.Expect(s.Status.Allowed).Should(gomega.BeTrueBecause("%s %s has no access to resource %+v", subjectKind, subject, *sar.Spec.ResourceAttributes))
} }
// matchers // matchers

View File

@ -992,7 +992,7 @@ var _ = SIGDescribe(framework.WithSerial(), "Containers Lifecycle", func() {
restartKubelet := stopKubelet() restartKubelet := stopKubelet()
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalse()) }, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalseBecause("kubelet was expected to be stopped but it is still running"))
ginkgo.By("Stopping the pod sandbox to simulate the node reboot") ginkgo.By("Stopping the pod sandbox to simulate the node reboot")
err = rs.StopPodSandbox(ctx, podSandboxID) err = rs.StopPodSandbox(ctx, podSandboxID)
@ -1002,7 +1002,7 @@ var _ = SIGDescribe(framework.WithSerial(), "Containers Lifecycle", func() {
restartKubelet() restartKubelet()
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrue()) }, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrueBecause("kubelet was expected to be healthy"))
ginkgo.By("Waiting for the pod to be re-initialized and run") ginkgo.By("Waiting for the pod to be re-initialized and run")
err = e2epod.WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "re-initialized", f.Timeouts.PodStart, func(pod *v1.Pod) (bool, error) { err = e2epod.WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "re-initialized", f.Timeouts.PodStart, func(pod *v1.Pod) (bool, error) {
@ -3656,7 +3656,7 @@ var _ = SIGDescribe(nodefeature.SidecarContainers, framework.WithSerial(), "Cont
restartKubelet := stopKubelet() restartKubelet := stopKubelet()
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalse()) }, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalseBecause("expected kubelet would have been stopped but it is still running"))
ginkgo.By("Stopping the pod sandbox to simulate the node reboot") ginkgo.By("Stopping the pod sandbox to simulate the node reboot")
err = rs.StopPodSandbox(ctx, podSandboxID) err = rs.StopPodSandbox(ctx, podSandboxID)
@ -3666,7 +3666,7 @@ var _ = SIGDescribe(nodefeature.SidecarContainers, framework.WithSerial(), "Cont
restartKubelet() restartKubelet()
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrue()) }, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrueBecause("kubelet was expected to be healthy"))
ginkgo.By("Waiting for the pod to be re-initialized and run") ginkgo.By("Waiting for the pod to be re-initialized and run")
err = e2epod.WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "re-initialized", f.Timeouts.PodStart, func(pod *v1.Pod) (bool, error) { err = e2epod.WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "re-initialized", f.Timeouts.PodStart, func(pod *v1.Pod) (bool, error) {
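The kubelet restart hunks above pair health-check polling with BeFalseBecause while the kubelet is stopped and BeTrueBecause once it is restarted. A compressed sketch with a fake probe in place of kubeletHealthCheck and shortened intervals:

package assertions_test

import (
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestHealthCheckPolling(t *testing.T) {
	g := gomega.NewWithT(t)

	// Hypothetical health probe; the e2e tests call kubeletHealthCheck(kubeletHealthCheckURL).
	healthy := false
	isHealthy := func() bool { return healthy }

	// While the service is stopped, the poll should settle on false.
	g.Eventually(isHealthy, 2*time.Second, 100*time.Millisecond).
		Should(gomega.BeFalseBecause("kubelet was expected to be stopped but it is still running"))

	// After a restart it should become true again.
	healthy = true
	g.Eventually(isHealthy, 2*time.Second, 100*time.Millisecond).
		Should(gomega.BeTrueBecause("kubelet was expected to be healthy"))
}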

View File

@ -186,7 +186,7 @@ func waitForContainerRemoval(ctx context.Context, containerName, podName, podNS
return false return false
} }
return len(containers) == 0 return len(containers) == 0
}, 2*time.Minute, 1*time.Second).Should(gomega.BeTrue()) }, 2*time.Minute, 1*time.Second).Should(gomega.BeTrueBecause("Containers were expected to be removed"))
} }
func isHTEnabled() bool { func isHTEnabled() bool {
@ -770,8 +770,8 @@ func runCPUManagerTests(f *framework.Framework) {
gomega.Expect(cpus.Size()).To(gomega.Equal(1), "expected cpu set size == 1, got %q", cpus.String()) gomega.Expect(cpus.Size()).To(gomega.Equal(1), "expected cpu set size == 1, got %q", cpus.String())
gomega.Expect(reusableCPUs.Equals(nonReusableCPUs)).To(gomega.BeTrue(), "expected reusable cpuset [%s] to be equal to non-reusable cpuset [%s]", reusableCPUs.String(), nonReusableCPUs.String()) gomega.Expect(reusableCPUs.Equals(nonReusableCPUs)).To(gomega.BeTrueBecause("expected reusable cpuset [%s] to be equal to non-reusable cpuset [%s]", reusableCPUs.String(), nonReusableCPUs.String()))
gomega.Expect(nonReusableCPUs.Intersection(cpus).IsEmpty()).To(gomega.BeTrue(), "expected non-reusable cpuset [%s] to be disjoint from cpuset [%s]", nonReusableCPUs.String(), cpus.String()) gomega.Expect(nonReusableCPUs.Intersection(cpus).IsEmpty()).To(gomega.BeTrueBecause("expected non-reusable cpuset [%s] to be disjoint from cpuset [%s]", nonReusableCPUs.String(), cpus.String()))
ginkgo.By("by deleting the pods and waiting for container removal") ginkgo.By("by deleting the pods and waiting for container removal")
deletePods(ctx, f, []string{pod.Name}) deletePods(ctx, f, []string{pod.Name})

View File

@ -366,7 +366,7 @@ func runDensityBatchTest(ctx context.Context, f *framework.Framework, rc *Resour
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return len(watchTimes) == testArg.podsNr return len(watchTimes) == testArg.podsNr
}, 10*time.Minute, 10*time.Second).Should(gomega.BeTrue()) }, 10*time.Minute, 10*time.Second).Should(gomega.BeTrueBecause("All pods should be observed by the watch."))
if len(watchTimes) < testArg.podsNr { if len(watchTimes) < testArg.podsNr {
framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.") framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.")

View File

@ -106,7 +106,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
nodes, err := e2enode.TotalReady(ctx, f.ClientSet) nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
return nodes == 1 return nodes == 1
}, time.Minute, time.Second).Should(gomega.BeTrue()) }, time.Minute, time.Second).Should(gomega.BeTrueBecause("expected node to be ready"))
// Before we run the device plugin test, we need to ensure // Before we run the device plugin test, we need to ensure
// that the cluster is in a clean state and there are no // that the cluster is in a clean state and there are no
@ -143,7 +143,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
gomega.Eventually(ctx, func(ctx context.Context) bool { gomega.Eventually(ctx, func(ctx context.Context) bool {
node, ready := getLocalTestNode(ctx, f) node, ready := getLocalTestNode(ctx, f)
return ready && CountSampleDeviceCapacity(node) > 0 return ready && CountSampleDeviceCapacity(node) > 0
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue()) }, 5*time.Minute, framework.Poll).Should(gomega.BeTrueBecause("expected devices to be available on local node"))
framework.Logf("Successfully created device plugin pod") framework.Logf("Successfully created device plugin pod")
ginkgo.By(fmt.Sprintf("Waiting for the resource exported by the sample device plugin to become available on the local node (instances: %d)", expectedSampleDevsAmount)) ginkgo.By(fmt.Sprintf("Waiting for the resource exported by the sample device plugin to become available on the local node (instances: %d)", expectedSampleDevsAmount))
@ -152,7 +152,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
return ready && return ready &&
CountSampleDeviceCapacity(node) == expectedSampleDevsAmount && CountSampleDeviceCapacity(node) == expectedSampleDevsAmount &&
CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount
}, 30*time.Second, framework.Poll).Should(gomega.BeTrue()) }, 30*time.Second, framework.Poll).Should(gomega.BeTrueBecause("expected resource to be available on local node"))
}) })
ginkgo.AfterEach(func(ctx context.Context) { ginkgo.AfterEach(func(ctx context.Context) {
@ -177,7 +177,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
gomega.Eventually(ctx, func(ctx context.Context) bool { gomega.Eventually(ctx, func(ctx context.Context) bool {
node, ready := getLocalTestNode(ctx, f) node, ready := getLocalTestNode(ctx, f)
return ready && CountSampleDeviceCapacity(node) <= 0 return ready && CountSampleDeviceCapacity(node) <= 0
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue()) }, 5*time.Minute, framework.Poll).Should(gomega.BeTrueBecause("expected devices to be unavailable on local node"))
ginkgo.By("devices now unavailable on the local node") ginkgo.By("devices now unavailable on the local node")
}) })
@ -336,7 +336,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
return ready && return ready &&
CountSampleDeviceCapacity(node) == expectedSampleDevsAmount && CountSampleDeviceCapacity(node) == expectedSampleDevsAmount &&
CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount
}, 30*time.Second, framework.Poll).Should(gomega.BeTrue()) }, 30*time.Second, framework.Poll).Should(gomega.BeTrueBecause("expected resource to be available after restart"))
ginkgo.By("Checking the same instance of the pod is still running") ginkgo.By("Checking the same instance of the pod is still running")
gomega.Eventually(ctx, getPodByName). gomega.Eventually(ctx, getPodByName).
@ -465,7 +465,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
return ready && return ready &&
CountSampleDeviceCapacity(node) == expectedSampleDevsAmount && CountSampleDeviceCapacity(node) == expectedSampleDevsAmount &&
CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount
}, 30*time.Second, framework.Poll).Should(gomega.BeTrue()) }, 30*time.Second, framework.Poll).Should(gomega.BeTrueBecause("expected resource to be available after re-registration"))
// crosscheck that after device plugin restart the device assignment is preserved and // crosscheck that after device plugin restart the device assignment is preserved and
// stable from the kubelet's perspective. // stable from the kubelet's perspective.
@ -540,7 +540,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
return ready && return ready &&
CountSampleDeviceCapacity(node) == expectedSampleDevsAmount && CountSampleDeviceCapacity(node) == expectedSampleDevsAmount &&
CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount
}, 30*time.Second, framework.Poll).Should(gomega.BeTrue()) }, 30*time.Second, framework.Poll).Should(gomega.BeTrueBecause("expected resource to be available after restart"))
ginkgo.By("Checking the same instance of the pod is still running after the device plugin restart") ginkgo.By("Checking the same instance of the pod is still running after the device plugin restart")
gomega.Eventually(ctx, getPodByName). gomega.Eventually(ctx, getPodByName).
@ -570,7 +570,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
ok := kubeletHealthCheck(kubeletHealthCheckURL) ok := kubeletHealthCheck(kubeletHealthCheckURL)
framework.Logf("kubelet health check at %q value=%v", kubeletHealthCheckURL, ok) framework.Logf("kubelet health check at %q value=%v", kubeletHealthCheckURL, ok)
return ok return ok
}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalse()) }, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalseBecause("expected kubelet health check to be failed"))
framework.Logf("Delete the pod while the kubelet is not running") framework.Logf("Delete the pod while the kubelet is not running")
// Delete pod sync by name will force delete the pod, removing it from kubelet's config // Delete pod sync by name will force delete the pod, removing it from kubelet's config
@ -584,7 +584,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
ok := kubeletHealthCheck(kubeletHealthCheckURL) ok := kubeletHealthCheck(kubeletHealthCheckURL)
framework.Logf("kubelet health check at %q value=%v", kubeletHealthCheckURL, ok) framework.Logf("kubelet health check at %q value=%v", kubeletHealthCheckURL, ok)
return ok return ok
}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrue()) }, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))
framework.Logf("wait for the pod %v to disappear", pod.Name) framework.Logf("wait for the pod %v to disappear", pod.Name)
gomega.Eventually(ctx, func(ctx context.Context) error { gomega.Eventually(ctx, func(ctx context.Context) error {
@ -714,11 +714,13 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.BeforeEach(func(ctx context.Context) {
ginkgo.By("Wait for node to be ready") ginkgo.By("Wait for node to be ready")
gomega.Eventually(ctx, func(ctx context.Context) bool { gomega.Eventually(ctx, func(ctx context.Context) error {
nodes, err := e2enode.TotalReady(ctx, f.ClientSet) _, err := e2enode.TotalReady(ctx, f.ClientSet)
framework.ExpectNoError(err) if err != nil {
return nodes == 1 return err
}, time.Minute, time.Second).Should(gomega.BeTrue()) }
return nil
}, time.Minute, time.Second).Should(gomega.Equal(1), "one node should be ready")
// Before we run the device plugin test, we need to ensure // Before we run the device plugin test, we need to ensure
// that the cluster is in a clean state and there are no // that the cluster is in a clean state and there are no
@ -800,7 +802,7 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
gomega.Eventually(ctx, func(ctx context.Context) bool { gomega.Eventually(ctx, func(ctx context.Context) bool {
node, ready := getLocalTestNode(ctx, f) node, ready := getLocalTestNode(ctx, f)
return ready && CountSampleDeviceCapacity(node) > 0 return ready && CountSampleDeviceCapacity(node) > 0
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue()) }, 5*time.Minute, framework.Poll).Should(gomega.BeTrueBecause("expected devices to be available on the local node"))
framework.Logf("Successfully created device plugin pod") framework.Logf("Successfully created device plugin pod")
ginkgo.By(fmt.Sprintf("Waiting for the resource exported by the sample device plugin to become available on the local node (instances: %d)", expectedSampleDevsAmount)) ginkgo.By(fmt.Sprintf("Waiting for the resource exported by the sample device plugin to become available on the local node (instances: %d)", expectedSampleDevsAmount))
@ -809,7 +811,7 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
return ready && return ready &&
CountSampleDeviceCapacity(node) == expectedSampleDevsAmount && CountSampleDeviceCapacity(node) == expectedSampleDevsAmount &&
CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount
}, 30*time.Second, framework.Poll).Should(gomega.BeTrue()) }, 30*time.Second, framework.Poll).Should(gomega.BeTrueBecause("expected resource to be available on local node"))
}) })
ginkgo.AfterEach(func(ctx context.Context) { ginkgo.AfterEach(func(ctx context.Context) {
@ -835,7 +837,7 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
gomega.Eventually(ctx, func(ctx context.Context) bool { gomega.Eventually(ctx, func(ctx context.Context) bool {
node, ready := getLocalTestNode(ctx, f) node, ready := getLocalTestNode(ctx, f)
return ready && CountSampleDeviceCapacity(node) <= 0 return ready && CountSampleDeviceCapacity(node) <= 0
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue()) }, 5*time.Minute, framework.Poll).Should(gomega.BeTrueBecause("expected devices to be unavailable on local node"))
ginkgo.By("devices now unavailable on the local node") ginkgo.By("devices now unavailable on the local node")
}) })
@ -934,15 +936,15 @@ func ensurePodContainerRestart(ctx context.Context, f *framework.Framework, podN
framework.Failf("ensurePodContainerRestart failed for pod %q: %v", podName, err) framework.Failf("ensurePodContainerRestart failed for pod %q: %v", podName, err)
} }
initialCount = p.Status.ContainerStatuses[0].RestartCount initialCount = p.Status.ContainerStatuses[0].RestartCount
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() int {
p, err = e2epod.NewPodClient(f).Get(ctx, podName, metav1.GetOptions{}) p, err = e2epod.NewPodClient(f).Get(ctx, podName, metav1.GetOptions{})
if err != nil || len(p.Status.ContainerStatuses) < 1 { if err != nil || len(p.Status.ContainerStatuses) < 1 {
return false return 0
} }
currentCount = p.Status.ContainerStatuses[0].RestartCount currentCount = p.Status.ContainerStatuses[0].RestartCount
framework.Logf("initial %v, current %v", initialCount, currentCount) framework.Logf("initial %v, current %v", initialCount, currentCount)
return currentCount > initialCount return int(currentCount)
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue()) }, 5*time.Minute, framework.Poll).Should(gomega.BeNumerically(">", initialCount))
} }
// parseLog returns the matching string for the specified regular expression parsed from the container logs. // parseLog returns the matching string for the specified regular expression parsed from the container logs.
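The ensurePodContainerRestart change above is a related pattern: instead of collapsing the comparison into a bool, the polled function returns the restart count itself and the assertion uses gomega.BeNumerically, so the observed value shows up in any failure. A self-contained sketch with a simulated counter and shortened intervals:

package assertions_test

import (
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestEventuallyNumericComparison(t *testing.T) {
	g := gomega.NewWithT(t)

	initialCount := 1
	restartCount := initialCount

	// Returning the value itself lets the failure message show the observed count,
	// rather than "expected true" from a boolean comparison.
	g.Eventually(func() int {
		restartCount++ // simulate the restart count increasing while we poll
		return restartCount
	}, 2*time.Second, 50*time.Millisecond).Should(gomega.BeNumerically(">", initialCount))
}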

View File

@ -225,12 +225,12 @@ var _ = SIGDescribe("HugePages", framework.WithSerial(), feature.HugePages, "[No
restartKubelet(true) restartKubelet(true)
ginkgo.By("verifying that the hugepages-3Mi resource no longer is present") ginkgo.By("verifying that the hugepages-3Mi resource no longer is present")
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() resource.Quantity {
node, err = f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{}) node, err = f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err, "while getting node status") framework.ExpectNoError(err, "while getting node status")
_, isPresent := node.Status.Capacity["hugepages-3Mi"] // abc, error := node.Status.Capacity["hugepages-3Mi"]
return isPresent return node.Status.Capacity["hugepages-3Mi"]
}, 30*time.Second, framework.Poll).Should(gomega.BeFalse()) }, 30*time.Second, framework.Poll).Should(gomega.BeNil())
}) })
ginkgo.It("should add resources for new huge page sizes on kubelet restart", func(ctx context.Context) { ginkgo.It("should add resources for new huge page sizes on kubelet restart", func(ctx context.Context) {
@ -245,12 +245,11 @@ var _ = SIGDescribe("HugePages", framework.WithSerial(), feature.HugePages, "[No
startKubelet() startKubelet()
ginkgo.By("verifying that the hugepages-2Mi resource is present") ginkgo.By("verifying that the hugepages-2Mi resource is present")
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() resource.Quantity {
node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{}) node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err, "while getting node status") framework.ExpectNoError(err, "while getting node status")
_, isPresent := node.Status.Capacity["hugepages-2Mi"] return node.Status.Capacity["hugepages-2Mi"]
return isPresent }, 30*time.Second, framework.Poll).ShouldNot(gomega.BeNil())
}, 30*time.Second, framework.Poll).Should(gomega.BeTrue())
}) })
ginkgo.When("start the pod", func() { ginkgo.When("start the pod", func() {
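The HugePages hunks above likewise make the poll return a typed value (the capacity quantity) rather than a presence flag. The sketch below expresses the same presence check with gomega.HaveKey on a made-up capacity map; this is an alternative formulation for illustration only, not the matcher used in the merged code:

package assertions_test

import (
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestEventuallyTypedReturn(t *testing.T) {
	g := gomega.NewWithT(t)

	// Hypothetical stand-in for node.Status.Capacity.
	capacity := map[string]int64{"hugepages-2Mi": 10}

	// Returning the map and matching on it keeps the observed capacity in the failure
	// output, instead of reducing the check to a bare true/false presence flag.
	g.Eventually(func() map[string]int64 {
		return capacity
	}, time.Second, 100*time.Millisecond).Should(gomega.HaveKey("hugepages-2Mi"))

	g.Eventually(func() map[string]int64 {
		return capacity
	}, time.Second, 100*time.Millisecond).ShouldNot(gomega.HaveKey("hugepages-3Mi"))
}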

View File

@ -59,7 +59,7 @@ var _ = SIGDescribe("Kubelet Config", framework.WithSlow(), framework.WithSerial
// wait until the kubelet health check will fail // wait until the kubelet health check will fail
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalse()) }, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalseBecause("expected kubelet health check to be failed"))
configDir := framework.TestContext.KubeletConfigDropinDir configDir := framework.TestContext.KubeletConfigDropinDir
@ -132,7 +132,7 @@ featureGates:
// wait until the kubelet health check will succeed // wait until the kubelet health check will succeed
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrue()) }, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))
mergedConfig, err := getCurrentKubeletConfig(ctx) mergedConfig, err := getCurrentKubeletConfig(ctx)
framework.ExpectNoError(err) framework.ExpectNoError(err)

View File

@ -43,7 +43,7 @@ var _ = SIGDescribe("Lock contention", framework.WithSlow(), framework.WithDisru
ginkgo.By("perform kubelet health check to check if kubelet is healthy and running.") ginkgo.By("perform kubelet health check to check if kubelet is healthy and running.")
// Precautionary check that kubelet is healthy before running the test. // Precautionary check that kubelet is healthy before running the test.
gomega.Expect(kubeletHealthCheck(kubeletHealthCheckURL)).To(gomega.BeTrue()) gomega.Expect(kubeletHealthCheck(kubeletHealthCheckURL)).To(gomega.BeTrueBecause("expected kubelet to be in healthy state"))
ginkgo.By("acquiring the lock on lock file i.e /var/run/kubelet.lock") ginkgo.By("acquiring the lock on lock file i.e /var/run/kubelet.lock")
// Open the file with the intention to acquire the lock, this would imitate the behaviour // Open the file with the intention to acquire the lock, this would imitate the behaviour
@ -71,6 +71,6 @@ var _ = SIGDescribe("Lock contention", framework.WithSlow(), framework.WithDisru
// It should not be as the lock contention forces the kubelet to stop. // It should not be as the lock contention forces the kubelet to stop.
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, 10*time.Second, time.Second).Should(gomega.BeFalse()) }, 10*time.Second, time.Second).Should(gomega.BeFalseBecause("expected kubelet to not be in healthy state"))
}) })
}) })

View File

@ -543,7 +543,7 @@ var _ = SIGDescribe("Memory Manager", framework.WithDisruptive(), framework.With
for _, containerMemory := range containerResource.Memory { for _, containerMemory := range containerResource.Memory {
q := c.Resources.Limits[v1.ResourceName(containerMemory.MemoryType)] q := c.Resources.Limits[v1.ResourceName(containerMemory.MemoryType)]
value, ok := q.AsInt64() value, ok := q.AsInt64()
gomega.Expect(ok).To(gomega.BeTrue()) gomega.Expect(ok).To(gomega.BeTrueBecause("cannot convert value to integer"))
gomega.Expect(value).To(gomega.BeEquivalentTo(containerMemory.Size_)) gomega.Expect(value).To(gomega.BeEquivalentTo(containerMemory.Size_))
} }
} }
@ -626,9 +626,9 @@ var _ = SIGDescribe("Memory Manager", framework.WithDisruptive(), framework.With
return true return true
}, time.Minute, 5*time.Second).Should( }, time.Minute, 5*time.Second).Should(
gomega.BeTrue(), gomega.BeTrueBecause(
"the pod succeeded to start, when it should fail with the admission error", "the pod succeeded to start, when it should fail with the admission error",
) ))
}) })
ginkgo.JustAfterEach(func(ctx context.Context) { ginkgo.JustAfterEach(func(ctx context.Context) {

View File

@ -243,7 +243,7 @@ var _ = SIGDescribe("MirrorPod", func() {
// Wait 5 mins for syncTerminatedPod to fail. We expect that the pod volume should not be cleaned up because the NFS server is down. // Wait 5 mins for syncTerminatedPod to fail. We expect that the pod volume should not be cleaned up because the NFS server is down.
gomega.Consistently(func() bool { gomega.Consistently(func() bool {
return podVolumeDirectoryExists(types.UID(hash)) return podVolumeDirectoryExists(types.UID(hash))
}, 5*time.Minute, 10*time.Second).Should(gomega.BeTrue(), "pod volume should exist while nfs server is stopped") }, 5*time.Minute, 10*time.Second).Should(gomega.BeTrueBecause("pod volume should exist while nfs server is stopped"))
ginkgo.By("Start the NFS server") ginkgo.By("Start the NFS server")
restartNfsServer(f, nfsServerPod) restartNfsServer(f, nfsServerPod)
@ -251,7 +251,7 @@ var _ = SIGDescribe("MirrorPod", func() {
ginkgo.By("Waiting for the pod volume to deleted after the NFS server is started") ginkgo.By("Waiting for the pod volume to deleted after the NFS server is started")
gomega.Eventually(func() bool { gomega.Eventually(func() bool {
return podVolumeDirectoryExists(types.UID(hash)) return podVolumeDirectoryExists(types.UID(hash))
}, 5*time.Minute, 10*time.Second).Should(gomega.BeFalse(), "pod volume should be deleted after nfs server is started") }, 5*time.Minute, 10*time.Second).Should(gomega.BeFalseBecause("pod volume should be deleted after nfs server is started"))
// Create the static pod again with the same config and expect it to start running // Create the static pod again with the same config and expect it to start running
err = createStaticPodUsingNfs(nfsServerHost, node, "sleep 999999", podPath, staticPodName, ns) err = createStaticPodUsingNfs(nfsServerHost, node, "sleep 999999", podPath, staticPodName, ns)
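The MirrorPod hunk above applies the same reason-carrying matchers to gomega.Consistently, which requires the condition to hold for the entire window rather than eventually. A sketch with a stand-in for podVolumeDirectoryExists and shortened intervals:

package assertions_test

import (
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestConsistentlyWithReason(t *testing.T) {
	g := gomega.NewWithT(t)

	// Hypothetical stand-in for podVolumeDirectoryExists.
	volumeExists := func() bool { return true }

	// Consistently requires the condition to hold for the whole window;
	// the reason explains what it means if it ever flips.
	g.Consistently(volumeExists, 500*time.Millisecond, 50*time.Millisecond).
		Should(gomega.BeTrueBecause("pod volume should exist while nfs server is stopped"))
}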

View File

@ -190,7 +190,7 @@ func runTest(ctx context.Context, f *framework.Framework) error {
// wait until the kubelet health check will fail // wait until the kubelet health check will fail
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, time.Minute, time.Second).Should(gomega.BeFalse()) }, time.Minute, time.Second).Should(gomega.BeFalseBecause("expected kubelet health check to be failed"))
framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(oldCfg)) framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(oldCfg))
@ -200,7 +200,7 @@ func runTest(ctx context.Context, f *framework.Framework) error {
// wait until the kubelet health check will succeed // wait until the kubelet health check will succeed
gomega.Eventually(ctx, func(ctx context.Context) bool { gomega.Eventually(ctx, func(ctx context.Context) bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue()) }, 2*time.Minute, 5*time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))
} }
}) })
if err := createTemporaryCgroupsForReservation(cgroupManager); err != nil { if err := createTemporaryCgroupsForReservation(cgroupManager); err != nil {
@ -218,7 +218,7 @@ func runTest(ctx context.Context, f *framework.Framework) error {
// wait until the kubelet health check will fail // wait until the kubelet health check will fail
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, time.Minute, time.Second).Should(gomega.BeFalse()) }, time.Minute, time.Second).Should(gomega.BeFalseBecause("expected kubelet health check to be failed"))
expectedNAPodCgroup := cm.NewCgroupName(cm.RootCgroupName, nodeAllocatableCgroup) expectedNAPodCgroup := cm.NewCgroupName(cm.RootCgroupName, nodeAllocatableCgroup)
@ -240,7 +240,7 @@ func runTest(ctx context.Context, f *framework.Framework) error {
// wait until the kubelet health check will succeed // wait until the kubelet health check will succeed
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue()) }, 2*time.Minute, 5*time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))
if err != nil { if err != nil {
return err return err

View File

@ -57,7 +57,7 @@ func setKubeletConfig(ctx context.Context, f *framework.Framework, cfg *kubeletc
// wait until the kubelet health check will fail // wait until the kubelet health check will fail
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, time.Minute, time.Second).Should(gomega.BeFalse()) }, time.Minute, time.Second).Should(gomega.BeFalseBecause("expected kubelet health check to be failed"))
framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(cfg)) framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(cfg))
@ -67,7 +67,7 @@ func setKubeletConfig(ctx context.Context, f *framework.Framework, cfg *kubeletc
// wait until the kubelet health check will succeed // wait until the kubelet health check will succeed
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue()) }, 2*time.Minute, 5*time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))
} }
// Wait for the Kubelet to be ready. // Wait for the Kubelet to be ready.
@ -75,7 +75,7 @@ func setKubeletConfig(ctx context.Context, f *framework.Framework, cfg *kubeletc
nodes, err := e2enode.TotalReady(ctx, f.ClientSet) nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
return nodes == 1 return nodes == 1
}, time.Minute, time.Second).Should(gomega.BeTrue()) }, time.Minute, time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in ready state"))
} }
// Serial because the test updates kubelet configuration. // Serial because the test updates kubelet configuration.

View File

@ -126,7 +126,7 @@ func runPodFailingConditionsTest(f *framework.Framework, hasInitContainers, chec
// Verify PodInitialized is set if init containers are not present (since without init containers, it gets set very early) // Verify PodInitialized is set if init containers are not present (since without init containers, it gets set very early)
initializedTime, err := getTransitionTimeForPodConditionWithStatus(p, v1.PodInitialized, true) initializedTime, err := getTransitionTimeForPodConditionWithStatus(p, v1.PodInitialized, true)
framework.ExpectNoError(err) framework.ExpectNoError(err)
gomega.Expect(initializedTime.Before(scheduledTime)).NotTo(gomega.BeTrue(), fmt.Sprintf("pod without init containers is initialized at: %v which is before pod scheduled at: %v", initializedTime, scheduledTime)) gomega.Expect(initializedTime.Before(scheduledTime)).NotTo(gomega.BeTrueBecause("pod without init containers is initialized at: %v which is before pod scheduled at: %v", initializedTime, scheduledTime))
} }
// Verify ContainersReady is not set (since sandboxcreation is blocked) // Verify ContainersReady is not set (since sandboxcreation is blocked)
@ -228,28 +228,28 @@ func runPodReadyConditionsTest(f *framework.Framework, hasInitContainers, checkP
if hasInitContainers { if hasInitContainers {
// With init containers, verify the sequence of conditions is: Scheduled => PodReadyToStartContainers => Initialized // With init containers, verify the sequence of conditions is: Scheduled => PodReadyToStartContainers => Initialized
gomega.Expect(readyToStartContainersTime.Before(scheduledTime)).ToNot(gomega.BeTrue(), fmt.Sprintf("pod with init containers is initialized at: %v which is before pod has ready to start at: %v", initializedTime, readyToStartContainersTime)) gomega.Expect(readyToStartContainersTime.Before(scheduledTime)).ToNot(gomega.BeTrueBecause("pod with init containers is initialized at: %v which is before pod has ready to start at: %v", initializedTime, readyToStartContainersTime))
gomega.Expect(initializedTime.Before(readyToStartContainersTime)).ToNot(gomega.BeTrue(), fmt.Sprintf("pod with init containers is initialized at: %v which is before pod has ready to start at: %v", initializedTime, readyToStartContainersTime)) gomega.Expect(initializedTime.Before(readyToStartContainersTime)).ToNot(gomega.BeTrueBecause("pod with init containers is initialized at: %v which is before pod has ready to start at: %v", initializedTime, readyToStartContainersTime))
} else { } else {
// Without init containers, verify the sequence of conditions is: Scheduled => Initialized => PodReadyToStartContainers // Without init containers, verify the sequence of conditions is: Scheduled => Initialized => PodReadyToStartContainers
condBeforeContainersReadyTransitionTime = readyToStartContainersTime condBeforeContainersReadyTransitionTime = readyToStartContainersTime
errSubstrIfContainersReadyTooEarly = "ready to start" errSubstrIfContainersReadyTooEarly = "ready to start"
gomega.Expect(initializedTime.Before(scheduledTime)).NotTo(gomega.BeTrue(), fmt.Sprintf("pod without init containers initialized at: %v which is before pod scheduled at: %v", initializedTime, scheduledTime)) gomega.Expect(initializedTime.Before(scheduledTime)).NotTo(gomega.BeTrueBecause("pod without init containers initialized at: %v which is before pod scheduled at: %v", initializedTime, scheduledTime))
gomega.Expect(readyToStartContainersTime.Before(initializedTime)).NotTo(gomega.BeTrue(), fmt.Sprintf("pod without init containers has ready to start at: %v which is before pod is initialized at: %v", readyToStartContainersTime, initializedTime)) gomega.Expect(readyToStartContainersTime.Before(initializedTime)).NotTo(gomega.BeTrueBecause("pod without init containers has ready to start at: %v which is before pod is initialized at: %v", readyToStartContainersTime, initializedTime))
} }
} else { } else {
// With the PodHasReadyToStartContainers feature disabled, verify the sequence is: Scheduled => Initialized // With the PodHasReadyToStartContainers feature disabled, verify the sequence is: Scheduled => Initialized
gomega.Expect(initializedTime.Before(scheduledTime)).NotTo(gomega.BeTrue(), fmt.Sprintf("pod initialized at: %v which is before pod scheduled at: %v", initializedTime, scheduledTime)) gomega.Expect(initializedTime.Before(scheduledTime)).NotTo(gomega.BeTrueBecause("pod initialized at: %v which is before pod scheduled at: %v", initializedTime, scheduledTime))
} }
// Verify the next condition to get set is ContainersReady // Verify the next condition to get set is ContainersReady
containersReadyTime, err := getTransitionTimeForPodConditionWithStatus(p, v1.ContainersReady, true) containersReadyTime, err := getTransitionTimeForPodConditionWithStatus(p, v1.ContainersReady, true)
framework.ExpectNoError(err) framework.ExpectNoError(err)
gomega.Expect(containersReadyTime.Before(condBeforeContainersReadyTransitionTime)).NotTo(gomega.BeTrue(), fmt.Sprintf("containers ready at: %v which is before pod %s: %v", containersReadyTime, errSubstrIfContainersReadyTooEarly, initializedTime)) gomega.Expect(containersReadyTime.Before(condBeforeContainersReadyTransitionTime)).NotTo(gomega.BeTrueBecause("containers ready at: %v which is before pod %s: %v", containersReadyTime, errSubstrIfContainersReadyTooEarly, condBeforeContainersReadyTransitionTime))
// Verify ContainersReady => PodReady // Verify ContainersReady => PodReady
podReadyTime, err := getTransitionTimeForPodConditionWithStatus(p, v1.PodReady, true) podReadyTime, err := getTransitionTimeForPodConditionWithStatus(p, v1.PodReady, true)
framework.ExpectNoError(err) framework.ExpectNoError(err)
gomega.Expect(podReadyTime.Before(containersReadyTime)).NotTo(gomega.BeTrue(), fmt.Sprintf("pod ready at: %v which is before pod containers ready at: %v", podReadyTime, containersReadyTime)) gomega.Expect(podReadyTime.Before(containersReadyTime)).NotTo(gomega.BeTrueBecause("pod ready at: %v which is before pod containers ready at: %v", podReadyTime, containersReadyTime))
} }
} }
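
The condition-ordering checks above all apply one mechanical change: the fmt.Sprintf description that used to be passed as an extra argument to Expect(...).NotTo(gomega.BeTrue(), ...) moves into gomega.BeTrueBecause, which takes the format string and arguments directly so the reason becomes part of the matcher's own failure output. A minimal sketch of that before/after shape, assuming a gomega release that provides BeTrueBecause/BeFalseBecause and using illustrative variable names rather than anything from this diff:

package podconditions_test

import (
	"fmt"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestConditionOrdering(t *testing.T) {
	g := gomega.NewWithT(t)
	scheduledTime := time.Now()
	readyTime := scheduledTime.Add(2 * time.Second) // illustrative timestamps

	// Before: bare BeTrue() plus a separately formatted description argument.
	g.Expect(readyTime.Before(scheduledTime)).NotTo(gomega.BeTrue(),
		fmt.Sprintf("pod ready at: %v which is before pod scheduled at: %v", readyTime, scheduledTime))

	// After: BeTrueBecause folds the reason into the matcher itself, so the
	// stated reason appears in the failure output instead of a bare true/false.
	g.Expect(readyTime.Before(scheduledTime)).NotTo(gomega.BeTrueBecause(
		"pod ready at: %v which is before pod scheduled at: %v", readyTime, scheduledTime))
}
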

View File

@ -740,7 +740,7 @@ func podresourcesGetAllocatableResourcesTests(ctx context.Context, cli kubeletpo
expectedCPUs := onlineCPUs.Difference(reservedSystemCPUs) expectedCPUs := onlineCPUs.Difference(reservedSystemCPUs)
ginkgo.By(fmt.Sprintf("expecting CPUs '%v'='%v'", allocatableCPUs, expectedCPUs)) ginkgo.By(fmt.Sprintf("expecting CPUs '%v'='%v'", allocatableCPUs, expectedCPUs))
gomega.ExpectWithOffset(1, allocatableCPUs.Equals(expectedCPUs)).To(gomega.BeTrue(), "mismatch expecting CPUs") gomega.ExpectWithOffset(1, allocatableCPUs.Equals(expectedCPUs)).To(gomega.BeTrueBecause("expected allocatable CPUs %v to equal the expected set %v", allocatableCPUs, expectedCPUs))
} }
if sd == nil { // no devices in the environment, so expect no devices if sd == nil { // no devices in the environment, so expect no devices
@ -1326,7 +1326,7 @@ func waitForTopologyUnawareResources(ctx context.Context, f *framework.Framework
node := getLocalNode(ctx, f) node := getLocalNode(ctx, f)
resourceAmount := CountSampleDeviceAllocatable(node) resourceAmount := CountSampleDeviceAllocatable(node)
return resourceAmount > 0 return resourceAmount > 0
}, 2*time.Minute, framework.Poll).Should(gomega.BeTrue()) }, 2*time.Minute, framework.Poll).Should(gomega.BeTrueBecause("expected %q resources to be available, got no resources", defaultTopologyUnawareResourceName))
} }
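
The resource-availability waits in this file keep the polled func() bool unchanged and only attach a reason to the matcher, so a timeout reports what was being waited for instead of "expected true, got false". A hedged sketch of that shape, with a made-up resource lookup, name, and timings (none of which come from the kubelet helpers in the diff):

package resourcewait_test

import (
	"context"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

// countResource stands in for a node-level helper that reports how many of a
// given resource are allocatable; it is purely illustrative.
func countResource() int { return 1 }

func TestWaitForResource(t *testing.T) {
	g := gomega.NewWithT(t)
	ctx := context.Background()
	resourceName := "example.com/sample-device" // hypothetical resource name

	// Poll until the resource shows up; on timeout the failure message names
	// the resource being waited for rather than printing only the final boolean.
	g.Eventually(ctx, func() bool {
		return countResource() > 0
	}, 30*time.Second, time.Second).Should(
		gomega.BeTrueBecause("expected %q resources to become available", resourceName))
}
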
func getPodResourcesMetrics(ctx context.Context) (e2emetrics.KubeletMetrics, error) { func getPodResourcesMetrics(ctx context.Context) (e2emetrics.KubeletMetrics, error) {

View File

@ -313,7 +313,7 @@ var _ = SIGDescribe("Restart", framework.WithSerial(), framework.WithSlow(), fra
// wait until the kubelet health check will fail // wait until the kubelet health check will fail
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalse()) }, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalseBecause("expected the kubelet health check to fail"))
ginkgo.By("Starting the kubelet") ginkgo.By("Starting the kubelet")
startKubelet() startKubelet()
@ -321,14 +321,14 @@ var _ = SIGDescribe("Restart", framework.WithSerial(), framework.WithSlow(), fra
// wait until the kubelet health check will succeed // wait until the kubelet health check will succeed
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrue()) }, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))
// Wait for the Kubelet to be ready. // Wait for the Kubelet to be ready.
gomega.Eventually(ctx, func(ctx context.Context) bool { gomega.Eventually(ctx, func(ctx context.Context) bool {
nodes, err := e2enode.TotalReady(ctx, f.ClientSet) nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
return nodes == 1 return nodes == 1
}, time.Minute, f.Timeouts.Poll).Should(gomega.BeTrue()) }, time.Minute, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in ready state"))
ginkgo.By(fmt.Sprintf("After the kubelet is restarted, verify the pod (%s/%s) is deleted by kubelet", pod.Namespace, pod.Name)) ginkgo.By(fmt.Sprintf("After the kubelet is restarted, verify the pod (%s/%s) is deleted by kubelet", pod.Namespace, pod.Name))
gomega.Eventually(ctx, func(ctx context.Context) error { gomega.Eventually(ctx, func(ctx context.Context) error {
@ -366,7 +366,7 @@ var _ = SIGDescribe("Restart", framework.WithSerial(), framework.WithSlow(), fra
// wait until the kubelet health check will fail // wait until the kubelet health check will fail
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalse()) }, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalseBecause("expected the kubelet health check to fail"))
// Create the pod bound to the node. It will remain in the Pending // Create the pod bound to the node. It will remain in the Pending
// phase as Kubelet is down. // phase as Kubelet is down.
@ -384,14 +384,14 @@ var _ = SIGDescribe("Restart", framework.WithSerial(), framework.WithSlow(), fra
// wait until the kubelet health check will succeed // wait until the kubelet health check will succeed
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrue()) }, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))
// Wait for the Kubelet to be ready. // Wait for the Kubelet to be ready.
gomega.Eventually(ctx, func(ctx context.Context) bool { gomega.Eventually(ctx, func(ctx context.Context) bool {
nodes, err := e2enode.TotalReady(ctx, f.ClientSet) nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
return nodes == 1 return nodes == 1
}, time.Minute, f.Timeouts.Poll).Should(gomega.BeTrue()) }, time.Minute, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in ready state"))
ginkgo.By(fmt.Sprintf("After the kubelet is restarted, verify the pod (%v/%v) is deleted by kubelet", pod.Namespace, pod.Name)) ginkgo.By(fmt.Sprintf("After the kubelet is restarted, verify the pod (%v/%v) is deleted by kubelet", pod.Namespace, pod.Name))
gomega.Eventually(ctx, func(ctx context.Context) error { gomega.Eventually(ctx, func(ctx context.Context) error {
@ -444,7 +444,7 @@ var _ = SIGDescribe("Restart", framework.WithSerial(), framework.WithSlow(), fra
// wait until the kubelet health check will fail // wait until the kubelet health check will fail
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalse()) }, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalseBecause("expected the kubelet health check to fail"))
ginkgo.By(fmt.Sprintf("Deleting the pod (%v/%v) to set a deletion timestamp", pod.Namespace, pod.Name)) ginkgo.By(fmt.Sprintf("Deleting the pod (%v/%v) to set a deletion timestamp", pod.Namespace, pod.Name))
err = e2epod.NewPodClient(f).Delete(ctx, pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod}) err = e2epod.NewPodClient(f).Delete(ctx, pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod})
@ -460,14 +460,14 @@ var _ = SIGDescribe("Restart", framework.WithSerial(), framework.WithSlow(), fra
// wait until the kubelet health check will succeed // wait until the kubelet health check will succeed
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrue()) }, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))
// Wait for the Kubelet to be ready. // Wait for the Kubelet to be ready.
gomega.Eventually(ctx, func(ctx context.Context) bool { gomega.Eventually(ctx, func(ctx context.Context) bool {
nodes, err := e2enode.TotalReady(ctx, f.ClientSet) nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
return nodes == 1 return nodes == 1
}, time.Minute, f.Timeouts.Poll).Should(gomega.BeTrue()) }, time.Minute, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in ready state"))
ginkgo.By(fmt.Sprintf("Once Kubelet is restarted, verify the pod (%v/%v) is deleted by kubelet", pod.Namespace, pod.Name)) ginkgo.By(fmt.Sprintf("Once Kubelet is restarted, verify the pod (%v/%v) is deleted by kubelet", pod.Namespace, pod.Name))
gomega.Eventually(ctx, func(ctx context.Context) error { gomega.Eventually(ctx, func(ctx context.Context) error {

View File

@ -381,7 +381,7 @@ func runPodAndWaitUntilScheduled(f *framework.Framework, pod *v1.Pod) *v1.Pod {
isReady, err := testutils.PodRunningReady(pod) isReady, err := testutils.PodRunningReady(pod)
framework.ExpectNoError(err) framework.ExpectNoError(err)
gomega.ExpectWithOffset(1, isReady).To(gomega.BeTrueBecause("pod should be ready")) gomega.ExpectWithOffset(1, isReady).To(gomega.BeTrueBecause("pod %+v was expected to be ready", pod))
return pod return pod
} }

View File

@ -78,7 +78,7 @@ var _ = SIGDescribe("Terminate Pods", func() {
} }
} }
return false return false
}, 20*time.Second, 1*time.Second).Should(gomega.BeTrue()) }, 20*time.Second, 1*time.Second).Should(gomega.BeTrueBecause("expected container to be ready"))
err := client.Delete(context.Background(), pod.Name, metav1.DeleteOptions{}) err := client.Delete(context.Background(), pod.Name, metav1.DeleteOptions{})
@ -87,7 +87,7 @@ var _ = SIGDescribe("Terminate Pods", func() {
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
_, err := client.Get(context.TODO(), pod.Name, metav1.GetOptions{}) _, err := client.Get(context.TODO(), pod.Name, metav1.GetOptions{})
return apierrors.IsNotFound(err) return apierrors.IsNotFound(err)
}, 10*time.Second, time.Second).Should(gomega.BeTrue()) }, 10*time.Second, time.Second).Should(gomega.BeTrueBecause("expected pod to disappear from API server within 10 seconds"))
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
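
The deletion wait above translates the API lookup error into a boolean and lets BeTrueBecause carry the expectation, so a timeout says the pod never disappeared rather than just reporting "false". A rough sketch of the same shape against a fake clientset (the namespace and pod name are placeholders, and the fake client is used only to keep the example self-contained):

package deletionwait_test

import (
	"context"
	"testing"
	"time"

	"github.com/onsi/gomega"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestWaitForPodDeletion(t *testing.T) {
	g := gomega.NewWithT(t)
	ctx := context.Background()
	client := fake.NewSimpleClientset() // no pods exist, so Get returns NotFound

	// Poll until the pod is gone; IsNotFound turns the lookup error into the
	// boolean the matcher asserts on, and the reason explains the wait.
	g.Eventually(ctx, func() bool {
		_, err := client.CoreV1().Pods("default").Get(ctx, "example-pod", metav1.GetOptions{})
		return apierrors.IsNotFound(err)
	}, 10*time.Second, time.Second).Should(
		gomega.BeTrueBecause("expected pod %q to disappear from the API server", "example-pod"))
}
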

View File

@ -536,7 +536,7 @@ func createSRIOVPodOrFail(ctx context.Context, f *framework.Framework) *v1.Pod {
return dpPod return dpPod
} }
// waitForSRIOVResources waits until enough SRIOV resources are avaailable, expecting to complete within the timeout. // waitForSRIOVResources waits until enough SRIOV resources are available, expecting to complete within the timeout.
// if it exits successfully, it updates the sriovData with the resources that were found. // if it exits successfully, it updates the sriovData with the resources that were found.
func waitForSRIOVResources(ctx context.Context, f *framework.Framework, sd *sriovData) { func waitForSRIOVResources(ctx context.Context, f *framework.Framework, sd *sriovData) {
sriovResourceName := "" sriovResourceName := ""
@ -546,7 +546,7 @@ func waitForSRIOVResources(ctx context.Context, f *framework.Framework, sd *srio
node := getLocalNode(ctx, f) node := getLocalNode(ctx, f)
sriovResourceName, sriovResourceAmount = findSRIOVResource(node) sriovResourceName, sriovResourceAmount = findSRIOVResource(node)
return sriovResourceAmount > minSriovResource return sriovResourceAmount > minSriovResource
}, 2*time.Minute, framework.Poll).Should(gomega.BeTrue()) }, 2*time.Minute, framework.Poll).Should(gomega.BeTrueBecause("expected SRIOV resources to be available within the timeout"))
sd.resourceName = sriovResourceName sd.resourceName = sriovResourceName
sd.resourceAmount = sriovResourceAmount sd.resourceAmount = sriovResourceAmount

View File

@ -77,7 +77,7 @@ var _ = SIGDescribe("Unknown Pods", framework.WithSerial(), framework.WithDisrup
// wait until the kubelet health check will fail // wait until the kubelet health check will fail
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalse()) }, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalseBecause("expected the kubelet health check to fail"))
framework.Logf("Delete the static pod manifest while the kubelet is not running") framework.Logf("Delete the static pod manifest while the kubelet is not running")
file := staticPodPath(podPath, staticPodName, ns) file := staticPodPath(podPath, staticPodName, ns)
@ -91,7 +91,7 @@ var _ = SIGDescribe("Unknown Pods", framework.WithSerial(), framework.WithDisrup
// wait until the kubelet health check will succeed // wait until the kubelet health check will succeed
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrue()) }, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))
framework.Logf("wait for the mirror pod %v to disappear", mirrorPodName) framework.Logf("wait for the mirror pod %v to disappear", mirrorPodName)
gomega.Eventually(ctx, func(ctx context.Context) error { gomega.Eventually(ctx, func(ctx context.Context) error {
@ -148,7 +148,7 @@ var _ = SIGDescribe("Unknown Pods", framework.WithSerial(), framework.WithDisrup
// wait until the kubelet health check will fail // wait until the kubelet health check will fail
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalse()) }, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalseBecause("expected the kubelet health check to fail"))
framework.Logf("Delete the pod while the kubelet is not running") framework.Logf("Delete the pod while the kubelet is not running")
// Delete pod sync by name will force delete the pod, removing it from kubelet's config // Delete pod sync by name will force delete the pod, removing it from kubelet's config
@ -160,7 +160,7 @@ var _ = SIGDescribe("Unknown Pods", framework.WithSerial(), framework.WithDisrup
// wait until the kubelet health check will succeed // wait until the kubelet health check will succeed
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrue()) }, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))
framework.Logf("wait for the pod %v to disappear", podName) framework.Logf("wait for the pod %v to disappear", podName)
gomega.Eventually(ctx, func(ctx context.Context) error { gomega.Eventually(ctx, func(ctx context.Context) error {

View File

@ -221,7 +221,7 @@ func updateKubeletConfig(ctx context.Context, f *framework.Framework, kubeletCon
// wait until the kubelet health check will fail // wait until the kubelet health check will fail
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, time.Minute, time.Second).Should(gomega.BeFalse()) }, time.Minute, time.Second).Should(gomega.BeFalseBecause("expected the kubelet health check to fail"))
// Delete CPU and memory manager state files to be sure it will not prevent the kubelet restart // Delete CPU and memory manager state files to be sure it will not prevent the kubelet restart
if deleteStateFiles { if deleteStateFiles {
@ -240,14 +240,14 @@ func waitForKubeletToStart(ctx context.Context, f *framework.Framework) {
// wait until the kubelet health check will succeed // wait until the kubelet health check will succeed
gomega.Eventually(ctx, func() bool { gomega.Eventually(ctx, func() bool {
return kubeletHealthCheck(kubeletHealthCheckURL) return kubeletHealthCheck(kubeletHealthCheckURL)
}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue()) }, 2*time.Minute, 5*time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))
// Wait for the Kubelet to be ready. // Wait for the Kubelet to be ready.
gomega.Eventually(ctx, func(ctx context.Context) bool { gomega.Eventually(ctx, func(ctx context.Context) bool {
nodes, err := e2enode.TotalReady(ctx, f.ClientSet) nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
framework.ExpectNoError(err) framework.ExpectNoError(err)
return nodes == 1 return nodes == 1
}, time.Minute, time.Second).Should(gomega.BeTrue()) }, time.Minute, time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in ready state"))
} }
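
The restart helpers above follow a symmetric pattern: wait for the health endpoint to go down with BeFalseBecause, then wait for it to come back with BeTrueBecause, so whichever wait times out, the failure states which direction was expected. A condensed sketch of that pairing, with a placeholder healthCheck func standing in for the kubeletHealthCheck helper and deliberately short timeouts:

package restartwait_test

import (
	"context"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

// waitForRestart polls healthCheck until it reports unhealthy, invokes the
// restart callback, then polls until healthCheck reports healthy again.
func waitForRestart(ctx context.Context, g *gomega.WithT, healthCheck func() bool, restart func()) {
	// The endpoint should report unhealthy before the restart is attempted.
	g.Eventually(ctx, func() bool {
		return healthCheck()
	}, 30*time.Second, time.Second).Should(
		gomega.BeFalseBecause("expected the health check to fail while the service is down"))

	restart()

	// After the restart it must come back before the wait expires.
	g.Eventually(ctx, func() bool {
		return healthCheck()
	}, 30*time.Second, time.Second).Should(
		gomega.BeTrueBecause("expected the health check to succeed after restart"))
}

func TestRestartPattern(t *testing.T) {
	g := gomega.NewWithT(t)
	healthy := false
	waitForRestart(context.Background(), g, func() bool { return healthy }, func() { healthy = true })
}
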
func deleteStateFile(stateFileName string) { func deleteStateFile(stateFileName string) {