Merge pull request #121888 from SD-13/e2e-gomega-be-true-or-false

Enhance boolean assertions when they fail

Commit: d770dd695a
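The change applies one pattern throughout the e2e suite: boolean assertions that carried a separate failure message (or none at all) are switched to Gomega's BeTrueBecause/BeFalseBecause, so the stated reason becomes the failure message instead of the generic "Expected <bool>: ... to be true" output. A minimal, hypothetical sketch of the difference (not part of the diff; the resource name is invented):

package example

import (
	"testing"

	"github.com/onsi/gomega"
)

// Both assertions below fail on purpose, only to show the difference in output.
func TestBooleanAssertionMessages(t *testing.T) {
	g := gomega.NewWithT(t)
	foundResource := false

	// Old style: the reason is an optional annotation on top of the generic
	// "Expected <bool>: false to be true" failure text.
	g.Expect(foundResource).To(gomega.BeTrue(), "Resource %q was not found", "widgets")

	// New style: the reason itself becomes the failure message.
	g.Expect(foundResource).To(gomega.BeTrueBecause("Resource %q was not found", "widgets"))
}
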
@@ -26,7 +26,7 @@ Usage: $0 [-r <revision>|-a] [-s] [-c none|<config>] [-- <golangci-lint run flag
 -a: automatically select the common base of origin/master and HEAD
 as revision
 -s: select a strict configuration for new code
--n: in addition to strict checking, also enable hints (aka nits) that may are may not
+-n: in addition to strict checking, also enable hints (aka nits) that may or may not
 be useful
 -g <github action file>: also write results with --out-format=github-actions
 to a separate file

@@ -308,7 +308,7 @@ var _ = SIGDescribe("Discovery", func() {
 break
 }
 }
-gomega.Expect(foundResource).To(gomega.BeTrue(), "Resource %q was not found inside of resourceList\n%#v", t.validResource, resourceList.APIResources)
+gomega.Expect(foundResource).To(gomega.BeTrueBecause("Resource %q was not found inside of resourceList\n%#v", t.validResource, resourceList.APIResources))
 }
 })
 })

@@ -72,7 +72,7 @@ var _ = SIGDescribe("client-go should negotiate", func() {
 defer w.Stop()

 evt, ok := <-w.ResultChan()
-o.Expect(ok).To(o.BeTrue())
+o.Expect(ok).To(o.BeTrueBecause("unexpected watch event: %v, %#v", evt.Type, evt.Object))
 switch evt.Type {
 case watch.Added, watch.Modified:
 // this is allowed

@@ -825,7 +825,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 _, err := createValidatingWebhookConfiguration(ctx, f, validatingWebhookConfiguration)
 gomega.Expect(err).To(gomega.HaveOccurred(), "create validatingwebhookconfiguration should have been denied by the api-server")
 expectedErrMsg := "compilation failed"
-gomega.Expect(strings.Contains(err.Error(), expectedErrMsg)).To(gomega.BeTrue())
+gomega.Expect(err).To(gomega.MatchError(gomega.ContainSubstring(expectedErrMsg)))
 })

 /*
@@ -849,7 +849,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 _, err := createMutatingWebhookConfiguration(ctx, f, mutatingWebhookConfiguration)
 gomega.Expect(err).To(gomega.HaveOccurred(), "create mutatingwebhookconfiguration should have been denied by the api-server")
 expectedErrMsg := "compilation failed"
-gomega.Expect(strings.Contains(err.Error(), expectedErrMsg)).To(gomega.BeTrue())
+gomega.Expect(err).To(gomega.MatchError(gomega.ContainSubstring(expectedErrMsg)))
 })

 /*
@@ -908,7 +908,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 "mutation-start": "yes",
 "mutation-stage-1": "yes",
 }
-gomega.Expect(reflect.DeepEqual(expectedConfigMapData, mutatedCM.Data)).To(gomega.BeTrue())
+gomega.Expect(mutatedCM.Data).Should(gomega.Equal(expectedConfigMapData))

 ginkgo.By("create the configmap with 'skip-me' name")

@@ -918,7 +918,7 @@ var _ = SIGDescribe("AdmissionWebhook [Privileged:ClusterAdmin]", func() {
 expectedConfigMapData = map[string]string{
 "mutation-start": "yes",
 }
-gomega.Expect(reflect.DeepEqual(expectedConfigMapData, skippedCM.Data)).To(gomega.BeTrue())
+gomega.Expect(skippedCM.Data).Should(gomega.Equal(expectedConfigMapData))
 })
 })

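Two of the replacements above go beyond BeTrueBecause: error-substring checks move to MatchError(ContainSubstring(...)), which keeps the full error text in the failure output, and reflect.DeepEqual comparisons move to Equal(), which already performs a deep comparison and prints a diff on mismatch. A small sketch under assumed values (the error text and maps are invented, not taken from the tests):

package example

import (
	"errors"
	"testing"

	"github.com/onsi/gomega"
)

func TestErrorAndMapAssertions(t *testing.T) {
	g := gomega.NewWithT(t)

	// Instead of gomega.Expect(strings.Contains(err.Error(), msg)).To(gomega.BeTrue()):
	err := errors.New("admission webhook denied the request: compilation failed: details")
	g.Expect(err).To(gomega.MatchError(gomega.ContainSubstring("compilation failed")))

	// Instead of gomega.Expect(reflect.DeepEqual(want, got)).To(gomega.BeTrue()):
	want := map[string]string{"mutation-start": "yes"}
	got := map[string]string{"mutation-start": "yes"}
	g.Expect(got).To(gomega.Equal(want))
}
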
@@ -209,8 +209,8 @@ var _ = SIGDescribe("DisruptionController", func() {
 return false, err
 }
 return isPDBErroring(pdb), nil
-}, 1*time.Minute, 1*time.Second).ShouldNot(gomega.BeTrue(), "pod shouldn't error for "+
-"unmanaged pod")
+}, 1*time.Minute, 1*time.Second).ShouldNot(gomega.BeTrueBecause("pod shouldn't error for " +
+"unmanaged pod"))
 })

 evictionCases := []struct {

@@ -113,7 +113,7 @@ var _ = SIGDescribe("Probing container", func() {
 return false, err
 }
 return podutil.IsPodReady(p), nil
-}, 1*time.Minute, 1*time.Second).ShouldNot(gomega.BeTrue(), "pod should not be ready")
+}, 1*time.Minute, 1*time.Second).ShouldNot(gomega.BeTrueBecause("pod should not be ready"))

 p, err := podClient.Get(ctx, p.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)
@@ -726,7 +726,7 @@ done
 }
 }
 return false, nil
-}, 1*time.Minute, framework.Poll).ShouldNot(gomega.BeTrue(), "should not see liveness probes")
+}, 1*time.Minute, framework.Poll).ShouldNot(gomega.BeTrueBecause("should not see liveness probes"))
 })
 })
@@ -792,7 +792,7 @@ var _ = SIGDescribe(nodefeature.SidecarContainers, feature.SidecarContainers, "P
 return false, err
 }
 return podutil.IsPodReady(p), nil
-}, 1*time.Minute, 1*time.Second).ShouldNot(gomega.BeTrue(), "pod should not be ready")
+}, 1*time.Minute, 1*time.Second).ShouldNot(gomega.BeTrueBecause("pod should not be ready"))

 p, err := podClient.Get(ctx, p.Name, metav1.GetOptions{})
 framework.ExpectNoError(err)
@@ -1484,7 +1484,7 @@ done
 }
 }
 return false, nil
-}, 1*time.Minute, framework.Poll).ShouldNot(gomega.BeTrue(), "should not see liveness probes")
+}, 1*time.Minute, framework.Poll).ShouldNot(gomega.BeTrueBecause("should not see liveness probes"))
 })
 })

@@ -50,7 +50,7 @@ var _ = ginkgo.Describe("log", func() {
 })
 ginkgo.AfterEach(func() {
 framework.Logf("after")
-gomega.Expect(true).To(gomega.BeFalse(), "true is never false either")
+gomega.Expect(true).To(gomega.BeFalseBecause("artificial assertion failure"))
 })
 ginkgo.It("fails", func() {
 func() {
@@ -58,7 +58,7 @@ var _ = ginkgo.Describe("log", func() {
 }()
 })
 ginkgo.It("asserts", func() {
-gomega.Expect(false).To(gomega.BeTrue(), "false is never true")
+gomega.Expect(false).To(gomega.BeTrueBecause("artificial assertion failure"))
 })
 ginkgo.It("error", func() {
 err := errors.New("an error with a long, useless description")
@@ -106,10 +106,7 @@ In [It] at: log_test.go:57 <time>
 < Exit [It] fails - log_test.go:55 <time>
 > Enter [AfterEach] log - log_test.go:51 <time>
 <klog> log_test.go:52] after
-[FAILED] true is never false either
-Expected
-<bool>: true
-to be false
+[FAILED] artificial assertion failure
 In [AfterEach] at: log_test.go:53 <time>
 < Exit [AfterEach] log - log_test.go:51 <time>
 `,
@@ -119,10 +116,7 @@ In [AfterEach] at: log_test.go:53 <time>
 Status: "failed",
 Failure: &reporters.JUnitFailure{
 Type: "failed",
-Description: `[FAILED] false is never true
-Expected
-<bool>: false
-to be true
+Description: `[FAILED] artificial assertion failure
 In [It] at: log_test.go:61 <time>

 There were additional failures detected after the initial failure. These are visible in the timeline
@@ -132,18 +126,12 @@ There were additional failures detected after the initial failure. These are vis
 <klog> log_test.go:49] before
 < Exit [BeforeEach] log - log_test.go:48 <time>
 > Enter [It] asserts - log_test.go:60 <time>
-[FAILED] false is never true
-Expected
-<bool>: false
-to be true
+[FAILED] artificial assertion failure
 In [It] at: log_test.go:61 <time>
 < Exit [It] asserts - log_test.go:60 <time>
 > Enter [AfterEach] log - log_test.go:51 <time>
 <klog> log_test.go:52] after
-[FAILED] true is never false either
-Expected
-<bool>: true
-to be false
+[FAILED] artificial assertion failure
 In [AfterEach] at: log_test.go:53 <time>
 < Exit [AfterEach] log - log_test.go:51 <time>
 `,
@@ -174,10 +162,7 @@ In [It] at: log_test.go:65 <time>
 < Exit [It] error - log_test.go:63 <time>
 > Enter [AfterEach] log - log_test.go:51 <time>
 <klog> log_test.go:52] after
-[FAILED] true is never false either
-Expected
-<bool>: true
-to be false
+[FAILED] artificial assertion failure
 In [AfterEach] at: log_test.go:53 <time>
 < Exit [AfterEach] log - log_test.go:51 <time>
 `,
@@ -210,10 +195,7 @@ In [It] at: log_test.go:68 <time>
 < Exit [It] equal - log_test.go:67 <time>
 > Enter [AfterEach] log - log_test.go:51 <time>
 <klog> log_test.go:52] after
-[FAILED] true is never false either
-Expected
-<bool>: true
-to be false
+[FAILED] artificial assertion failure
 In [AfterEach] at: log_test.go:53 <time>
 < Exit [AfterEach] log - log_test.go:51 <time>
 `,
@@ -238,10 +220,7 @@ In [It] at: log_test.go:44 <time>
 < Exit [It] fails with helper - log_test.go:70 <time>
 > Enter [AfterEach] log - log_test.go:51 <time>
 <klog> log_test.go:52] after
-[FAILED] true is never false either
-Expected
-<bool>: true
-to be false
+[FAILED] artificial assertion failure
 In [AfterEach] at: log_test.go:53 <time>
 < Exit [AfterEach] log - log_test.go:51 <time>
 `,
@@ -251,10 +230,7 @@ In [AfterEach] at: log_test.go:53 <time>
 Status: "failed",
 Failure: &reporters.JUnitFailure{
 Type: "failed",
-Description: `[FAILED] true is never false either
-Expected
-<bool>: true
-to be false
+Description: `[FAILED] artificial assertion failure
 In [AfterEach] at: log_test.go:53 <time>
 `,
 },
@@ -267,10 +243,7 @@ In [AfterEach] at: log_test.go:53 <time>
 < Exit [It] redirects klog - log_test.go:73 <time>
 > Enter [AfterEach] log - log_test.go:51 <time>
 <klog> log_test.go:52] after
-[FAILED] true is never false either
-Expected
-<bool>: true
-to be false
+[FAILED] artificial assertion failure
 In [AfterEach] at: log_test.go:53 <time>
 < Exit [AfterEach] log - log_test.go:51 <time>
 `,

@@ -266,7 +266,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
 }
 // If the image policy is not PullAlways, the image must be in the pre-pull list and
 // pre-pulled.
-gomega.Expect(ImagePrePullList.Has(c.Image)).To(gomega.BeTrue(), "Image %q is not in the pre-pull list, consider adding it to PrePulledImages in test/e2e/common/util.go or NodePrePullImageList in test/e2e_node/image_list.go", c.Image)
+gomega.Expect(ImagePrePullList.Has(c.Image)).To(gomega.BeTrueBecause("Image %q is not in the pre-pull list, consider adding it to PrePulledImages in test/e2e/common/util.go or NodePrePullImageList in test/e2e_node/image_list.go", c.Image))
 // Do not pull images during the tests because the images in pre-pull list should have
 // been prepulled.
 c.ImagePullPolicy = v1.PullNever

@@ -178,7 +178,7 @@ func (v *volumeExpandTestSuite) DefineTests(driver storageframework.TestDriver,

 gomega.Expect(l.resource.Sc.AllowVolumeExpansion).NotTo(gomega.BeNil())
 allowVolumeExpansion := *l.resource.Sc.AllowVolumeExpansion
-gomega.Expect(allowVolumeExpansion).To(gomega.BeFalse())
+gomega.Expect(allowVolumeExpansion).To(gomega.BeFalseBecause("expected AllowVolumeExpansion value to be false"))
 ginkgo.By("Expanding non-expandable pvc")
 currentPvcSize := l.resource.Pvc.Spec.Resources.Requests[v1.ResourceStorage]
 newSize := currentPvcSize.DeepCopy()

@@ -116,7 +116,7 @@ func runDensityBatchTest(ctx context.Context, f *framework.Framework, testArg de

 gomega.Eventually(ctx, func() bool {
 return len(watchTimes) == testArg.podsNr
-}, 10*time.Minute, 10*time.Second).Should(gomega.BeTrue())
+}, 10*time.Minute, 10*time.Second).Should(gomega.BeTrueBecause("All pods should be observed by the watch."))

 if len(watchTimes) < testArg.podsNr {
 framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.")

@@ -141,11 +141,10 @@ var _ = sigDescribe(feature.Windows, "GMSA Full", framework.WithSerial(), framew
 // and authenticating with it.
 ginkgo.By("checking that nltest /QUERY returns successfully")
 var output string
-gomega.Eventually(ctx, func() bool {
+gomega.Eventually(ctx, func() error {
 output, err = runKubectlExecInNamespace(f.Namespace.Name, podName, "nltest", "/QUERY")
 if err != nil {
-framework.Logf("unable to run command in container via exec: %s", err)
-return false
+return fmt.Errorf("unable to run command in container via exec: %w", err)
 }

 if !isValidOutput(output) {
@@ -153,14 +152,12 @@ var _ = sigDescribe(feature.Windows, "GMSA Full", framework.WithSerial(), framew
 // https://kubernetes.io/docs/tasks/configure-pod-container/configure-gmsa/#troubleshooting
 output, err = runKubectlExecInNamespace(f.Namespace.Name, podName, "nltest", fmt.Sprintf("/sc_reset:%s", gmsaDomain))
 if err != nil {
-framework.Logf("unable to run command in container via exec: %s", err)
-return false
+return fmt.Errorf("unable to run command in container via exec: %w", err)
 }
-framework.Logf("failed to connect to domain; tried resetting the domain, output:\n%s", string(output))
-return false
+return fmt.Errorf("failed to connect to domain; tried resetting the domain, output:\n%v", string(output))
 }
-return true
-}, 1*time.Minute, 1*time.Second).Should(gomega.BeTrue())
+return nil
+}, 1*time.Minute, 1*time.Second).Should(gomega.Succeed())
 })

 ginkgo.It("can read and write file to remote SMB folder", func(ctx context.Context) {
@@ -208,16 +205,16 @@ var _ = sigDescribe(feature.Windows, "GMSA Full", framework.WithSerial(), framew

 ginkgo.By("checking that file can be read and write from the remote folder successfully")
 filePath := fmt.Sprintf("\\\\%s\\%s\\write-test-%s.txt", gmsaDomainIP, gmsaSharedFolder, string(uuid.NewUUID())[0:4])
-gomega.Eventually(ctx, func() bool {
+
+gomega.Eventually(ctx, func() error {
 // The filePath is a remote folder, do not change the format of it
 _, _ = runKubectlExecInNamespace(f.Namespace.Name, podName, "--", "powershell.exe", "-Command", "echo 'This is a test file.' > "+filePath)
-output, err := runKubectlExecInNamespace(f.Namespace.Name, podName, "powershell.exe", "--", "cat", filePath)
+_, err := runKubectlExecInNamespace(f.Namespace.Name, podName, "powershell.exe", "--", "cat", filePath)
 if err != nil {
-framework.Logf("unable to get file from AD server: %s", err)
-return false
+return err
 }
-return strings.Contains(output, "This is a test file.")
-}, 1*time.Minute, 1*time.Second).Should(gomega.BeTrue())
+return nil
+}, 1*time.Minute, 1*time.Second).Should(gomega.Succeed())

 })
 })

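The GMSA hunks above change the polled function's signature rather than only the matcher: a func() error polled with Should(Succeed()) reports the last returned error when the poll times out, whereas a func() bool polled with BeTrue() can only say the value was false. A hedged, self-contained sketch of that pattern (the helper below is a stand-in, not the test's kubectl exec wrapper):

package example

import (
	"fmt"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestPollWithErrorInsteadOfBool(t *testing.T) {
	g := gomega.NewWithT(t)

	attempts := 0
	runRemoteCommand := func() error { // stand-in for runKubectlExecInNamespace
		attempts++
		if attempts < 3 {
			return fmt.Errorf("unable to run command in container via exec: attempt %d", attempts)
		}
		return nil
	}

	// On timeout, Gomega prints the last non-nil error instead of "Expected false to be true".
	g.Eventually(func() error {
		return runRemoteCommand()
	}, 5*time.Second, 100*time.Millisecond).Should(gomega.Succeed())
}
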
@@ -113,10 +113,13 @@ var _ = sigDescribe(feature.Windows, "GMSA Kubelet", framework.WithSlow(), skipU
 // even for bogus creds, `nltest /PARENTDOMAIN` simply returns the AD domain, which is enough for our purpose here.
 // note that the "eventually" part seems to be needed to account for the fact that powershell containers
 // are a bit slow to become responsive, even when docker reports them as running.
-gomega.Eventually(ctx, func() bool {
+gomega.Eventually(ctx, func() error {
 output, err = e2ekubectl.RunKubectl(f.Namespace.Name, "exec", namespaceOption, podName, containerOption, "--", "nltest", "/PARENTDOMAIN")
-return err == nil
-}, 1*time.Minute, 1*time.Second).Should(gomega.BeTrue())
+if err != nil {
+return err
+}
+return nil
+}, 1*time.Minute, 1*time.Second).Should(gomega.Succeed())

 if !strings.HasPrefix(output, domain) {
 framework.Failf("Expected %q to start with %q", output, domain)

@@ -173,7 +173,7 @@ func overrideAllocatableMemoryTest(ctx context.Context, f *framework.Framework,
 }
 }
 return false
-}, 3*time.Minute, 10*time.Second).Should(gomega.BeTrue())
+}, 3*time.Minute, 10*time.Second).Should(gomega.BeTrueBecause("Expected %s pod to be failed scheduling", podName))
 }

 func getNodeMemory(ctx context.Context, f *framework.Framework, node v1.Node) nodeMemory {

@@ -102,7 +102,7 @@ var _ = sigDescribe(feature.Windows, "SecurityContext", skipUnlessWindows(func()
 }

 return false
-}, framework.PodStartTimeout, 1*time.Second).Should(gomega.BeTrue())
+}, framework.PodStartTimeout, 1*time.Second).Should(gomega.BeTrueBecause("expected pod to be terminated"))
 })

 ginkgo.It("should not be able to create pods with unknown usernames at Container level", func(ctx context.Context) {

@@ -69,7 +69,7 @@ var _ = Describe("kubeadm-certs [copy-certs]", func() {
 gomega.Expect(s.OwnerReferences).To(gomega.HaveLen(1), "%s should have one owner reference", kubeadmCertsSecretName)
 ownRef := s.OwnerReferences[0]
 gomega.Expect(ownRef.Kind).To(gomega.Equal("Secret"), "%s should be owned by a secret", kubeadmCertsSecretName)
-gomega.Expect(*ownRef.BlockOwnerDeletion).To(gomega.BeTrue(), "%s should be deleted on owner deletion", kubeadmCertsSecretName)
+gomega.Expect(*ownRef.BlockOwnerDeletion).To(gomega.BeTrueBecause("%s should be deleted on owner deletion", kubeadmCertsSecretName))

 o := GetSecret(f.ClientSet, kubeSystemNamespace, ownRef.Name)
 gomega.Expect(o.Type).To(gomega.Equal(corev1.SecretTypeBootstrapToken), "%s should have an owner reference that refers to a bootstrap-token", kubeadmCertsSecretName)

@@ -175,7 +175,7 @@ func ExpectSubjectHasAccessToResource(c clientset.Interface, subjectKind, subjec
 s, err := c.AuthorizationV1().SubjectAccessReviews().Create(context.TODO(), sar, metav1.CreateOptions{})
 framework.ExpectNoError(err, "error getting SubjectAccessReview for %s %s to resource %+v", subjectKind, subject, *sar.Spec.ResourceAttributes)

-gomega.Expect(s.Status.Allowed).Should(gomega.BeTrue(), "%s %s has no access to resource %+v", subjectKind, subject, *sar.Spec.ResourceAttributes)
+gomega.Expect(s.Status.Allowed).Should(gomega.BeTrueBecause("%s %s has no access to resource %+v", subjectKind, subject, *sar.Spec.ResourceAttributes))
 }

 // matchers

@@ -992,7 +992,7 @@ var _ = SIGDescribe(framework.WithSerial(), "Containers Lifecycle", func() {
 restartKubelet := stopKubelet()
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalse())
+}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalseBecause("kubelet was expected to be stopped but it is still running"))

 ginkgo.By("Stopping the pod sandbox to simulate the node reboot")
 err = rs.StopPodSandbox(ctx, podSandboxID)
@@ -1002,7 +1002,7 @@ var _ = SIGDescribe(framework.WithSerial(), "Containers Lifecycle", func() {
 restartKubelet()
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrue())
+}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrueBecause("kubelet was expected to be healthy"))

 ginkgo.By("Waiting for the pod to be re-initialized and run")
 err = e2epod.WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "re-initialized", f.Timeouts.PodStart, func(pod *v1.Pod) (bool, error) {
@@ -3656,7 +3656,7 @@ var _ = SIGDescribe(nodefeature.SidecarContainers, framework.WithSerial(), "Cont
 restartKubelet := stopKubelet()
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalse())
+}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalseBecause("expected kubelet would have been stopped but it is still running"))

 ginkgo.By("Stopping the pod sandbox to simulate the node reboot")
 err = rs.StopPodSandbox(ctx, podSandboxID)
@@ -3666,7 +3666,7 @@ var _ = SIGDescribe(nodefeature.SidecarContainers, framework.WithSerial(), "Cont
 restartKubelet()
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrue())
+}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrueBecause("kubelet was expected to be healthy"))

 ginkgo.By("Waiting for the pod to be re-initialized and run")
 err = e2epod.WaitForPodCondition(ctx, f.ClientSet, pod.Namespace, pod.Name, "re-initialized", f.Timeouts.PodStart, func(pod *v1.Pod) (bool, error) {

@@ -186,7 +186,7 @@ func waitForContainerRemoval(ctx context.Context, containerName, podName, podNS
 return false
 }
 return len(containers) == 0
-}, 2*time.Minute, 1*time.Second).Should(gomega.BeTrue())
+}, 2*time.Minute, 1*time.Second).Should(gomega.BeTrueBecause("Containers were expected to be removed"))
 }

 func isHTEnabled() bool {
@@ -770,8 +770,8 @@ func runCPUManagerTests(f *framework.Framework) {

 gomega.Expect(cpus.Size()).To(gomega.Equal(1), "expected cpu set size == 1, got %q", cpus.String())

-gomega.Expect(reusableCPUs.Equals(nonReusableCPUs)).To(gomega.BeTrue(), "expected reusable cpuset [%s] to be equal to non-reusable cpuset [%s]", reusableCPUs.String(), nonReusableCPUs.String())
-gomega.Expect(nonReusableCPUs.Intersection(cpus).IsEmpty()).To(gomega.BeTrue(), "expected non-reusable cpuset [%s] to be disjoint from cpuset [%s]", nonReusableCPUs.String(), cpus.String())
+gomega.Expect(reusableCPUs.Equals(nonReusableCPUs)).To(gomega.BeTrueBecause("expected reusable cpuset [%s] to be equal to non-reusable cpuset [%s]", reusableCPUs.String(), nonReusableCPUs.String()))
+gomega.Expect(nonReusableCPUs.Intersection(cpus).IsEmpty()).To(gomega.BeTrueBecause("expected non-reusable cpuset [%s] to be disjoint from cpuset [%s]", nonReusableCPUs.String(), cpus.String()))

 ginkgo.By("by deleting the pods and waiting for container removal")
 deletePods(ctx, f, []string{pod.Name})

@@ -366,7 +366,7 @@ func runDensityBatchTest(ctx context.Context, f *framework.Framework, rc *Resour

 gomega.Eventually(ctx, func() bool {
 return len(watchTimes) == testArg.podsNr
-}, 10*time.Minute, 10*time.Second).Should(gomega.BeTrue())
+}, 10*time.Minute, 10*time.Second).Should(gomega.BeTrueBecause("All pods should be observed by the watch."))

 if len(watchTimes) < testArg.podsNr {
 framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.")

@@ -106,7 +106,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
 framework.ExpectNoError(err)
 return nodes == 1
-}, time.Minute, time.Second).Should(gomega.BeTrue())
+}, time.Minute, time.Second).Should(gomega.BeTrueBecause("expected node to be ready"))

 // Before we run the device plugin test, we need to ensure
 // that the cluster is in a clean state and there are no
@@ -143,7 +143,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 gomega.Eventually(ctx, func(ctx context.Context) bool {
 node, ready := getLocalTestNode(ctx, f)
 return ready && CountSampleDeviceCapacity(node) > 0
-}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
+}, 5*time.Minute, framework.Poll).Should(gomega.BeTrueBecause("expected devices to be available on local node"))
 framework.Logf("Successfully created device plugin pod")

 ginkgo.By(fmt.Sprintf("Waiting for the resource exported by the sample device plugin to become available on the local node (instances: %d)", expectedSampleDevsAmount))
@@ -152,7 +152,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 return ready &&
 CountSampleDeviceCapacity(node) == expectedSampleDevsAmount &&
 CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount
-}, 30*time.Second, framework.Poll).Should(gomega.BeTrue())
+}, 30*time.Second, framework.Poll).Should(gomega.BeTrueBecause("expected resource to be available on local node"))
 })

 ginkgo.AfterEach(func(ctx context.Context) {
@@ -177,7 +177,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 gomega.Eventually(ctx, func(ctx context.Context) bool {
 node, ready := getLocalTestNode(ctx, f)
 return ready && CountSampleDeviceCapacity(node) <= 0
-}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
+}, 5*time.Minute, framework.Poll).Should(gomega.BeTrueBecause("expected devices to be unavailable on local node"))

 ginkgo.By("devices now unavailable on the local node")
 })
@@ -336,7 +336,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 return ready &&
 CountSampleDeviceCapacity(node) == expectedSampleDevsAmount &&
 CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount
-}, 30*time.Second, framework.Poll).Should(gomega.BeTrue())
+}, 30*time.Second, framework.Poll).Should(gomega.BeTrueBecause("expected resource to be available after restart"))

 ginkgo.By("Checking the same instance of the pod is still running")
 gomega.Eventually(ctx, getPodByName).
@@ -465,7 +465,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 return ready &&
 CountSampleDeviceCapacity(node) == expectedSampleDevsAmount &&
 CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount
-}, 30*time.Second, framework.Poll).Should(gomega.BeTrue())
+}, 30*time.Second, framework.Poll).Should(gomega.BeTrueBecause("expected resource to be available after re-registration"))

 // crosscheck that after device plugin restart the device assignment is preserved and
 // stable from the kubelet's perspective.
@@ -540,7 +540,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 return ready &&
 CountSampleDeviceCapacity(node) == expectedSampleDevsAmount &&
 CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount
-}, 30*time.Second, framework.Poll).Should(gomega.BeTrue())
+}, 30*time.Second, framework.Poll).Should(gomega.BeTrueBecause("expected resource to be available after restart"))

 ginkgo.By("Checking the same instance of the pod is still running after the device plugin restart")
 gomega.Eventually(ctx, getPodByName).
@@ -570,7 +570,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 ok := kubeletHealthCheck(kubeletHealthCheckURL)
 framework.Logf("kubelet health check at %q value=%v", kubeletHealthCheckURL, ok)
 return ok
-}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalse())
+}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalseBecause("expected kubelet health check to be failed"))

 framework.Logf("Delete the pod while the kubelet is not running")
 // Delete pod sync by name will force delete the pod, removing it from kubelet's config
@@ -584,7 +584,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
 ok := kubeletHealthCheck(kubeletHealthCheckURL)
 framework.Logf("kubelet health check at %q value=%v", kubeletHealthCheckURL, ok)
 return ok
-}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrue())
+}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))

 framework.Logf("wait for the pod %v to disappear", pod.Name)
 gomega.Eventually(ctx, func(ctx context.Context) error {
@@ -714,11 +714,13 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {

 ginkgo.BeforeEach(func(ctx context.Context) {
 ginkgo.By("Wait for node to be ready")
-gomega.Eventually(ctx, func(ctx context.Context) bool {
-nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
-framework.ExpectNoError(err)
-return nodes == 1
-}, time.Minute, time.Second).Should(gomega.BeTrue())
+gomega.Eventually(ctx, func(ctx context.Context) error {
+_, err := e2enode.TotalReady(ctx, f.ClientSet)
+if err != nil {
+return err
+}
+return nil
+}, time.Minute, time.Second).Should(gomega.Equal(1), "one node should be ready")

 // Before we run the device plugin test, we need to ensure
 // that the cluster is in a clean state and there are no
@@ -800,7 +802,7 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
 gomega.Eventually(ctx, func(ctx context.Context) bool {
 node, ready := getLocalTestNode(ctx, f)
 return ready && CountSampleDeviceCapacity(node) > 0
-}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
+}, 5*time.Minute, framework.Poll).Should(gomega.BeTrueBecause("expected devices to be available on the local node"))
 framework.Logf("Successfully created device plugin pod")

 ginkgo.By(fmt.Sprintf("Waiting for the resource exported by the sample device plugin to become available on the local node (instances: %d)", expectedSampleDevsAmount))
@@ -809,7 +811,7 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
 return ready &&
 CountSampleDeviceCapacity(node) == expectedSampleDevsAmount &&
 CountSampleDeviceAllocatable(node) == expectedSampleDevsAmount
-}, 30*time.Second, framework.Poll).Should(gomega.BeTrue())
+}, 30*time.Second, framework.Poll).Should(gomega.BeTrueBecause("expected resource to be available on local node"))
 })

 ginkgo.AfterEach(func(ctx context.Context) {
@@ -835,7 +837,7 @@ func testDevicePluginNodeReboot(f *framework.Framework, pluginSockDir string) {
 gomega.Eventually(ctx, func(ctx context.Context) bool {
 node, ready := getLocalTestNode(ctx, f)
 return ready && CountSampleDeviceCapacity(node) <= 0
-}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
+}, 5*time.Minute, framework.Poll).Should(gomega.BeTrueBecause("expected devices to be unavailable on local node"))

 ginkgo.By("devices now unavailable on the local node")
 })
@@ -934,15 +936,15 @@ func ensurePodContainerRestart(ctx context.Context, f *framework.Framework, podN
 framework.Failf("ensurePodContainerRestart failed for pod %q: %v", podName, err)
 }
 initialCount = p.Status.ContainerStatuses[0].RestartCount
-gomega.Eventually(ctx, func() bool {
+gomega.Eventually(ctx, func() int {
 p, err = e2epod.NewPodClient(f).Get(ctx, podName, metav1.GetOptions{})
 if err != nil || len(p.Status.ContainerStatuses) < 1 {
-return false
+return 0
 }
 currentCount = p.Status.ContainerStatuses[0].RestartCount
 framework.Logf("initial %v, current %v", initialCount, currentCount)
-return currentCount > initialCount
-}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
+return int(currentCount)
+}, 5*time.Minute, framework.Poll).Should(gomega.BeNumerically(">", initialCount))
 }

 // parseLog returns the matching string for the specified regular expression parsed from the container logs.

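The ensurePodContainerRestart hunk above shows the same idea with a typed value: returning the observed restart count from the polled function and asserting with BeNumerically(">", initial) puts the actual count into the failure output instead of a bare "false". A short sketch with invented counters (not the test's pod-status code):

package example

import (
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestPollTypedRestartCount(t *testing.T) {
	g := gomega.NewWithT(t)

	initialCount := 1
	current := 1
	readRestartCount := func() int { // stand-in for reading the pod's RestartCount
		current++
		return current
	}

	// On failure, Gomega reports the last observed count, not just "false".
	g.Eventually(func() int {
		return readRestartCount()
	}, 5*time.Second, 100*time.Millisecond).Should(gomega.BeNumerically(">", initialCount))
}
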
@@ -225,12 +225,12 @@ var _ = SIGDescribe("HugePages", framework.WithSerial(), feature.HugePages, "[No
 restartKubelet(true)

 ginkgo.By("verifying that the hugepages-3Mi resource no longer is present")
-gomega.Eventually(ctx, func() bool {
+gomega.Eventually(ctx, func() resource.Quantity {
 node, err = f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{})
 framework.ExpectNoError(err, "while getting node status")
-_, isPresent := node.Status.Capacity["hugepages-3Mi"]
-return isPresent
-}, 30*time.Second, framework.Poll).Should(gomega.BeFalse())
+// abc, error := node.Status.Capacity["hugepages-3Mi"]
+return node.Status.Capacity["hugepages-3Mi"]
+}, 30*time.Second, framework.Poll).Should(gomega.BeNil())
 })

 ginkgo.It("should add resources for new huge page sizes on kubelet restart", func(ctx context.Context) {
@@ -245,12 +245,11 @@ var _ = SIGDescribe("HugePages", framework.WithSerial(), feature.HugePages, "[No
 startKubelet()

 ginkgo.By("verifying that the hugepages-2Mi resource is present")
-gomega.Eventually(ctx, func() bool {
+gomega.Eventually(ctx, func() resource.Quantity {
 node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{})
 framework.ExpectNoError(err, "while getting node status")
-_, isPresent := node.Status.Capacity["hugepages-2Mi"]
-return isPresent
-}, 30*time.Second, framework.Poll).Should(gomega.BeTrue())
+return node.Status.Capacity["hugepages-2Mi"]
+}, 30*time.Second, framework.Poll).ShouldNot(gomega.BeNil())
 })

 ginkgo.When("start the pod", func() {

@@ -59,7 +59,7 @@ var _ = SIGDescribe("Kubelet Config", framework.WithSlow(), framework.WithSerial
 // wait until the kubelet health check will fail
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalse())
+}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalseBecause("expected kubelet health check to be failed"))

 configDir := framework.TestContext.KubeletConfigDropinDir

@@ -132,7 +132,7 @@ featureGates:
 // wait until the kubelet health check will succeed
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrue())
+}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))

 mergedConfig, err := getCurrentKubeletConfig(ctx)
 framework.ExpectNoError(err)

@@ -43,7 +43,7 @@ var _ = SIGDescribe("Lock contention", framework.WithSlow(), framework.WithDisru

 ginkgo.By("perform kubelet health check to check if kubelet is healthy and running.")
 // Precautionary check that kubelet is healthy before running the test.
-gomega.Expect(kubeletHealthCheck(kubeletHealthCheckURL)).To(gomega.BeTrue())
+gomega.Expect(kubeletHealthCheck(kubeletHealthCheckURL)).To(gomega.BeTrueBecause("expected kubelet to be in healthy state"))

 ginkgo.By("acquiring the lock on lock file i.e /var/run/kubelet.lock")
 // Open the file with the intention to acquire the lock, this would imitate the behaviour
@@ -71,6 +71,6 @@ var _ = SIGDescribe("Lock contention", framework.WithSlow(), framework.WithDisru
 // It should not be as the lock contention forces the kubelet to stop.
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, 10*time.Second, time.Second).Should(gomega.BeFalse())
+}, 10*time.Second, time.Second).Should(gomega.BeFalseBecause("expected kubelet to not be in healthy state"))
 })
 })

@@ -543,7 +543,7 @@ var _ = SIGDescribe("Memory Manager", framework.WithDisruptive(), framework.With
 for _, containerMemory := range containerResource.Memory {
 q := c.Resources.Limits[v1.ResourceName(containerMemory.MemoryType)]
 value, ok := q.AsInt64()
-gomega.Expect(ok).To(gomega.BeTrue())
+gomega.Expect(ok).To(gomega.BeTrueBecause("cannot convert value to integer"))
 gomega.Expect(value).To(gomega.BeEquivalentTo(containerMemory.Size_))
 }
 }
@@ -626,9 +626,9 @@ var _ = SIGDescribe("Memory Manager", framework.WithDisruptive(), framework.With

 return true
 }, time.Minute, 5*time.Second).Should(
-gomega.BeTrue(),
-"the pod succeeded to start, when it should fail with the admission error",
-)
+gomega.BeTrueBecause(
+"the pod succeeded to start, when it should fail with the admission error",
+))
 })

 ginkgo.JustAfterEach(func(ctx context.Context) {

@@ -243,7 +243,7 @@ var _ = SIGDescribe("MirrorPod", func() {
 // Wait 5 mins for syncTerminatedPod to fail. We expect that the pod volume should not be cleaned up because the NFS server is down.
 gomega.Consistently(func() bool {
 return podVolumeDirectoryExists(types.UID(hash))
-}, 5*time.Minute, 10*time.Second).Should(gomega.BeTrue(), "pod volume should exist while nfs server is stopped")
+}, 5*time.Minute, 10*time.Second).Should(gomega.BeTrueBecause("pod volume should exist while nfs server is stopped"))

 ginkgo.By("Start the NFS server")
 restartNfsServer(f, nfsServerPod)
@@ -251,7 +251,7 @@ var _ = SIGDescribe("MirrorPod", func() {
 ginkgo.By("Waiting for the pod volume to deleted after the NFS server is started")
 gomega.Eventually(func() bool {
 return podVolumeDirectoryExists(types.UID(hash))
-}, 5*time.Minute, 10*time.Second).Should(gomega.BeFalse(), "pod volume should be deleted after nfs server is started")
+}, 5*time.Minute, 10*time.Second).Should(gomega.BeFalseBecause("pod volume should be deleted after nfs server is started"))

 // Create the static pod again with the same config and expect it to start running
 err = createStaticPodUsingNfs(nfsServerHost, node, "sleep 999999", podPath, staticPodName, ns)

@@ -190,7 +190,7 @@ func runTest(ctx context.Context, f *framework.Framework) error {
 // wait until the kubelet health check will fail
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, time.Minute, time.Second).Should(gomega.BeFalse())
+}, time.Minute, time.Second).Should(gomega.BeFalseBecause("expected kubelet health check to be failed"))

 framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(oldCfg))

@@ -200,7 +200,7 @@ func runTest(ctx context.Context, f *framework.Framework) error {
 // wait until the kubelet health check will succeed
 gomega.Eventually(ctx, func(ctx context.Context) bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
+}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))
 }
 })
 if err := createTemporaryCgroupsForReservation(cgroupManager); err != nil {
@@ -218,7 +218,7 @@ func runTest(ctx context.Context, f *framework.Framework) error {
 // wait until the kubelet health check will fail
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, time.Minute, time.Second).Should(gomega.BeFalse())
+}, time.Minute, time.Second).Should(gomega.BeFalseBecause("expected kubelet health check to be failed"))

 expectedNAPodCgroup := cm.NewCgroupName(cm.RootCgroupName, nodeAllocatableCgroup)

@@ -240,7 +240,7 @@ func runTest(ctx context.Context, f *framework.Framework) error {
 // wait until the kubelet health check will succeed
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
+}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))

 if err != nil {
 return err

@@ -57,7 +57,7 @@ func setKubeletConfig(ctx context.Context, f *framework.Framework, cfg *kubeletc
 // wait until the kubelet health check will fail
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, time.Minute, time.Second).Should(gomega.BeFalse())
+}, time.Minute, time.Second).Should(gomega.BeFalseBecause("expected kubelet health check to be failed"))

 framework.ExpectNoError(e2enodekubelet.WriteKubeletConfigFile(cfg))

@@ -67,7 +67,7 @@ func setKubeletConfig(ctx context.Context, f *framework.Framework, cfg *kubeletc
 // wait until the kubelet health check will succeed
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
+}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))
 }

 // Wait for the Kubelet to be ready.
@@ -75,7 +75,7 @@ func setKubeletConfig(ctx context.Context, f *framework.Framework, cfg *kubeletc
 nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
 framework.ExpectNoError(err)
 return nodes == 1
-}, time.Minute, time.Second).Should(gomega.BeTrue())
+}, time.Minute, time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in ready state"))
 }

 // Serial because the test updates kubelet configuration.

@@ -126,7 +126,7 @@ func runPodFailingConditionsTest(f *framework.Framework, hasInitContainers, chec
 // Verify PodInitialized is set if init containers are not present (since without init containers, it gets set very early)
 initializedTime, err := getTransitionTimeForPodConditionWithStatus(p, v1.PodInitialized, true)
 framework.ExpectNoError(err)
-gomega.Expect(initializedTime.Before(scheduledTime)).NotTo(gomega.BeTrue(), fmt.Sprintf("pod without init containers is initialized at: %v which is before pod scheduled at: %v", initializedTime, scheduledTime))
+gomega.Expect(initializedTime.Before(scheduledTime)).NotTo(gomega.BeTrueBecause("pod without init containers is initialized at: %v which is before pod scheduled at: %v", initializedTime, scheduledTime))
 }

 // Verify ContainersReady is not set (since sandboxcreation is blocked)
@@ -228,28 +228,28 @@ func runPodReadyConditionsTest(f *framework.Framework, hasInitContainers, checkP

 if hasInitContainers {
 // With init containers, verify the sequence of conditions is: Scheduled => PodReadyToStartContainers => Initialized
-gomega.Expect(readyToStartContainersTime.Before(scheduledTime)).ToNot(gomega.BeTrue(), fmt.Sprintf("pod with init containers is initialized at: %v which is before pod has ready to start at: %v", initializedTime, readyToStartContainersTime))
-gomega.Expect(initializedTime.Before(readyToStartContainersTime)).ToNot(gomega.BeTrue(), fmt.Sprintf("pod with init containers is initialized at: %v which is before pod has ready to start at: %v", initializedTime, readyToStartContainersTime))
+gomega.Expect(readyToStartContainersTime.Before(scheduledTime)).ToNot(gomega.BeTrueBecause("pod with init containers is initialized at: %v which is before pod has ready to start at: %v", initializedTime, readyToStartContainersTime))
+gomega.Expect(initializedTime.Before(readyToStartContainersTime)).ToNot(gomega.BeTrueBecause("pod with init containers is initialized at: %v which is before pod has ready to start at: %v", initializedTime, readyToStartContainersTime))
 } else {
 // Without init containers, verify the sequence of conditions is: Scheduled => Initialized => PodReadyToStartContainers
 condBeforeContainersReadyTransitionTime = readyToStartContainersTime
 errSubstrIfContainersReadyTooEarly = "ready to start"
-gomega.Expect(initializedTime.Before(scheduledTime)).NotTo(gomega.BeTrue(), fmt.Sprintf("pod without init containers initialized at: %v which is before pod scheduled at: %v", initializedTime, scheduledTime))
-gomega.Expect(readyToStartContainersTime.Before(initializedTime)).NotTo(gomega.BeTrue(), fmt.Sprintf("pod without init containers has ready to start at: %v which is before pod is initialized at: %v", readyToStartContainersTime, initializedTime))
+gomega.Expect(initializedTime.Before(scheduledTime)).NotTo(gomega.BeTrueBecause("pod without init containers initialized at: %v which is before pod scheduled at: %v", initializedTime, scheduledTime))
+gomega.Expect(readyToStartContainersTime.Before(initializedTime)).NotTo(gomega.BeTrueBecause("pod without init containers has ready to start at: %v which is before pod is initialized at: %v", readyToStartContainersTime, initializedTime))
 }
 } else {
 // In the absence of PodHasReadyToStartContainers feature disabled, verify the sequence is: Scheduled => Initialized
-gomega.Expect(initializedTime.Before(scheduledTime)).NotTo(gomega.BeTrue(), fmt.Sprintf("pod initialized at: %v which is before pod scheduled at: %v", initializedTime, scheduledTime))
+gomega.Expect(initializedTime.Before(scheduledTime)).NotTo(gomega.BeTrueBecause("pod initialized at: %v which is before pod scheduled at: %v", initializedTime, scheduledTime))
 }
 // Verify the next condition to get set is ContainersReady
 containersReadyTime, err := getTransitionTimeForPodConditionWithStatus(p, v1.ContainersReady, true)
 framework.ExpectNoError(err)
-gomega.Expect(containersReadyTime.Before(condBeforeContainersReadyTransitionTime)).NotTo(gomega.BeTrue(), fmt.Sprintf("containers ready at: %v which is before pod %s: %v", containersReadyTime, errSubstrIfContainersReadyTooEarly, initializedTime))
+gomega.Expect(containersReadyTime.Before(condBeforeContainersReadyTransitionTime)).NotTo(gomega.BeTrueBecause("containers ready at: %v which is before pod %s: %v", containersReadyTime, errSubstrIfContainersReadyTooEarly, initializedTime))

 // Verify ContainersReady => PodReady
 podReadyTime, err := getTransitionTimeForPodConditionWithStatus(p, v1.PodReady, true)
 framework.ExpectNoError(err)
-gomega.Expect(podReadyTime.Before(containersReadyTime)).NotTo(gomega.BeTrue(), fmt.Sprintf("pod ready at: %v which is before pod containers ready at: %v", podReadyTime, containersReadyTime))
+gomega.Expect(podReadyTime.Before(containersReadyTime)).NotTo(gomega.BeTrueBecause("pod ready at: %v which is before pod containers ready at: %v", podReadyTime, containersReadyTime))
 }
 }

@@ -740,7 +740,7 @@ func podresourcesGetAllocatableResourcesTests(ctx context.Context, cli kubeletpo
 expectedCPUs := onlineCPUs.Difference(reservedSystemCPUs)

 ginkgo.By(fmt.Sprintf("expecting CPUs '%v'='%v'", allocatableCPUs, expectedCPUs))
-gomega.ExpectWithOffset(1, allocatableCPUs.Equals(expectedCPUs)).To(gomega.BeTrue(), "mismatch expecting CPUs")
+gomega.ExpectWithOffset(1, allocatableCPUs.Equals(expectedCPUs)).To(gomega.BeTrueBecause("mismatch expecting CPUs"))
 }

 if sd == nil { // no devices in the environment, so expect no devices
@@ -1326,7 +1326,7 @@ func waitForTopologyUnawareResources(ctx context.Context, f *framework.Framework
 node := getLocalNode(ctx, f)
 resourceAmount := CountSampleDeviceAllocatable(node)
 return resourceAmount > 0
-}, 2*time.Minute, framework.Poll).Should(gomega.BeTrue())
+}, 2*time.Minute, framework.Poll).Should(gomega.BeTrueBecause("expected %q resources to be available, got no resources", defaultTopologyUnawareResourceName))
 }

 func getPodResourcesMetrics(ctx context.Context) (e2emetrics.KubeletMetrics, error) {

@@ -313,7 +313,7 @@ var _ = SIGDescribe("Restart", framework.WithSerial(), framework.WithSlow(), fra
 // wait until the kubelet health check will fail
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalse())
+}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalseBecause("expected kubelet health check to be failed"))

 ginkgo.By("Starting the kubelet")
 startKubelet()
@@ -321,14 +321,14 @@ var _ = SIGDescribe("Restart", framework.WithSerial(), framework.WithSlow(), fra
 // wait until the kubelet health check will succeed
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrue())
+}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))

 // Wait for the Kubelet to be ready.
 gomega.Eventually(ctx, func(ctx context.Context) bool {
 nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
 framework.ExpectNoError(err)
 return nodes == 1
-}, time.Minute, f.Timeouts.Poll).Should(gomega.BeTrue())
+}, time.Minute, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in ready state"))

 ginkgo.By(fmt.Sprintf("After the kubelet is restarted, verify the pod (%s/%s) is deleted by kubelet", pod.Namespace, pod.Name))
 gomega.Eventually(ctx, func(ctx context.Context) error {
@@ -366,7 +366,7 @@ var _ = SIGDescribe("Restart", framework.WithSerial(), framework.WithSlow(), fra
 // wait until the kubelet health check will fail
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalse())
+}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalseBecause("expected kubelet health check to be failed"))

 // Create the pod bound to the node. It will remain in the Pending
 // phase as Kubelet is down.
@@ -384,14 +384,14 @@ var _ = SIGDescribe("Restart", framework.WithSerial(), framework.WithSlow(), fra
 // wait until the kubelet health check will succeed
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrue())
+}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))

 // Wait for the Kubelet to be ready.
 gomega.Eventually(ctx, func(ctx context.Context) bool {
 nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
 framework.ExpectNoError(err)
 return nodes == 1
-}, time.Minute, f.Timeouts.Poll).Should(gomega.BeTrue())
+}, time.Minute, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in ready state"))

 ginkgo.By(fmt.Sprintf("After the kubelet is restarted, verify the pod (%v/%v) is deleted by kubelet", pod.Namespace, pod.Name))
 gomega.Eventually(ctx, func(ctx context.Context) error {
@@ -444,7 +444,7 @@ var _ = SIGDescribe("Restart", framework.WithSerial(), framework.WithSlow(), fra
 // wait until the kubelet health check will fail
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalse())
+}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalseBecause("expected kubelet health check to be failed"))

 ginkgo.By(fmt.Sprintf("Deleting the pod (%v/%v) to set a deletion timestamp", pod.Namespace, pod.Name))
 err = e2epod.NewPodClient(f).Delete(ctx, pod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gracePeriod})
@@ -460,14 +460,14 @@ var _ = SIGDescribe("Restart", framework.WithSerial(), framework.WithSlow(), fra
 // wait until the kubelet health check will succeed
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrue())
+}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))

 // Wait for the Kubelet to be ready.
 gomega.Eventually(ctx, func(ctx context.Context) bool {
 nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
 framework.ExpectNoError(err)
 return nodes == 1
-}, time.Minute, f.Timeouts.Poll).Should(gomega.BeTrue())
+}, time.Minute, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in ready state"))

 ginkgo.By(fmt.Sprintf("Once Kubelet is restarted, verify the pod (%v/%v) is deleted by kubelet", pod.Namespace, pod.Name))
 gomega.Eventually(ctx, func(ctx context.Context) error {

@@ -381,7 +381,7 @@ func runPodAndWaitUntilScheduled(f *framework.Framework, pod *v1.Pod) *v1.Pod {

 isReady, err := testutils.PodRunningReady(pod)
 framework.ExpectNoError(err)
-gomega.ExpectWithOffset(1, isReady).To(gomega.BeTrueBecause("pod should be ready"))
+gomega.ExpectWithOffset(1, isReady).To(gomega.BeTrueBecause("pod %+v was expected to be ready", pod))

 return pod
 }

@@ -78,7 +78,7 @@ var _ = SIGDescribe("Terminate Pods", func() {
 }
 }
 return false
-}, 20*time.Second, 1*time.Second).Should(gomega.BeTrue())
+}, 20*time.Second, 1*time.Second).Should(gomega.BeTrueBecause("expected container to be ready"))

 err := client.Delete(context.Background(), pod.Name, metav1.DeleteOptions{})

@@ -87,7 +87,7 @@ var _ = SIGDescribe("Terminate Pods", func() {
 gomega.Eventually(ctx, func() bool {
 _, err := client.Get(context.TODO(), pod.Name, metav1.GetOptions{})
 return apierrors.IsNotFound(err)
-}, 10*time.Second, time.Second).Should(gomega.BeTrue())
+}, 10*time.Second, time.Second).Should(gomega.BeTrueBecause("expected pod to disappear from API server within 10 seconds"))

 framework.ExpectNoError(err)
 })

@@ -536,7 +536,7 @@ func createSRIOVPodOrFail(ctx context.Context, f *framework.Framework) *v1.Pod {
 return dpPod
 }

-// waitForSRIOVResources waits until enough SRIOV resources are avaailable, expecting to complete within the timeout.
+// waitForSRIOVResources waits until enough SRIOV resources are available, expecting to complete within the timeout.
 // if exits successfully, updates the sriovData with the resources which were found.
 func waitForSRIOVResources(ctx context.Context, f *framework.Framework, sd *sriovData) {
 sriovResourceName := ""
@@ -546,7 +546,7 @@ func waitForSRIOVResources(ctx context.Context, f *framework.Framework, sd *srio
 node := getLocalNode(ctx, f)
 sriovResourceName, sriovResourceAmount = findSRIOVResource(node)
 return sriovResourceAmount > minSriovResource
-}, 2*time.Minute, framework.Poll).Should(gomega.BeTrue())
+}, 2*time.Minute, framework.Poll).Should(gomega.BeTrueBecause("expected SRIOV resources to be available within the timout"))

 sd.resourceName = sriovResourceName
 sd.resourceAmount = sriovResourceAmount

@@ -77,7 +77,7 @@ var _ = SIGDescribe("Unknown Pods", framework.WithSerial(), framework.WithDisrup
 // wait until the kubelet health check will fail
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalse())
+}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalseBecause("expected kubelet health check to be failed"))

 framework.Logf("Delete the static pod manifest while the kubelet is not running")
 file := staticPodPath(podPath, staticPodName, ns)
@@ -91,7 +91,7 @@ var _ = SIGDescribe("Unknown Pods", framework.WithSerial(), framework.WithDisrup
 // wait until the kubelet health check will succeed
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrue())
+}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))

 framework.Logf("wait for the mirror pod %v to disappear", mirrorPodName)
 gomega.Eventually(ctx, func(ctx context.Context) error {
@@ -148,7 +148,7 @@ var _ = SIGDescribe("Unknown Pods", framework.WithSerial(), framework.WithDisrup
 // wait until the kubelet health check will fail
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalse())
+}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeFalseBecause("expected kubelet health check to be failed"))

 framework.Logf("Delete the pod while the kubelet is not running")
 // Delete pod sync by name will force delete the pod, removing it from kubelet's config
@@ -160,7 +160,7 @@ var _ = SIGDescribe("Unknown Pods", framework.WithSerial(), framework.WithDisrup
 // wait until the kubelet health check will succeed
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrue())
+}, f.Timeouts.PodStart, f.Timeouts.Poll).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))

 framework.Logf("wait for the pod %v to disappear", podName)
 gomega.Eventually(ctx, func(ctx context.Context) error {

@@ -221,7 +221,7 @@ func updateKubeletConfig(ctx context.Context, f *framework.Framework, kubeletCon
 // wait until the kubelet health check will fail
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, time.Minute, time.Second).Should(gomega.BeFalse())
+}, time.Minute, time.Second).Should(gomega.BeFalseBecause("expected kubelet health check to be failed"))

 // Delete CPU and memory manager state files to be sure it will not prevent the kubelet restart
 if deleteStateFiles {
@@ -240,14 +240,14 @@ func waitForKubeletToStart(ctx context.Context, f *framework.Framework) {
 // wait until the kubelet health check will succeed
 gomega.Eventually(ctx, func() bool {
 return kubeletHealthCheck(kubeletHealthCheckURL)
-}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrue())
+}, 2*time.Minute, 5*time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in healthy state"))

 // Wait for the Kubelet to be ready.
 gomega.Eventually(ctx, func(ctx context.Context) bool {
 nodes, err := e2enode.TotalReady(ctx, f.ClientSet)
 framework.ExpectNoError(err)
 return nodes == 1
-}, time.Minute, time.Second).Should(gomega.BeTrue())
+}, time.Minute, time.Second).Should(gomega.BeTrueBecause("expected kubelet to be in ready state"))
 }

 func deleteStateFile(stateFileName string) {