Merge pull request #114469 from pohly/e2e-ginkgo-timeouts-cleanups

e2e ginkgo timeouts: cleanup commits
Kubernetes Prow Robot 2022-12-14 01:01:34 -08:00 committed by GitHub
commit a35650b833
6 changed files with 31 additions and 26 deletions


@@ -162,7 +162,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		found, unchanged := 0, 0
 		// On contended servers the service account controller can slow down, leading to the count changing during a run.
 		// Wait up to 5s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
-		wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
+		err := wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
 			secrets, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
 			framework.ExpectNoError(err)
 			if len(secrets.Items) == found {
@@ -174,6 +174,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 			found = len(secrets.Items)
 			return false, nil
 		})
+		framework.ExpectNoError(err)
 		defaultSecrets := fmt.Sprintf("%d", found)
 		hardSecrets := fmt.Sprintf("%d", found+1)
@@ -327,7 +328,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 		found, unchanged := 0, 0
 		// On contended servers the service account controller can slow down, leading to the count changing during a run.
 		// Wait up to 15s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
-		wait.Poll(1*time.Second, time.Minute, func() (bool, error) {
+		err := wait.Poll(1*time.Second, time.Minute, func() (bool, error) {
 			configmaps, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
 			framework.ExpectNoError(err)
 			if len(configmaps.Items) == found {
@@ -339,6 +340,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
 			found = len(configmaps.Items)
 			return false, nil
 		})
+		framework.ExpectNoError(err)
 		defaultConfigMaps := fmt.Sprintf("%d", found)
 		hardConfigMaps := fmt.Sprintf("%d", found+1)
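The same fix appears in both hunks: wait.Poll reports a timeout through its return value (wait.ErrWaitTimeout), and discarding that value lets the test continue with a count that may never have stabilized. A minimal, self-contained sketch of the pattern; the observeCount helper is illustrative, not part of the commit:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// observeCount is a hypothetical stand-in for the List call in the test.
func observeCount() int { return 0 }

func main() {
	found, unchanged := 0, 0
	// Poll once per second, for up to 30 seconds, until the observed count
	// has been stable for five consecutive polls.
	err := wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
		current := observeCount()
		if current == found {
			unchanged++
			return unchanged > 4, nil
		}
		unchanged = 0
		found = current
		return false, nil
	})
	// This is the check the commit adds; in the test itself
	// framework.ExpectNoError(err) plays this role. Without it, a timeout
	// is silently dropped and the test proceeds with an unstable count.
	if err != nil {
		fmt.Printf("count never stabilized: %v\n", err)
	}
}
```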


@@ -539,8 +539,10 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
 		framework.Failf("Failed to create client pod: %v", err)
 	}
 	defer func() {
+		// testVolumeClient might get used more than once per test, therefore
+		// we have to clean up before returning.
 		e2epod.DeletePodOrFail(f.ClientSet, clientPod.Namespace, clientPod.Name)
-		e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
+		framework.ExpectNoError(e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete))
 	}()

 	testVolumeContent(f, clientPod, "", fsGroup, fsType, tests)
@@ -572,8 +574,10 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs
 		return
 	}
 	defer func() {
+		// This pod must get deleted before the function returns because the test relies on
+		// the volume not being in use.
 		e2epod.DeletePodOrFail(f.ClientSet, injectorPod.Namespace, injectorPod.Name)
-		e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
+		framework.ExpectNoError(e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete))
 	}()

 	ginkgo.By("Writing text file contents in the container.")
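Both hunks harden a deferred cleanup: DeletePodOrFail already fails the test when the delete itself errors, but the result of WaitForPodToDisappear was dropped, so a pod that lingered past timeouts.PodDelete went unnoticed even though later steps rely on the volume no longer being in use. A self-contained sketch of why unchecked calls inside a defer are easy to miss; waitForGone is a hypothetical stand-in:

```go
package main

import (
	"errors"
	"fmt"
)

// waitForGone stands in for e2epod.WaitForPodToDisappear (illustrative only).
func waitForGone() error { return errors.New("timed out waiting for pod to disappear") }

// runTest shows the shape of the fix: a cleanup error in a defer must be
// surfaced, not dropped.
func runTest() (failure error) {
	defer func() {
		// Before the fix this was effectively `_ = waitForGone()`. In the
		// e2e framework, framework.ExpectNoError(err) plays the role of
		// this check and fails the spec immediately.
		if err := waitForGone(); err != nil && failure == nil {
			failure = err
		}
	}()
	return nil
}

func main() {
	fmt.Println(runTest()) // prints the cleanup error instead of <nil>
}
```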


@@ -398,7 +398,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
 			return fmt.Errorf("received the wrong port: %d", p)
 		}
 		return nil
-	}, time.Minute, 10*time.Second).Should(gomega.BeNil())
+	}, time.Minute, 10*time.Second).Should(gomega.Succeed())

 	gomega.Eventually(func() error {
 		channel, msg, err := wsRead(ws)
@@ -412,7 +412,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
 			return fmt.Errorf("received the wrong port: %d", p)
 		}
 		return nil
-	}, time.Minute, 10*time.Second).Should(gomega.BeNil())
+	}, time.Minute, 10*time.Second).Should(gomega.Succeed())

 	ginkgo.By("Sending the expected data to the local port")
 	err = wsWrite(ws, 0, []byte("def"))
@@ -436,7 +436,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
 			return fmt.Errorf("expected %q from server, got %q", expectedData, buf.Bytes())
 		}
 		return nil
-	}, time.Minute, 10*time.Second).Should(gomega.BeNil())
+	}, time.Minute, 10*time.Second).Should(gomega.Succeed())

 	ginkgo.By("Verifying logs")
 	gomega.Eventually(func() (string, error) {
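All three hunks swap gomega.BeNil() for gomega.Succeed(). Both matchers pass once the polled function returns a nil error, but Succeed() is the matcher intended for error-returning functions: on failure it reports the returned error itself, while BeNil() only reports that some value was not nil. A small sketch; probe is a hypothetical stand-in for the websocket checks:

```go
package example

import (
	"errors"
	"time"

	"github.com/onsi/gomega"
)

// probe is a hypothetical check standing in for the wsRead-based polls above.
func probe() error { return errors.New("not ready yet") }

// assertEventuallyReady polls probe with the same interval/timeout style as
// the test; on timeout the failure message includes probe's error text.
func assertEventuallyReady(g gomega.Gomega) {
	g.Eventually(probe, time.Minute, 10*time.Second).Should(gomega.Succeed())
}
```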


@@ -102,7 +102,6 @@ type testParameters struct {
 type mockDriverSetup struct {
 	cs           clientset.Interface
 	config       *storageframework.PerTestConfig
-	testCleanups []func()
 	pods         []*v1.Pod
 	pvcs         []*v1.PersistentVolumeClaim
 	sc           map[string]*storagev1.StorageClass
@@ -236,10 +235,6 @@ func (m *mockDriverSetup) cleanup() {
 		ginkgo.By(fmt.Sprintf("Deleting volumesnapshotclass %s", vsc.GetName()))
 		m.config.Framework.DynamicClient.Resource(utils.SnapshotClassGVR).Delete(context.TODO(), vsc.GetName(), metav1.DeleteOptions{})
 	}
-	ginkgo.By("Cleaning up resources")
-	for _, cleanupFunc := range m.testCleanups {
-		cleanupFunc()
-	}

 	err := utilerrors.NewAggregate(errs)
 	framework.ExpectNoError(err, "while cleaning up after test")
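This hunk removes the hand-rolled testCleanups slice together with the loop that ran it. The diff does not show where those registrations moved, but the usual Ginkgo v2 replacement is ginkgo.DeferCleanup, which runs registered callbacks in reverse order when the spec ends, even if the spec failed. A hedged sketch of that pattern; resource and createResource are illustrative:

```go
package example

import (
	"github.com/onsi/ginkgo/v2"
)

// resource is a hypothetical object created during test setup.
type resource struct{ name string }

func (r *resource) delete() { /* delete the object from the cluster */ }

// createResource registers its own cleanup at the point of creation instead
// of appending to a shared testCleanups slice that must be drained manually.
func createResource(name string) *resource {
	r := &resource{name: name}
	ginkgo.DeferCleanup(r.delete)
	return r
}
```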


@@ -470,7 +470,8 @@ func bindRBACRoleToServiceAccount(f *framework.Framework, serviceAccountName, rbacRoleName string) {
 			Name: rbacRoleName,
 		},
 	}
-	f.ClientSet.RbacV1().RoleBindings(f.Namespace.Name).Create(context.TODO(), binding, metav1.CreateOptions{})
+	_, err := f.ClientSet.RbacV1().RoleBindings(f.Namespace.Name).Create(context.TODO(), binding, metav1.CreateOptions{})
+	framework.ExpectNoError(err)
 }

 func bindClusterRBACRoleToServiceAccount(f *framework.Framework, serviceAccountName, rbacRoleName string) {
@@ -492,7 +493,8 @@ func bindClusterRBACRoleToServiceAccount(f *framework.Framework, serviceAccountName, rbacRoleName string) {
 			Name: rbacRoleName,
 		},
 	}
-	f.ClientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), binding, metav1.CreateOptions{})
+	_, err := f.ClientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), binding, metav1.CreateOptions{})
+	framework.ExpectNoError(err)
 }

 // createPodWithGmsa creates a pod using the test GMSA cred spec, and returns its name.
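Both hunks apply the same fix: client-go's Create returns (object, error), and ignoring the error means a role binding that failed to be created only surfaces later as a confusing authorization failure. A self-contained sketch of the pattern; mustCreateRoleBinding is an illustrative helper, not framework code:

```go
package example

import (
	"context"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// mustCreateRoleBinding fails fast at the point of creation, so a broken
// binding is reported where it happened rather than where it is first used.
func mustCreateRoleBinding(cs kubernetes.Interface, ns string, binding *rbacv1.RoleBinding) *rbacv1.RoleBinding {
	created, err := cs.RbacV1().RoleBindings(ns).Create(context.TODO(), binding, metav1.CreateOptions{})
	if err != nil {
		// In the e2e framework this would be framework.ExpectNoError(err).
		panic(err)
	}
	return created
}
```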


@@ -58,7 +58,8 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats [Serial]", func() {
 		ginkgo.By("Waiting up to 3 minutes for pods to be running")
 		timeout := 3 * time.Minute
-		e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 10, 0, timeout, make(map[string]string))
+		err = e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 10, 0, timeout, make(map[string]string))
+		framework.ExpectNoError(err)

 		ginkgo.By("Getting kubelet stats 5 times and checking average duration")
 		iterations := 5
@@ -148,7 +149,8 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats", func() {
 		ginkgo.By("Waiting up to 3 minutes for pods to be running")
 		timeout := 3 * time.Minute
-		e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 3, 0, timeout, make(map[string]string))
+		err = e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 3, 0, timeout, make(map[string]string))
+		framework.ExpectNoError(err)

 		ginkgo.By("Getting kubelet stats 1 time")
 		iterations := 1
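As in the other files, capturing err is only half the fix; it matters because the very next line asserts on it. A conceptual sketch of what framework.ExpectNoError amounts to (not the framework's exact source):

```go
package example

import (
	"fmt"

	"github.com/onsi/ginkgo/v2"
)

// expectNoError turns a returned error into an immediate spec failure, which
// is why every newly captured err in this commit is paired with it.
func expectNoError(err error, explain ...interface{}) {
	if err != nil {
		ginkgo.Fail(fmt.Sprintf("unexpected error: %v %v", err, explain))
	}
}
```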