Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-19 09:52:49 +00:00)

Merge pull request #114469 from pohly/e2e-ginkgo-timeouts-cleanups

e2e ginkgo timeouts: cleanup commits

Commit a35650b833
@@ -162,7 +162,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
         found, unchanged := 0, 0
         // On contended servers the service account controller can slow down, leading to the count changing during a run.
         // Wait up to 5s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
-        wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
+        err := wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
             secrets, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
             framework.ExpectNoError(err)
             if len(secrets.Items) == found {
@@ -174,6 +174,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
             found = len(secrets.Items)
             return false, nil
         })
+        framework.ExpectNoError(err)
         defaultSecrets := fmt.Sprintf("%d", found)
         hardSecrets := fmt.Sprintf("%d", found+1)

@@ -327,7 +328,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
         found, unchanged := 0, 0
         // On contended servers the service account controller can slow down, leading to the count changing during a run.
         // Wait up to 15s for the count to stabilize, assuming that updates come at a consistent rate, and are not held indefinitely.
-        wait.Poll(1*time.Second, time.Minute, func() (bool, error) {
+        err := wait.Poll(1*time.Second, time.Minute, func() (bool, error) {
             configmaps, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{})
             framework.ExpectNoError(err)
             if len(configmaps.Items) == found {
@@ -339,6 +340,7 @@ var _ = SIGDescribe("ResourceQuota", func() {
             found = len(configmaps.Items)
             return false, nil
         })
+        framework.ExpectNoError(err)
         defaultConfigMaps := fmt.Sprintf("%d", found)
         hardConfigMaps := fmt.Sprintf("%d", found+1)

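The change in both hunks is the same: the error returned by wait.Poll is captured and asserted with framework.ExpectNoError instead of being discarded. A minimal sketch of the stabilization pattern under that change, assuming a hypothetical helper name (waitForStableSecretCount) and a stability threshold of a few consecutive identical counts, which the hunks themselves do not show:

package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForStableSecretCount polls until the number of Secrets in the namespace
// stops changing between iterations and returns the last observed count.
// The error from wait.Poll is returned to the caller instead of being dropped.
func waitForStableSecretCount(cs clientset.Interface, namespace string) (int, error) {
	found, unchanged := 0, 0
	err := wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
		secrets, err := cs.CoreV1().Secrets(namespace).List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		if len(secrets.Items) == found {
			// Count unchanged since the last poll; stop once it has been
			// stable for several consecutive observations (assumed threshold).
			unchanged++
			return unchanged > 4, nil
		}
		// Count changed: reset the stability counter and keep polling.
		unchanged = 0
		found = len(secrets.Items)
		return false, nil
	})
	return found, err
}

The ConfigMaps hunk is the same shape with a longer stabilization window; only the handling of the poll's return value changes.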
@@ -539,8 +539,10 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
         framework.Failf("Failed to create client pod: %v", err)
     }
     defer func() {
+        // testVolumeClient might get used more than once per test, therefore
+        // we have to clean up before returning.
         e2epod.DeletePodOrFail(f.ClientSet, clientPod.Namespace, clientPod.Name)
-        e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
+        framework.ExpectNoError(e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete))
     }()

     testVolumeContent(f, clientPod, "", fsGroup, fsType, tests)
@@ -572,8 +574,10 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs
         return
     }
     defer func() {
+        // This pod must get deleted before the function returns becaue the test relies on
+        // the volume not being in use.
         e2epod.DeletePodOrFail(f.ClientSet, injectorPod.Namespace, injectorPod.Name)
-        e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
+        framework.ExpectNoError(e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete))
     }()

     ginkgo.By("Writing text file contents in the container.")
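Both deferred cleanups now assert the result of WaitForPodToDisappear, so a pod that never terminates fails the test at cleanup time instead of silently leaving the volume in use for later steps. A minimal sketch of the pattern, assuming the e2e helpers used in the diff; the wrapper name is hypothetical and f.Timeouts.PodDelete stands in for the timeouts variable shown in the hunks:

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// cleanupTestPod deletes the pod and waits for it to disappear. Wrapping the
// wait in framework.ExpectNoError turns a wait failure into a test failure
// instead of an ignored return value.
func cleanupTestPod(f *framework.Framework, pod *v1.Pod) {
	e2epod.DeletePodOrFail(f.ClientSet, pod.Namespace, pod.Name)
	framework.ExpectNoError(e2epod.WaitForPodToDisappear(
		f.ClientSet, pod.Namespace, pod.Name,
		labels.Everything(), framework.Poll, f.Timeouts.PodDelete))
}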
@@ -398,7 +398,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
             return fmt.Errorf("received the wrong port: %d", p)
         }
         return nil
-    }, time.Minute, 10*time.Second).Should(gomega.BeNil())
+    }, time.Minute, 10*time.Second).Should(gomega.Succeed())

     gomega.Eventually(func() error {
         channel, msg, err := wsRead(ws)
@@ -412,7 +412,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
             return fmt.Errorf("received the wrong port: %d", p)
         }
         return nil
-    }, time.Minute, 10*time.Second).Should(gomega.BeNil())
+    }, time.Minute, 10*time.Second).Should(gomega.Succeed())

     ginkgo.By("Sending the expected data to the local port")
     err = wsWrite(ws, 0, []byte("def"))
@@ -436,7 +436,7 @@ func doTestOverWebSockets(bindAddress string, f *framework.Framework) {
             return fmt.Errorf("expected %q from server, got %q", expectedData, buf.Bytes())
         }
         return nil
-    }, time.Minute, 10*time.Second).Should(gomega.BeNil())
+    }, time.Minute, 10*time.Second).Should(gomega.Succeed())

     ginkgo.By("Verifying logs")
     gomega.Eventually(func() (string, error) {
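All three assertions switch gomega.BeNil() to gomega.Succeed(). Both accept a nil result, but Succeed() is the matcher meant for error-returning functions and gives a clearer failure message when the error is non-nil. A small standalone sketch of the Eventually idiom, using gomega.NewWithT so it runs outside ginkgo; readPort and wantPort are placeholders:

package example

import (
	"fmt"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestEventuallySucceed(t *testing.T) {
	g := gomega.NewWithT(t)

	readPort := func() (int, error) { return 80, nil } // placeholder for wsRead-style logic
	wantPort := 80

	// Eventually keeps calling the function until it returns nil or the
	// one-minute timeout elapses, polling every ten seconds.
	g.Eventually(func() error {
		p, err := readPort()
		if err != nil {
			return err
		}
		if p != wantPort {
			return fmt.Errorf("received the wrong port: %d", p)
		}
		return nil
	}, time.Minute, 10*time.Second).Should(gomega.Succeed())
}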
@@ -100,17 +100,16 @@ type testParameters struct {
 }

 type mockDriverSetup struct {
     cs clientset.Interface
     config *storageframework.PerTestConfig
-    testCleanups []func()
     pods []*v1.Pod
     pvcs []*v1.PersistentVolumeClaim
     sc map[string]*storagev1.StorageClass
     vsc map[string]*unstructured.Unstructured
     driver drivers.MockCSITestDriver
     provisioner string
     tp testParameters
     f *framework.Framework
 }

 type volumeType string
@@ -236,10 +235,6 @@ func (m *mockDriverSetup) cleanup() {
         ginkgo.By(fmt.Sprintf("Deleting volumesnapshotclass %s", vsc.GetName()))
         m.config.Framework.DynamicClient.Resource(utils.SnapshotClassGVR).Delete(context.TODO(), vsc.GetName(), metav1.DeleteOptions{})
     }
-    ginkgo.By("Cleaning up resources")
-    for _, cleanupFunc := range m.testCleanups {
-        cleanupFunc()
-    }

     err := utilerrors.NewAggregate(errs)
     framework.ExpectNoError(err, "while cleaning up after test")
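With the testCleanups field and its loop removed, cleanup() keeps only the error-aggregation idiom: failures collected in errs are combined with utilerrors.NewAggregate and asserted once, so one failed deletion does not mask the others. A minimal sketch of that idiom with a hypothetical helper name (runCleanups):

package example

import (
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
)

// runCleanups runs every cleanup step, collecting failures instead of stopping
// at the first one, and returns them as a single aggregate error.
func runCleanups(steps []func() error) error {
	var errs []error
	for _, step := range steps {
		if err := step(); err != nil {
			errs = append(errs, err)
		}
	}
	return utilerrors.NewAggregate(errs)
}

utilerrors.NewAggregate skips nil entries and returns nil when nothing failed, so the result can be passed straight to framework.ExpectNoError.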
@@ -470,7 +470,8 @@ func bindRBACRoleToServiceAccount(f *framework.Framework, serviceAccountName, rb
             Name: rbacRoleName,
         },
     }
-    f.ClientSet.RbacV1().RoleBindings(f.Namespace.Name).Create(context.TODO(), binding, metav1.CreateOptions{})
+    _, err := f.ClientSet.RbacV1().RoleBindings(f.Namespace.Name).Create(context.TODO(), binding, metav1.CreateOptions{})
+    framework.ExpectNoError(err)
 }

 func bindClusterRBACRoleToServiceAccount(f *framework.Framework, serviceAccountName, rbacRoleName string) {
@@ -492,7 +493,8 @@ func bindClusterRBACRoleToServiceAccount(f *framework.Framework, serviceAccountN
             Name: rbacRoleName,
         },
     }
-    f.ClientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), binding, metav1.CreateOptions{})
+    _, err := f.ClientSet.RbacV1().ClusterRoleBindings().Create(context.TODO(), binding, metav1.CreateOptions{})
+    framework.ExpectNoError(err)
 }

 // createPodWithGmsa creates a pod using the test GMSA cred spec, and returns its name.
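Both helpers previously dropped the return values of Create; they now capture the error and assert it. A minimal sketch of creating such a RoleBinding with the error checked; the object's fields are illustrative, since the hunks only show the tail of the literal, and the binding name is made up:

package example

import (
	"context"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// createRoleBinding binds the role to the service account and returns the
// error from Create instead of discarding it.
func createRoleBinding(cs clientset.Interface, namespace, serviceAccountName, rbacRoleName string) error {
	binding := &rbacv1.RoleBinding{
		ObjectMeta: metav1.ObjectMeta{
			Name: rbacRoleName + "-binding", // illustrative name
		},
		Subjects: []rbacv1.Subject{{
			Kind:      rbacv1.ServiceAccountKind,
			Name:      serviceAccountName,
			Namespace: namespace,
		}},
		RoleRef: rbacv1.RoleRef{
			APIGroup: rbacv1.GroupName,
			Kind:     "Role",
			Name:     rbacRoleName,
		},
	}
	_, err := cs.RbacV1().RoleBindings(namespace).Create(context.TODO(), binding, metav1.CreateOptions{})
	return err
}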
@@ -58,7 +58,8 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats [Serial]", func() {

         ginkgo.By("Waiting up to 3 minutes for pods to be running")
         timeout := 3 * time.Minute
-        e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 10, 0, timeout, make(map[string]string))
+        err = e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 10, 0, timeout, make(map[string]string))
+        framework.ExpectNoError(err)

         ginkgo.By("Getting kubelet stats 5 times and checking average duration")
         iterations := 5
@@ -148,7 +149,8 @@ var _ = SIGDescribe("[Feature:Windows] Kubelet-Stats", func() {

         ginkgo.By("Waiting up to 3 minutes for pods to be running")
         timeout := 3 * time.Minute
-        e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 3, 0, timeout, make(map[string]string))
+        err = e2epod.WaitForPodsRunningReady(f.ClientSet, f.Namespace.Name, 3, 0, timeout, make(map[string]string))
+        framework.ExpectNoError(err)

         ginkgo.By("Getting kubelet stats 1 time")
         iterations := 1
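Same pattern once more: the readiness wait's error is assigned and asserted, so a timeout surfaces at the wait rather than as a confusing failure later in the test. A minimal sketch, assuming the WaitForPodsRunningReady signature used in the diff; the wrapper name is hypothetical:

package example

import (
	"time"

	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
)

// waitForTestPods waits up to three minutes for minPods pods in the namespace
// to be running and ready, failing the test if the wait returns an error.
func waitForTestPods(cs clientset.Interface, namespace string, minPods int32) {
	timeout := 3 * time.Minute
	err := e2epod.WaitForPodsRunningReady(cs, namespace, minPods, 0, timeout, make(map[string]string))
	framework.ExpectNoError(err)
}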