e2e: use Ginkgo context

All code must use the context from Ginkgo when making API calls or polling for a
change; otherwise the code will not return immediately when the test gets
aborted.
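
For illustration only (not part of this commit), a minimal sketch of the pattern being rolled out: Ginkgo v2 passes a context into the spec body, and that context is what gets handed to every client-go call and to the context-aware polling helpers, so an aborted spec cancels them immediately instead of waiting for a timeout. The spec text, the "default" namespace, and the package-level clientset are assumptions made up for the example; only the Ginkgo/Gomega/client-go/apimachinery calls are real.

package example_test

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// cs is assumed to be initialized elsewhere by the test framework.
var cs clientset.Interface

var _ = ginkgo.It("waits for a pod using the Ginkgo context", func(ctx context.Context) {
	// API calls take the context passed in by Ginkgo instead of context.TODO().
	_, err := cs.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
	gomega.Expect(err).NotTo(gomega.HaveOccurred())

	// Polling uses the context-aware helper; when the spec is aborted,
	// the poll returns immediately with the context's error.
	err = wait.PollImmediateWithContext(ctx, 2*time.Second, time.Minute, func(ctx context.Context) (bool, error) {
		pods, err := cs.CoreV1().Pods("default").List(ctx, metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		return len(pods.Items) > 0, nil
	})
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
})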
Author: Patrick Ohly
Date: 2022-12-12 10:11:10 +01:00
commit 2f6c4f5eab
parent bf1d1dfd0f
418 changed files with 11489 additions and 11369 deletions


@@ -149,7 +149,7 @@ type Test struct {
}
// NewNFSServer is a NFS-specific wrapper for CreateStorageServer.
-func NewNFSServer(cs clientset.Interface, namespace string, args []string) (config TestConfig, pod *v1.Pod, host string) {
+func NewNFSServer(ctx context.Context, cs clientset.Interface, namespace string, args []string) (config TestConfig, pod *v1.Pod, host string) {
config = TestConfig{
Namespace: namespace,
Prefix: "nfs",
@@ -161,7 +161,7 @@ func NewNFSServer(cs clientset.Interface, namespace string, args []string) (conf
if len(args) > 0 {
config.ServerArgs = args
}
-pod, host = CreateStorageServer(cs, config)
+pod, host = CreateStorageServer(ctx, cs, config)
if strings.Contains(host, ":") {
host = "[" + host + "]"
}
@@ -171,8 +171,8 @@ func NewNFSServer(cs clientset.Interface, namespace string, args []string) (conf
// CreateStorageServer is a wrapper for startVolumeServer(). A storage server config is passed in, and a pod pointer
// and ip address string are returned.
// Note: Expect() is called so no error is returned.
-func CreateStorageServer(cs clientset.Interface, config TestConfig) (pod *v1.Pod, ip string) {
-pod = startVolumeServer(cs, config)
+func CreateStorageServer(ctx context.Context, cs clientset.Interface, config TestConfig) (pod *v1.Pod, ip string) {
+pod = startVolumeServer(ctx, cs, config)
gomega.Expect(pod).NotTo(gomega.BeNil(), "storage server pod should not be nil")
ip = pod.Status.PodIP
gomega.Expect(len(ip)).NotTo(gomega.BeZero(), fmt.Sprintf("pod %s's IP should not be empty", pod.Name))
@@ -182,7 +182,7 @@ func CreateStorageServer(cs clientset.Interface, config TestConfig) (pod *v1.Pod
// GetVolumeAttachmentName returns the hash value of the provisioner, the config ClientNodeSelection name,
// and the VolumeAttachment name of the PV that is bound to the PVC with the passed in claimName and claimNamespace.
-func GetVolumeAttachmentName(cs clientset.Interface, config TestConfig, provisioner string, claimName string, claimNamespace string) string {
+func GetVolumeAttachmentName(ctx context.Context, cs clientset.Interface, config TestConfig, provisioner string, claimName string, claimNamespace string) string {
var nodeName string
// For provisioning tests, ClientNodeSelection is not set so we do not know the NodeName of the VolumeAttachment of the PV that is
// bound to the PVC with the passed in claimName and claimNamespace. We need this NodeName because it is used to generate the
@@ -190,9 +190,9 @@ func GetVolumeAttachmentName(cs clientset.Interface, config TestConfig, provisio
// To get the nodeName of the VolumeAttachment, we get all the VolumeAttachments, look for the VolumeAttachment with a
// PersistentVolumeName equal to the PV that is bound to the passed in PVC, and then we get the NodeName from that VolumeAttachment.
if config.ClientNodeSelection.Name == "" {
-claim, _ := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(context.TODO(), claimName, metav1.GetOptions{})
+claim, _ := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(ctx, claimName, metav1.GetOptions{})
pvName := claim.Spec.VolumeName
-volumeAttachments, _ := cs.StorageV1().VolumeAttachments().List(context.TODO(), metav1.ListOptions{})
+volumeAttachments, _ := cs.StorageV1().VolumeAttachments().List(ctx, metav1.ListOptions{})
for _, volumeAttachment := range volumeAttachments.Items {
if *volumeAttachment.Spec.Source.PersistentVolumeName == pvName {
nodeName = volumeAttachment.Spec.NodeName
@@ -202,21 +202,21 @@ func GetVolumeAttachmentName(cs clientset.Interface, config TestConfig, provisio
} else {
nodeName = config.ClientNodeSelection.Name
}
-handle := getVolumeHandle(cs, claimName, claimNamespace)
+handle := getVolumeHandle(ctx, cs, claimName, claimNamespace)
attachmentHash := sha256.Sum256([]byte(fmt.Sprintf("%s%s%s", handle, provisioner, nodeName)))
return fmt.Sprintf("csi-%x", attachmentHash)
}
// getVolumeHandle returns the VolumeHandle of the PV that is bound to the PVC with the passed in claimName and claimNamespace.
-func getVolumeHandle(cs clientset.Interface, claimName string, claimNamespace string) string {
+func getVolumeHandle(ctx context.Context, cs clientset.Interface, claimName string, claimNamespace string) string {
// re-get the claim to the latest state with bound volume
-claim, err := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(context.TODO(), claimName, metav1.GetOptions{})
+claim, err := cs.CoreV1().PersistentVolumeClaims(claimNamespace).Get(ctx, claimName, metav1.GetOptions{})
if err != nil {
framework.ExpectNoError(err, "Cannot get PVC")
return ""
}
pvName := claim.Spec.VolumeName
-pv, err := cs.CoreV1().PersistentVolumes().Get(context.TODO(), pvName, metav1.GetOptions{})
+pv, err := cs.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil {
framework.ExpectNoError(err, "Cannot get PV")
return ""
@@ -229,9 +229,9 @@ func getVolumeHandle(cs clientset.Interface, claimName string, claimNamespace st
}
// WaitForVolumeAttachmentTerminated waits for the VolumeAttachment with the passed in attachmentName to be terminated.
-func WaitForVolumeAttachmentTerminated(attachmentName string, cs clientset.Interface, timeout time.Duration) error {
-waitErr := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
-_, err := cs.StorageV1().VolumeAttachments().Get(context.TODO(), attachmentName, metav1.GetOptions{})
+func WaitForVolumeAttachmentTerminated(ctx context.Context, attachmentName string, cs clientset.Interface, timeout time.Duration) error {
+waitErr := wait.PollImmediateWithContext(ctx, 10*time.Second, timeout, func(ctx context.Context) (bool, error) {
+_, err := cs.StorageV1().VolumeAttachments().Get(ctx, attachmentName, metav1.GetOptions{})
if err != nil {
// if the volumeattachment object is not found, it means it has been terminated.
if apierrors.IsNotFound(err) {
@@ -250,7 +250,7 @@ func WaitForVolumeAttachmentTerminated(attachmentName string, cs clientset.Inter
// startVolumeServer starts a container specified by config.serverImage and exports all
// config.serverPorts from it. The returned pod should be used to get the server
// IP address and create appropriate VolumeSource.
-func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
+func startVolumeServer(ctx context.Context, client clientset.Interface, config TestConfig) *v1.Pod {
podClient := client.CoreV1().Pods(config.Namespace)
portCount := len(config.ServerPorts)
@@ -331,13 +331,13 @@ func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
}
var pod *v1.Pod
-serverPod, err := podClient.Create(context.TODO(), serverPod, metav1.CreateOptions{})
+serverPod, err := podClient.Create(ctx, serverPod, metav1.CreateOptions{})
// ok if the server pod already exists. TODO: make this controllable by callers
if err != nil {
if apierrors.IsAlreadyExists(err) {
framework.Logf("Ignore \"already-exists\" error, re-get pod...")
ginkgo.By(fmt.Sprintf("re-getting the %q server pod", serverPodName))
-serverPod, err = podClient.Get(context.TODO(), serverPodName, metav1.GetOptions{})
+serverPod, err = podClient.Get(ctx, serverPodName, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot re-get the server pod %q: %v", serverPodName, err)
pod = serverPod
} else {
@@ -345,13 +345,13 @@ func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
}
}
if config.WaitForCompletion {
-framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(client, serverPod.Name, serverPod.Namespace))
-framework.ExpectNoError(podClient.Delete(context.TODO(), serverPod.Name, metav1.DeleteOptions{}))
+framework.ExpectNoError(e2epod.WaitForPodSuccessInNamespace(ctx, client, serverPod.Name, serverPod.Namespace))
+framework.ExpectNoError(podClient.Delete(ctx, serverPod.Name, metav1.DeleteOptions{}))
} else {
-framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(client, serverPod))
+framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, client, serverPod))
if pod == nil {
ginkgo.By(fmt.Sprintf("locating the %q server pod", serverPodName))
-pod, err = podClient.Get(context.TODO(), serverPodName, metav1.GetOptions{})
+pod, err = podClient.Get(ctx, serverPodName, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the server pod %q: %v", serverPodName, err)
}
}
@@ -363,7 +363,7 @@ func startVolumeServer(client clientset.Interface, config TestConfig) *v1.Pod {
}
// TestServerCleanup cleans server pod.
-func TestServerCleanup(f *framework.Framework, config TestConfig) {
+func TestServerCleanup(ctx context.Context, f *framework.Framework, config TestConfig) {
ginkgo.By(fmt.Sprint("cleaning the environment after ", config.Prefix))
defer ginkgo.GinkgoRecover()
@@ -371,11 +371,11 @@ func TestServerCleanup(f *framework.Framework, config TestConfig) {
return
}
-err := e2epod.DeletePodWithWaitByName(f.ClientSet, config.Prefix+"-server", config.Namespace)
+err := e2epod.DeletePodWithWaitByName(ctx, f.ClientSet, config.Prefix+"-server", config.Namespace)
gomega.Expect(err).To(gomega.BeNil(), "Failed to delete pod %v in namespace %v", config.Prefix+"-server", config.Namespace)
}
-func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutContext, config TestConfig, podSuffix string, privileged bool, fsGroup *int64, tests []Test, slow bool) (*v1.Pod, error) {
+func runVolumeTesterPod(ctx context.Context, client clientset.Interface, timeouts *framework.TimeoutContext, config TestConfig, podSuffix string, privileged bool, fsGroup *int64, tests []Test, slow bool) (*v1.Pod, error) {
ginkgo.By(fmt.Sprint("starting ", config.Prefix, "-", podSuffix))
var gracePeriod int64 = 1
var command string
@@ -453,18 +453,18 @@ func runVolumeTesterPod(client clientset.Interface, timeouts *framework.TimeoutC
})
}
podsNamespacer := client.CoreV1().Pods(config.Namespace)
-clientPod, err := podsNamespacer.Create(context.TODO(), clientPod, metav1.CreateOptions{})
+clientPod, err := podsNamespacer.Create(ctx, clientPod, metav1.CreateOptions{})
if err != nil {
return nil, err
}
if slow {
-err = e2epod.WaitTimeoutForPodRunningInNamespace(client, clientPod.Name, clientPod.Namespace, timeouts.PodStartSlow)
+err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, client, clientPod.Name, clientPod.Namespace, timeouts.PodStartSlow)
} else {
-err = e2epod.WaitTimeoutForPodRunningInNamespace(client, clientPod.Name, clientPod.Namespace, timeouts.PodStart)
+err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, client, clientPod.Name, clientPod.Namespace, timeouts.PodStart)
}
if err != nil {
-e2epod.DeletePodOrFail(client, clientPod.Namespace, clientPod.Name)
-e2epod.WaitForPodToDisappear(client, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
+e2epod.DeletePodOrFail(ctx, client, clientPod.Namespace, clientPod.Name)
+_ = e2epod.WaitForPodToDisappear(ctx, client, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)
return nil, err
}
return clientPod, nil
@@ -519,8 +519,8 @@ func testVolumeContent(f *framework.Framework, pod *v1.Pod, containerName string
// Timeout for dynamic provisioning (if "WaitForFirstConsumer" is set && provided PVC is not bound yet),
// pod creation, scheduling and complete pod startup (incl. volume attach & mount) is pod.podStartTimeout.
// It should be used for cases where "regular" dynamic provisioning of an empty volume is requested.
-func TestVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
-testVolumeClient(f, config, fsGroup, fsType, tests, false)
+func TestVolumeClient(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
+testVolumeClient(ctx, f, config, fsGroup, fsType, tests, false)
}
// TestVolumeClientSlow is the same as TestVolumeClient except for its timeout.
@@ -528,21 +528,21 @@ func TestVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
// pod creation, scheduling and complete pod startup (incl. volume attach & mount) is pod.slowPodStartTimeout.
// It should be used for cases where "special" dynamic provisioning is requested, such as volume cloning
// or snapshot restore.
-func TestVolumeClientSlow(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
-testVolumeClient(f, config, fsGroup, fsType, tests, true)
+func TestVolumeClientSlow(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
+testVolumeClient(ctx, f, config, fsGroup, fsType, tests, true)
}
-func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test, slow bool) {
+func testVolumeClient(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test, slow bool) {
timeouts := f.Timeouts
-clientPod, err := runVolumeTesterPod(f.ClientSet, timeouts, config, "client", false, fsGroup, tests, slow)
+clientPod, err := runVolumeTesterPod(ctx, f.ClientSet, timeouts, config, "client", false, fsGroup, tests, slow)
if err != nil {
framework.Failf("Failed to create client pod: %v", err)
}
defer func() {
// testVolumeClient might get used more than once per test, therefore
// we have to clean up before returning.
-e2epod.DeletePodOrFail(f.ClientSet, clientPod.Namespace, clientPod.Name)
-framework.ExpectNoError(e2epod.WaitForPodToDisappear(f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete))
+e2epod.DeletePodOrFail(ctx, f.ClientSet, clientPod.Namespace, clientPod.Name)
+framework.ExpectNoError(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete))
}()
testVolumeContent(f, clientPod, "", fsGroup, fsType, tests)
@@ -553,7 +553,7 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
}
ec.Resources = v1.ResourceRequirements{}
ec.Name = "volume-ephemeral-container"
-err = e2epod.NewPodClient(f).AddEphemeralContainerSync(clientPod, ec, timeouts.PodStart)
+err = e2epod.NewPodClient(f).AddEphemeralContainerSync(ctx, clientPod, ec, timeouts.PodStart)
// The API server will return NotFound for the subresource when the feature is disabled
framework.ExpectNoError(err, "failed to add ephemeral container for re-test")
testVolumeContent(f, clientPod, ec.Name, fsGroup, fsType, tests)
@@ -562,13 +562,13 @@ func testVolumeClient(f *framework.Framework, config TestConfig, fsGroup *int64,
// InjectContent inserts index.html with given content into given volume. It does so by
// starting an auxiliary pod which writes the file there.
// The volume must be writable.
-func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
+func InjectContent(ctx context.Context, f *framework.Framework, config TestConfig, fsGroup *int64, fsType string, tests []Test) {
privileged := true
timeouts := f.Timeouts
if framework.NodeOSDistroIs("windows") {
privileged = false
}
-injectorPod, err := runVolumeTesterPod(f.ClientSet, timeouts, config, "injector", privileged, fsGroup, tests, false /*slow*/)
+injectorPod, err := runVolumeTesterPod(ctx, f.ClientSet, timeouts, config, "injector", privileged, fsGroup, tests, false /*slow*/)
if err != nil {
framework.Failf("Failed to create injector pod: %v", err)
return
@@ -576,8 +576,8 @@ func InjectContent(f *framework.Framework, config TestConfig, fsGroup *int64, fs
defer func() {
// This pod must get deleted before the function returns because the test relies on
// the volume not being in use.
-e2epod.DeletePodOrFail(f.ClientSet, injectorPod.Namespace, injectorPod.Name)
-framework.ExpectNoError(e2epod.WaitForPodToDisappear(f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete))
+e2epod.DeletePodOrFail(ctx, f.ClientSet, injectorPod.Namespace, injectorPod.Name)
+framework.ExpectNoError(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete))
}()
ginkgo.By("Writing text file contents in the container.")