e2e: use Ginkgo context

All code must use the context from Ginkgo when doing API calls or polling for a
change, otherwise the code would not return immediately when the test gets
aborted.
This commit is contained in:
Patrick Ohly
2022-12-12 10:11:10 +01:00
parent bf1d1dfd0f
commit 2f6c4f5eab
418 changed files with 11489 additions and 11369 deletions

View File

@@ -17,6 +17,7 @@ limitations under the License.
package aws
import (
"context"
"fmt"
"strings"
@@ -163,7 +164,7 @@ func (p *Provider) DeletePD(pdName string) error {
}
// CreatePVSource creates a persistent volume source
func (p *Provider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) {
func (p *Provider) CreatePVSource(ctx context.Context, zone, diskName string) (*v1.PersistentVolumeSource, error) {
return &v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: diskName,
@@ -173,8 +174,8 @@ func (p *Provider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSo
}
// DeletePVSource deletes a persistent volume source
func (p *Provider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
return e2epv.DeletePDWithRetry(pvSource.AWSElasticBlockStore.VolumeID)
func (p *Provider) DeletePVSource(ctx context.Context, pvSource *v1.PersistentVolumeSource) error {
return e2epv.DeletePDWithRetry(ctx, pvSource.AWSElasticBlockStore.VolumeID)
}
func newAWSClient(zone string) *ec2.EC2 {

View File

@@ -17,6 +17,7 @@ limitations under the License.
package gce
import (
"context"
"fmt"
"net/http"
"strconv"
@@ -394,12 +395,12 @@ func VerifyFirewallRule(res, exp *compute.Firewall, network string, portsSubset
}
// WaitForFirewallRule waits until the specified firewall rule reaches the desired existence state (present when exist=true, absent when exist=false).
func WaitForFirewallRule(gceCloud *gcecloud.Cloud, fwName string, exist bool, timeout time.Duration) (*compute.Firewall, error) {
func WaitForFirewallRule(ctx context.Context, gceCloud *gcecloud.Cloud, fwName string, exist bool, timeout time.Duration) (*compute.Firewall, error) {
framework.Logf("Waiting up to %v for firewall %v exist=%v", timeout, fwName, exist)
var fw *compute.Firewall
var err error
condition := func() (bool, error) {
condition := func(ctx context.Context) (bool, error) {
fw, err = gceCloud.GetFirewall(fwName)
if err != nil && exist ||
err == nil && !exist ||
@@ -409,7 +410,7 @@ func WaitForFirewallRule(gceCloud *gcecloud.Cloud, fwName string, exist bool, ti
return true, nil
}
if err := wait.PollImmediate(5*time.Second, timeout, condition); err != nil {
if err := wait.PollImmediateWithContext(ctx, 5*time.Second, timeout, condition); err != nil {
return nil, fmt.Errorf("error waiting for firewall %v exist=%v", fwName, exist)
}
return fw, nil

View File

@@ -185,14 +185,14 @@ func (p *Provider) GroupSize(group string) (int, error) {
}
// EnsureLoadBalancerResourcesDeleted ensures that cloud load balancer resources that were created for the given IP and port range are deleted.
func (p *Provider) EnsureLoadBalancerResourcesDeleted(ip, portRange string) error {
func (p *Provider) EnsureLoadBalancerResourcesDeleted(ctx context.Context, ip, portRange string) error {
project := framework.TestContext.CloudConfig.ProjectID
region, err := gcecloud.GetGCERegion(framework.TestContext.CloudConfig.Zone)
if err != nil {
return fmt.Errorf("could not get region for zone %q: %v", framework.TestContext.CloudConfig.Zone, err)
}
return wait.Poll(10*time.Second, 5*time.Minute, func() (bool, error) {
return wait.PollWithContext(ctx, 10*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
computeservice := p.gceCloud.ComputeServices().GA
list, err := computeservice.ForwardingRules.List(project, region).Do()
if err != nil {
@@ -268,7 +268,7 @@ func (p *Provider) DeletePD(pdName string) error {
}
// CreatePVSource creates a persistent volume source
func (p *Provider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSource, error) {
func (p *Provider) CreatePVSource(ctx context.Context, zone, diskName string) (*v1.PersistentVolumeSource, error) {
return &v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: diskName,
@@ -279,16 +279,16 @@ func (p *Provider) CreatePVSource(zone, diskName string) (*v1.PersistentVolumeSo
}
// DeletePVSource deletes a persistent volume source
func (p *Provider) DeletePVSource(pvSource *v1.PersistentVolumeSource) error {
return e2epv.DeletePDWithRetry(pvSource.GCEPersistentDisk.PDName)
func (p *Provider) DeletePVSource(ctx context.Context, pvSource *v1.PersistentVolumeSource) error {
return e2epv.DeletePDWithRetry(ctx, pvSource.GCEPersistentDisk.PDName)
}
// CleanupServiceResources cleans up GCE Service Type=LoadBalancer resources with
// the given name. The name is usually the UUID of the Service prefixed with an
// alpha-numeric character ('a') to work around cloudprovider rules.
func (p *Provider) CleanupServiceResources(c clientset.Interface, loadBalancerName, region, zone string) {
if pollErr := wait.Poll(5*time.Second, e2eservice.LoadBalancerCleanupTimeout, func() (bool, error) {
if err := p.cleanupGCEResources(c, loadBalancerName, region, zone); err != nil {
func (p *Provider) CleanupServiceResources(ctx context.Context, c clientset.Interface, loadBalancerName, region, zone string) {
if pollErr := wait.PollWithContext(ctx, 5*time.Second, e2eservice.LoadBalancerCleanupTimeout, func(ctx context.Context) (bool, error) {
if err := p.cleanupGCEResources(ctx, c, loadBalancerName, region, zone); err != nil {
framework.Logf("Still waiting for glbc to cleanup: %v", err)
return false, nil
}
@@ -298,7 +298,7 @@ func (p *Provider) CleanupServiceResources(c clientset.Interface, loadBalancerNa
}
}
func (p *Provider) cleanupGCEResources(c clientset.Interface, loadBalancerName, region, zone string) (retErr error) {
func (p *Provider) cleanupGCEResources(ctx context.Context, c clientset.Interface, loadBalancerName, region, zone string) (retErr error) {
if region == "" {
// Attempt to parse region from zone if no region is given.
var err error
@@ -320,7 +320,7 @@ func (p *Provider) cleanupGCEResources(c clientset.Interface, loadBalancerName,
!IsGoogleAPIHTTPErrorCode(err, http.StatusNotFound) {
retErr = fmt.Errorf("%v\n%v", retErr, err)
}
clusterID, err := GetClusterID(c)
clusterID, err := GetClusterID(ctx, c)
if err != nil {
retErr = fmt.Errorf("%v\n%v", retErr, err)
return
@@ -401,8 +401,8 @@ func GetGCECloud() (*gcecloud.Cloud, error) {
}
// GetClusterID returns cluster ID
func GetClusterID(c clientset.Interface) (string, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), gcecloud.UIDConfigMapName, metav1.GetOptions{})
func GetClusterID(ctx context.Context, c clientset.Interface) (string, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, gcecloud.UIDConfigMapName, metav1.GetOptions{})
if err != nil || cm == nil {
return "", fmt.Errorf("error getting cluster ID: %v", err)
}

View File

@@ -77,14 +77,14 @@ type IngressController struct {
}
// CleanupIngressController calls cont.CleanupIngressControllerWithTimeout with hard-coded timeout
func (cont *IngressController) CleanupIngressController() error {
return cont.CleanupIngressControllerWithTimeout(e2eservice.LoadBalancerCleanupTimeout)
func (cont *IngressController) CleanupIngressController(ctx context.Context) error {
return cont.CleanupIngressControllerWithTimeout(ctx, e2eservice.LoadBalancerCleanupTimeout)
}
// CleanupIngressControllerWithTimeout calls IngressController.Cleanup(false),
// followed by deleting the static ip, and then a final IngressController.Cleanup(true)
func (cont *IngressController) CleanupIngressControllerWithTimeout(timeout time.Duration) error {
pollErr := wait.Poll(5*time.Second, timeout, func() (bool, error) {
func (cont *IngressController) CleanupIngressControllerWithTimeout(ctx context.Context, timeout time.Duration) error {
pollErr := wait.PollWithContext(ctx, 5*time.Second, timeout, func(ctx context.Context) (bool, error) {
if err := cont.Cleanup(false); err != nil {
framework.Logf("Monitoring glbc's cleanup of gce resources:\n%v", err)
return false, nil
@@ -105,7 +105,7 @@ func (cont *IngressController) CleanupIngressControllerWithTimeout(timeout time.
// controller. Delete this IP only after the controller has had a chance
// to cleanup or it might interfere with the controller, causing it to
// throw out confusing events.
if ipErr := wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) {
if ipErr := wait.PollWithContext(ctx, 5*time.Second, 1*time.Minute, func(ctx context.Context) (bool, error) {
if err := cont.deleteStaticIPs(); err != nil {
framework.Logf("Failed to delete static-ip: %v\n", err)
return false, nil
@@ -125,9 +125,9 @@ func (cont *IngressController) CleanupIngressControllerWithTimeout(timeout time.
return nil
}
func (cont *IngressController) getL7AddonUID() (string, error) {
func (cont *IngressController) getL7AddonUID(ctx context.Context) (string, error) {
framework.Logf("Retrieving UID from config map: %v/%v", metav1.NamespaceSystem, uidConfigMap)
cm, err := cont.Client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(context.TODO(), uidConfigMap, metav1.GetOptions{})
cm, err := cont.Client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, uidConfigMap, metav1.GetOptions{})
if err != nil {
return "", err
}
@@ -604,8 +604,8 @@ func (cont *IngressController) isHTTPErrorCode(err error, code int) bool {
}
// WaitForNegBackendService waits for the expected backend services to switch to NEG backend mode.
func (cont *IngressController) WaitForNegBackendService(svcPorts map[string]v1.ServicePort) error {
return wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) {
func (cont *IngressController) WaitForNegBackendService(ctx context.Context, svcPorts map[string]v1.ServicePort) error {
return wait.PollWithContext(ctx, 5*time.Second, 1*time.Minute, func(ctx context.Context) (bool, error) {
err := cont.verifyBackendMode(svcPorts, negBackend)
if err != nil {
framework.Logf("Err while checking if backend service is using NEG: %v", err)
@@ -616,8 +616,8 @@ func (cont *IngressController) WaitForNegBackendService(svcPorts map[string]v1.S
}
// WaitForIgBackendService waits until all global backend services with matching svcPorts point to IG as backend.
func (cont *IngressController) WaitForIgBackendService(svcPorts map[string]v1.ServicePort) error {
return wait.Poll(5*time.Second, 1*time.Minute, func() (bool, error) {
func (cont *IngressController) WaitForIgBackendService(ctx context.Context, svcPorts map[string]v1.ServicePort) error {
return wait.PollWithContext(ctx, 5*time.Second, 1*time.Minute, func(ctx context.Context) (bool, error) {
err := cont.verifyBackendMode(svcPorts, igBackend)
if err != nil {
framework.Logf("Err while checking if backend service is using IG: %v", err)
@@ -745,8 +745,8 @@ func (cont *IngressController) Cleanup(del bool) error {
}
// Init initializes the IngressController with an UID
func (cont *IngressController) Init() error {
uid, err := cont.getL7AddonUID()
func (cont *IngressController) Init(ctx context.Context) error {
uid, err := cont.getL7AddonUID(ctx)
if err != nil {
return err
}

View File

@@ -80,12 +80,12 @@ func RecreateNodes(c clientset.Interface, nodes []v1.Node) error {
}
// WaitForNodeBootIdsToChange waits for the boot ids of the given nodes to change in order to verify the node has been recreated.
func WaitForNodeBootIdsToChange(c clientset.Interface, nodes []v1.Node, timeout time.Duration) error {
func WaitForNodeBootIdsToChange(ctx context.Context, c clientset.Interface, nodes []v1.Node, timeout time.Duration) error {
errMsg := []string{}
for i := range nodes {
node := &nodes[i]
if err := wait.Poll(30*time.Second, timeout, func() (bool, error) {
newNode, err := c.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
if err := wait.PollWithContext(ctx, 30*time.Second, timeout, func(ctx context.Context) (bool, error) {
newNode, err := c.CoreV1().Nodes().Get(ctx, node.Name, metav1.GetOptions{})
if err != nil {
framework.Logf("Could not get node info: %s. Retrying in %v.", err, 30*time.Second)
return false, nil

View File

@@ -17,6 +17,7 @@ limitations under the License.
package providers
import (
"context"
"fmt"
"os"
"path"
@@ -58,7 +59,7 @@ func LocationParamGKE() string {
}
// MasterUpgradeGKE upgrades master node to the specified version on GKE.
func MasterUpgradeGKE(namespace string, v string) error {
func MasterUpgradeGKE(ctx context.Context, namespace string, v string) error {
framework.Logf("Upgrading master to %q", v)
args := []string{
"container",
@@ -76,7 +77,7 @@ func MasterUpgradeGKE(namespace string, v string) error {
return err
}
e2enode.WaitForSSHTunnels(namespace)
e2enode.WaitForSSHTunnels(ctx, namespace)
return nil
}