diff --git a/test/e2e/apimachinery/apiserver_identity.go b/test/e2e/apimachinery/apiserver_identity.go index c641b4a8b86..df672030d24 100644 --- a/test/e2e/apimachinery/apiserver_identity.go +++ b/test/e2e/apimachinery/apiserver_identity.go @@ -73,7 +73,7 @@ func restartAPIServer(ctx context.Context, node *v1.Node) error { result, err := e2essh.SSH(ctx, cmd, net.JoinHostPort(controlPlaneAddress, e2essh.SSHPort), framework.TestContext.Provider) if err != nil || result.Code != 0 { e2essh.LogResult(result) - return fmt.Errorf("couldn't restart kube-apiserver: %v", err) + return fmt.Errorf("couldn't restart kube-apiserver: %w", err) } return nil } diff --git a/test/e2e/apimachinery/crd_publish_openapi.go b/test/e2e/apimachinery/crd_publish_openapi.go index 29998b33fcf..7ca0e275ceb 100644 --- a/test/e2e/apimachinery/crd_publish_openapi.go +++ b/test/e2e/apimachinery/crd_publish_openapi.go @@ -561,7 +561,7 @@ func setupCRDAndVerifySchemaWithOptions(f *framework.Framework, schema, expect [ }) crd, err := crd.CreateMultiVersionTestCRD(f, group, options...) if err != nil { - return nil, fmt.Errorf("failed to create CRD: %v", err) + return nil, fmt.Errorf("failed to create CRD: %w", err) } for _, v := range crd.Crd.Spec.Versions { @@ -623,7 +623,7 @@ func waitForDefinition(c k8sclientset.Interface, name string, schema []byte) err return true, "" }) if err != nil { - return fmt.Errorf("failed to wait for definition %q to be served with the right OpenAPI schema: %v", name, err) + return fmt.Errorf("failed to wait for definition %q to be served with the right OpenAPI schema: %w", name, err) } return nil } @@ -637,7 +637,7 @@ func waitForDefinitionCleanup(c k8sclientset.Interface, name string) error { return true, "" }) if err != nil { - return fmt.Errorf("failed to wait for definition %q not to be served anymore: %v", name, err) + return fmt.Errorf("failed to wait for definition %q not to be served anymore: %w", name, err) } return nil } @@ -718,7 +718,7 @@ func dropDefaults(s *spec.Schema) { func verifyKubectlExplain(ns, name, pattern string) error { result, err := e2ekubectl.RunKubectl(ns, "explain", name) if err != nil { - return fmt.Errorf("failed to explain %s: %v", name, err) + return fmt.Errorf("failed to explain %s: %w", name, err) } r := regexp.MustCompile(pattern) if !r.Match([]byte(result)) { diff --git a/test/e2e/apimachinery/garbage_collector.go b/test/e2e/apimachinery/garbage_collector.go index e0fb6266ca3..e240312f207 100644 --- a/test/e2e/apimachinery/garbage_collector.go +++ b/test/e2e/apimachinery/garbage_collector.go @@ -181,7 +181,7 @@ func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects case "Pods": pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { - return false, fmt.Errorf("failed to list pods: %v", err) + return false, fmt.Errorf("failed to list pods: %w", err) } if len(pods.Items) != num { ret = false @@ -190,7 +190,7 @@ func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects case "Deployments": deployments, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { - return false, fmt.Errorf("failed to list deployments: %v", err) + return false, fmt.Errorf("failed to list deployments: %w", err) } if len(deployments.Items) != num { ret = false @@ -199,7 +199,7 @@ func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects case "ReplicaSets": rs, err := 
f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { - return false, fmt.Errorf("failed to list rs: %v", err) + return false, fmt.Errorf("failed to list rs: %w", err) } if len(rs.Items) != num { ret = false @@ -208,7 +208,7 @@ func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects case "ReplicationControllers": rcs, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { - return false, fmt.Errorf("failed to list replication controllers: %v", err) + return false, fmt.Errorf("failed to list replication controllers: %w", err) } if len(rcs.Items) != num { ret = false @@ -217,7 +217,7 @@ func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects case "CronJobs": cronJobs, err := f.ClientSet.BatchV1().CronJobs(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { - return false, fmt.Errorf("failed to list cronjobs: %v", err) + return false, fmt.Errorf("failed to list cronjobs: %w", err) } if len(cronJobs.Items) != num { ret = false @@ -226,7 +226,7 @@ func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects case "Jobs": jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { - return false, fmt.Errorf("failed to list jobs: %v", err) + return false, fmt.Errorf("failed to list jobs: %w", err) } if len(jobs.Items) != num { ret = false @@ -325,7 +325,7 @@ var _ = SIGDescribe("Garbage collector", func() { if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) { pods, err := podClient.List(ctx, metav1.ListOptions{}) if err != nil { - return false, fmt.Errorf("failed to list pods: %v", err) + return false, fmt.Errorf("failed to list pods: %w", err) } // We intentionally don't wait the number of pods to reach // rc.Spec.Replicas. 
We want to see if the garbage collector and the @@ -383,7 +383,7 @@ var _ = SIGDescribe("Garbage collector", func() { if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) { rc, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{}) if err != nil { - return false, fmt.Errorf("failed to get rc: %v", err) + return false, fmt.Errorf("failed to get rc: %w", err) } if rc.Status.Replicas == *rc.Spec.Replicas { return true, nil @@ -410,7 +410,7 @@ var _ = SIGDescribe("Garbage collector", func() { if err := wait.PollWithContext(ctx, 5*time.Second, 120*time.Second+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) { rcs, err := rcClient.List(ctx, metav1.ListOptions{}) if err != nil { - return false, fmt.Errorf("failed to list rcs: %v", err) + return false, fmt.Errorf("failed to list rcs: %w", err) } if len(rcs.Items) != 0 { return false, nil @@ -452,7 +452,7 @@ var _ = SIGDescribe("Garbage collector", func() { if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) { rc, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{}) if err != nil { - return false, fmt.Errorf("failed to get rc: %v", err) + return false, fmt.Errorf("failed to get rc: %w", err) } if rc.Status.Replicas == *rc.Spec.Replicas { return true, nil @@ -505,7 +505,7 @@ var _ = SIGDescribe("Garbage collector", func() { err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) { rsList, err := rsClient.List(ctx, metav1.ListOptions{}) if err != nil { - return false, fmt.Errorf("failed to list rs: %v", err) + return false, fmt.Errorf("failed to list rs: %w", err) } return len(rsList.Items) > 0, nil @@ -530,7 +530,7 @@ var _ = SIGDescribe("Garbage collector", func() { errList = append(errList, err) remainingRSs, err := rsClient.List(ctx, metav1.ListOptions{}) if err != nil { - errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %v", err)) + errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %w", err)) } else { errList = append(errList, fmt.Errorf("remaining rs are: %#v", remainingRSs)) } @@ -565,7 +565,7 @@ var _ = SIGDescribe("Garbage collector", func() { err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) { rsList, err := rsClient.List(ctx, metav1.ListOptions{}) if err != nil { - return false, fmt.Errorf("failed to list rs: %v", err) + return false, fmt.Errorf("failed to list rs: %w", err) } if len(rsList.Items) > 0 { replicaset = rsList.Items[0] @@ -599,7 +599,7 @@ var _ = SIGDescribe("Garbage collector", func() { err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) { dList, err := deployClient.List(ctx, metav1.ListOptions{}) if err != nil { - return false, fmt.Errorf("failed to list deployments: %v", err) + return false, fmt.Errorf("failed to list deployments: %w", err) } return len(dList.Items) == 0, nil }) @@ -616,13 +616,13 @@ var _ = SIGDescribe("Garbage collector", func() { errList := make([]error, 0) remainingRSs, err := rsClient.List(ctx, metav1.ListOptions{}) if err != nil { - errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %v", err)) + errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %w", err)) } else { errList = append(errList, fmt.Errorf("remaining rs post mortem: %#v", remainingRSs)) } remainingDSs, err := 
deployClient.List(ctx, metav1.ListOptions{}) if err != nil { - errList = append(errList, fmt.Errorf("failed to list Deployments post mortem: %v", err)) + errList = append(errList, fmt.Errorf("failed to list Deployments post mortem: %w", err)) } else { errList = append(errList, fmt.Errorf("remaining deployment's post mortem: %#v", remainingDSs)) } @@ -663,7 +663,7 @@ var _ = SIGDescribe("Garbage collector", func() { if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) { rc, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{}) if err != nil { - return false, fmt.Errorf("failed to get rc: %v", err) + return false, fmt.Errorf("failed to get rc: %w", err) } if rc.Status.Replicas == *rc.Spec.Replicas { return true, nil @@ -758,7 +758,7 @@ var _ = SIGDescribe("Garbage collector", func() { if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) { rc1, err := rcClient.Get(ctx, rc1.Name, metav1.GetOptions{}) if err != nil { - return false, fmt.Errorf("failed to get rc: %v", err) + return false, fmt.Errorf("failed to get rc: %w", err) } if rc1.Status.Replicas == *rc1.Spec.Replicas { return true, nil @@ -889,7 +889,7 @@ var _ = SIGDescribe("Garbage collector", func() { if err := wait.PollWithContext(ctx, 5*time.Second, 90*time.Second+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) { pods, err2 = podClient.List(ctx, metav1.ListOptions{}) if err2 != nil { - return false, fmt.Errorf("failed to list pods: %v", err) + return false, fmt.Errorf("failed to list pods: %w", err) } if len(pods.Items) == 0 { return true, nil @@ -1125,7 +1125,7 @@ var _ = SIGDescribe("Garbage collector", func() { return false, nil } if err != nil && !apierrors.IsNotFound(err) { - return false, fmt.Errorf("failed to get owner: %v", err) + return false, fmt.Errorf("failed to get owner: %w", err) } return true, nil }); err != nil { @@ -1153,7 +1153,7 @@ var _ = SIGDescribe("Garbage collector", func() { err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 2*time.Minute, func(ctx context.Context) (bool, error) { jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { - return false, fmt.Errorf("failed to list jobs: %v", err) + return false, fmt.Errorf("failed to list jobs: %w", err) } return len(jobs.Items) > 0, nil }) diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index b99fd1c514e..2f8569f23b9 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -1250,7 +1250,7 @@ func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func(ctx con if apierrors.IsNotFound(err) { return true, nil } - return false, fmt.Errorf("failed to get failed daemon pod %q: %v", pod.Name, err) + return false, fmt.Errorf("failed to get failed daemon pod %q: %w", pod.Name, err) } return false, nil } diff --git a/test/e2e/apps/rc.go b/test/e2e/apps/rc.go index b54d6e3cb0c..d036cd53908 100644 --- a/test/e2e/apps/rc.go +++ b/test/e2e/apps/rc.go @@ -27,7 +27,6 @@ import ( apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" utilrand "k8s.io/apimachinery/pkg/util/rand" @@ -513,9 +512,9 @@ func TestReplicationControllerServeImageOrFail(ctx context.Context, f *framework if err != nil { updatePod, getErr := 
f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) if getErr == nil { - err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err) + err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %w", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err) } else { - err = fmt.Errorf("pod %q never run: %v", pod.Name, err) + err = fmt.Errorf("pod %q never run: %w", pod.Name, err) } } framework.ExpectNoError(err) @@ -528,13 +527,7 @@ func TestReplicationControllerServeImageOrFail(ctx context.Context, f *framework // Verify that something is listening. framework.Logf("Trying to dial the pod") - retryTimeout := 2 * time.Minute - retryInterval := 5 * time.Second - label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - err = wait.PollWithContext(ctx, retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) - if err != nil { - framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) - } + framework.ExpectNoError(e2epod.WaitForPodsResponding(ctx, f.ClientSet, f.Namespace.Name, name, true, 2*time.Minute, pods)) } // 1. Create a quota restricting pods in the current namespace to 2. diff --git a/test/e2e/apps/replica_set.go b/test/e2e/apps/replica_set.go index 3d192ac8ab0..d3489c3567a 100644 --- a/test/e2e/apps/replica_set.go +++ b/test/e2e/apps/replica_set.go @@ -210,9 +210,9 @@ func testReplicaSetServeImageOrFail(ctx context.Context, f *framework.Framework, if err != nil { updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{}) if getErr == nil { - err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err) + err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %w", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err) } else { - err = fmt.Errorf("pod %q never run: %v", pod.Name, err) + err = fmt.Errorf("pod %q never run: %w", pod.Name, err) } } framework.ExpectNoError(err) @@ -225,13 +225,7 @@ func testReplicaSetServeImageOrFail(ctx context.Context, f *framework.Framework, // Verify that something is listening. framework.Logf("Trying to dial the pod") - retryTimeout := 2 * time.Minute - retryInterval := 5 * time.Second - label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) - err = wait.PollWithContext(ctx, retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) - if err != nil { - framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) - } + framework.ExpectNoError(e2epod.WaitForPodsResponding(ctx, f.ClientSet, f.Namespace.Name, name, true, 2*time.Minute, pods)) } // 1. Create a quota restricting pods in the current namespace to 2. 
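Most of the changes above replace fmt.Errorf's %v verb with %w wherever an underlying error is included. A minimal sketch of what that buys callers, outside this diff: the helper name listPods and its parameters are assumptions for illustration, and the snippet assumes the usual imports (context, errors, fmt, v1 "k8s.io/api/core/v1", apierrors "k8s.io/apimachinery/pkg/api/errors", metav1 "k8s.io/apimachinery/pkg/apis/meta/v1", clientset "k8s.io/client-go/kubernetes").

// Hypothetical helper, not part of the diff: wraps a client-go error with %w.
func listPods(ctx context.Context, c clientset.Interface, ns string) (*v1.PodList, error) {
	pods, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
	if err != nil {
		// %w keeps err in the error chain; %v would flatten it to a plain string.
		return nil, fmt.Errorf("failed to list pods in namespace %q: %w", ns, err)
	}
	return pods, nil
}

// Callers can still inspect the underlying API error through the wrapper,
// which only works because of %w:
//
//	if _, err := listPods(ctx, c, ns); err != nil {
//		var statusErr *apierrors.StatusError
//		if errors.As(err, &statusErr) {
//			// react to statusErr.ErrStatus.Reason
//		}
//	}

The wrapping costs nothing at the call site but preserves typed information that %v discards.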
diff --git a/test/e2e/auth/service_accounts.go b/test/e2e/auth/service_accounts.go index 111ab22118a..78163e9b488 100644 --- a/test/e2e/auth/service_accounts.go +++ b/test/e2e/auth/service_accounts.go @@ -493,9 +493,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { framework.ExpectNoError(err) framework.Logf("created pod") - if !e2epod.CheckPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) { - framework.Failf("pod %q in ns %q never became ready", pod.Name, f.Namespace.Name) - } + framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, time.Minute)) framework.Logf("pod is ready") @@ -509,7 +507,7 @@ var _ = SIGDescribe("ServiceAccounts", func() { } tokenCount, err := ParseInClusterClientLogs(logs) if err != nil { - return false, fmt.Errorf("inclusterclient reported an error: %v", err) + return false, fmt.Errorf("inclusterclient reported an error: %w", err) } if tokenCount < 2 { framework.Logf("Retrying. Still waiting to see more unique tokens: got=%d, want=2", tokenCount) diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go index 77547a714f6..7a9aa45851c 100644 --- a/test/e2e/autoscaling/cluster_size_autoscaling.go +++ b/test/e2e/autoscaling/cluster_size_autoscaling.go @@ -1166,7 +1166,7 @@ func enableAutoscaler(nodePool string, minCount, maxCount int) error { if err != nil { klog.Errorf("Failed config update result: %s", output) - return fmt.Errorf("Failed to enable autoscaling: %v", err) + return fmt.Errorf("Failed to enable autoscaling: %w", err) } klog.Infof("Config update result: %s", output) @@ -1190,7 +1190,7 @@ func disableAutoscaler(nodePool string, minCount, maxCount int) error { if err != nil { klog.Errorf("Failed config update result: %s", output) - return fmt.Errorf("Failed to disable autoscaling: %v", err) + return fmt.Errorf("Failed to disable autoscaling: %w", err) } klog.Infof("Config update result: %s", output) @@ -1384,7 +1384,7 @@ func waitForCaPodsReadyInNamespace(ctx context.Context, f *framework.Framework, for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)) && ctx.Err() == nil; time.Sleep(20 * time.Second) { pods, err := c.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{}) if err != nil { - return fmt.Errorf("failed to get pods: %v", err) + return fmt.Errorf("failed to get pods: %w", err) } notready = make([]string, 0) for _, pod := range pods.Items { diff --git a/test/e2e/autoscaling/dns_autoscaling.go b/test/e2e/autoscaling/dns_autoscaling.go index 2165af35154..a1f33db3de3 100644 --- a/test/e2e/autoscaling/dns_autoscaling.go +++ b/test/e2e/autoscaling/dns_autoscaling.go @@ -355,7 +355,7 @@ func waitForDNSReplicasSatisfied(ctx context.Context, c clientset.Interface, get } if err = wait.Poll(2*time.Second, timeout, condition); err != nil { - return fmt.Errorf("err waiting for DNS replicas to satisfy %v, got %v: %v", expected, current, err) + return fmt.Errorf("err waiting for DNS replicas to satisfy %v, got %v: %w", expected, current, err) } framework.Logf("kube-dns reaches expected replicas: %v", expected) return nil @@ -372,7 +372,7 @@ func waitForDNSConfigMapCreated(ctx context.Context, c clientset.Interface, time } if err = wait.Poll(time.Second, timeout, condition); err != nil { - return nil, fmt.Errorf("err waiting for DNS autoscaling ConfigMap got re-created: %v", err) + return nil, fmt.Errorf("err waiting for DNS autoscaling ConfigMap got re-created: %w", err) } return 
configMap, nil } diff --git a/test/e2e/cloud/gcp/addon_update.go b/test/e2e/cloud/gcp/addon_update.go index 63cf281a354..42e4d2bc0cc 100644 --- a/test/e2e/cloud/gcp/addon_update.go +++ b/test/e2e/cloud/gcp/addon_update.go @@ -375,7 +375,7 @@ func waitForReplicationController(ctx context.Context, c clientset.Interface, na }) if err != nil { stateMsg := map[bool]string{true: "to appear", false: "to disappear"} - return fmt.Errorf("error waiting for ReplicationController %s/%s %s: %v", namespace, name, stateMsg[exist], err) + return fmt.Errorf("error waiting for ReplicationController %s/%s %s: %w", namespace, name, stateMsg[exist], err) } return nil } @@ -402,7 +402,7 @@ func waitForServiceWithSelector(ctx context.Context, c clientset.Interface, name }) if err != nil { stateMsg := map[bool]string{true: "to appear", false: "to disappear"} - return fmt.Errorf("error waiting for service with %s in namespace %s %s: %v", selector.String(), namespace, stateMsg[exist], err) + return fmt.Errorf("error waiting for service with %s in namespace %s %s: %w", selector.String(), namespace, stateMsg[exist], err) } return nil } @@ -426,7 +426,7 @@ func waitForReplicationControllerWithSelector(ctx context.Context, c clientset.I }) if err != nil { stateMsg := map[bool]string{true: "to appear", false: "to disappear"} - return fmt.Errorf("error waiting for ReplicationControllers with %s in namespace %s %s: %v", selector.String(), namespace, stateMsg[exist], err) + return fmt.Errorf("error waiting for ReplicationControllers with %s in namespace %s %s: %w", selector.String(), namespace, stateMsg[exist], err) } return nil } @@ -437,7 +437,7 @@ func getMasterSSHClient() (*ssh.Client, error) { // Get a signer for the provider. signer, err := e2essh.GetSigner(framework.TestContext.Provider) if err != nil { - return nil, fmt.Errorf("error getting signer for provider %s: '%v'", framework.TestContext.Provider, err) + return nil, fmt.Errorf("error getting signer for provider %s: %w", framework.TestContext.Provider, err) } sshUser := os.Getenv("KUBE_SSH_USER") @@ -453,7 +453,7 @@ func getMasterSSHClient() (*ssh.Client, error) { host := framework.APIAddress() + ":22" client, err := ssh.Dial("tcp", host, config) if err != nil { - return nil, fmt.Errorf("error getting SSH client to host %s: '%v'", host, err) + return nil, fmt.Errorf("error getting SSH client to host %s: %w", host, err) } return client, err } @@ -468,7 +468,7 @@ func sshExec(client *ssh.Client, cmd string) (string, string, int, error) { framework.Logf("Executing '%s' on %v", cmd, client.RemoteAddr()) session, err := client.NewSession() if err != nil { - return "", "", 0, fmt.Errorf("error creating session to host %s: '%v'", client.RemoteAddr(), err) + return "", "", 0, fmt.Errorf("error creating session to host %s: %w", client.RemoteAddr(), err) } defer session.Close() @@ -490,7 +490,7 @@ func sshExec(client *ssh.Client, cmd string) (string, string, int, error) { } else { // Some other kind of error happened (e.g. an IOError); consider the // SSH unsuccessful. - err = fmt.Errorf("failed running `%s` on %s: '%v'", cmd, client.RemoteAddr(), err) + err = fmt.Errorf("failed running `%s` on %s: %w", cmd, client.RemoteAddr(), err) } } return bout.String(), berr.String(), code, err @@ -500,7 +500,7 @@ func writeRemoteFile(sshClient *ssh.Client, data, dir, fileName string, mode os. 
framework.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr())) session, err := sshClient.NewSession() if err != nil { - return fmt.Errorf("error creating session to host %s: '%v'", sshClient.RemoteAddr(), err) + return fmt.Errorf("error creating session to host %s: %w", sshClient.RemoteAddr(), err) } defer session.Close() diff --git a/test/e2e/cloud/gcp/common/upgrade_context.go b/test/e2e/cloud/gcp/common/upgrade_context.go index 19d09c5c347..5fc30842d3e 100644 --- a/test/e2e/cloud/gcp/common/upgrade_context.go +++ b/test/e2e/cloud/gcp/common/upgrade_context.go @@ -82,7 +82,7 @@ func realVersion(s string) (string, error) { framework.Logf("Getting real version for %q", s) v, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/get-build.sh"), "-v", s) if err != nil { - return v, fmt.Errorf("error getting real version for %q: %v", s, err) + return v, fmt.Errorf("error getting real version for %q: %w", s, err) } framework.Logf("Version for %q is %q", s, v) return strings.TrimPrefix(strings.TrimSpace(v), "v"), nil diff --git a/test/e2e/cloud/gcp/common/upgrade_mechanics.go b/test/e2e/cloud/gcp/common/upgrade_mechanics.go index 763525e5c0f..cc4a3db3f11 100644 --- a/test/e2e/cloud/gcp/common/upgrade_mechanics.go +++ b/test/e2e/cloud/gcp/common/upgrade_mechanics.go @@ -131,7 +131,7 @@ func checkControlPlaneVersion(ctx context.Context, c clientset.Interface, want s return true, nil }) if waitErr != nil { - return fmt.Errorf("CheckControlPlane() couldn't get the control plane version: %v", err) + return fmt.Errorf("CheckControlPlane() couldn't get the control plane version: %w", err) } // We do prefix trimming and then matching because: // want looks like: 0.19.3-815-g50e67d4 diff --git a/test/e2e/cloud/gcp/node_lease.go b/test/e2e/cloud/gcp/node_lease.go index cef9a0f86be..e3e875a68db 100644 --- a/test/e2e/cloud/gcp/node_lease.go +++ b/test/e2e/cloud/gcp/node_lease.go @@ -97,7 +97,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() { // Many e2e tests assume that the cluster is fully healthy before they start. Wait until // the cluster is restored to health. ginkgo.By("waiting for system pods to successfully restart") - err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{}) + err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout) framework.ExpectNoError(err) }) diff --git a/test/e2e/cloud/gcp/resize_nodes.go b/test/e2e/cloud/gcp/resize_nodes.go index 60a02fbb41e..034d1876919 100644 --- a/test/e2e/cloud/gcp/resize_nodes.go +++ b/test/e2e/cloud/gcp/resize_nodes.go @@ -99,7 +99,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() { // Many e2e tests assume that the cluster is fully healthy before they start. Wait until // the cluster is restored to health. 
ginkgo.By("waiting for system pods to successfully restart") - err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{}) + err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout) framework.ExpectNoError(err) }) }) diff --git a/test/e2e/common/node/container_probe.go b/test/e2e/common/node/container_probe.go index 5f4e90d3e71..6a0f687ff07 100644 --- a/test/e2e/common/node/container_probe.go +++ b/test/e2e/common/node/container_probe.go @@ -608,7 +608,7 @@ done }) // verify pods are running and ready - err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{}) + err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart) framework.ExpectNoError(err) // Shutdown pod. Readiness should change to false @@ -690,7 +690,7 @@ done }) // verify pods are running and ready - err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{}) + err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart) framework.ExpectNoError(err) // Shutdown pod. Readiness should change to false diff --git a/test/e2e/common/node/lease.go b/test/e2e/common/node/lease.go index 2582339704d..4cc3bdc673b 100644 --- a/test/e2e/common/node/lease.go +++ b/test/e2e/common/node/lease.go @@ -39,15 +39,15 @@ import ( func getPatchBytes(oldLease, newLease *coordinationv1.Lease) ([]byte, error) { oldData, err := json.Marshal(oldLease) if err != nil { - return nil, fmt.Errorf("failed to Marshal oldData: %v", err) + return nil, fmt.Errorf("failed to Marshal oldData: %w", err) } newData, err := json.Marshal(newLease) if err != nil { - return nil, fmt.Errorf("failed to Marshal newData: %v", err) + return nil, fmt.Errorf("failed to Marshal newData: %w", err) } patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, coordinationv1.Lease{}) if err != nil { - return nil, fmt.Errorf("failed to CreateTwoWayMergePatch: %v", err) + return nil, fmt.Errorf("failed to CreateTwoWayMergePatch: %w", err) } return patchBytes, nil } diff --git a/test/e2e/common/node/pods.go b/test/e2e/common/node/pods.go index 698e032d003..f8ac287da5c 100644 --- a/test/e2e/common/node/pods.go +++ b/test/e2e/common/node/pods.go @@ -873,7 +873,7 @@ var _ = SIGDescribe("Pods", func() { // wait as required for all 3 pods to be running ginkgo.By("waiting for all 3 pods to be running") - err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 3, 0, f.Timeouts.PodStart, nil) + err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 3, 0, f.Timeouts.PodStart) framework.ExpectNoError(err, "3 pods not found running.") // delete Collection of pods with a label in the current namespace diff --git a/test/e2e/common/node/runtime.go b/test/e2e/common/node/runtime.go index 9d2c1475ef5..abce11298de 100644 --- a/test/e2e/common/node/runtime.go +++ b/test/e2e/common/node/runtime.go @@ -310,7 +310,7 @@ while true; do sleep 1; done checkContainerStatus := func(ctx context.Context) error { status, err := container.GetStatus(ctx) if err != nil { - return fmt.Errorf("failed to get container status: %v", err) + return fmt.Errorf("failed to get container status: %w", err) } // We need to check container state first. 
The default pod status is pending, If we check pod phase first, // and the expected pod phase is Pending, the container status may not even show up when we check it. @@ -335,7 +335,7 @@ while true; do sleep 1; done // Check pod phase phase, err := container.GetPhase(ctx) if err != nil { - return fmt.Errorf("failed to get pod phase: %v", err) + return fmt.Errorf("failed to get pod phase: %w", err) } if phase != expectedPhase { return fmt.Errorf("expected pod phase: %q, got: %q", expectedPhase, phase) diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go index 19e2ee3a2a9..1102ce2c367 100644 --- a/test/e2e/common/util.go +++ b/test/e2e/common/util.go @@ -195,11 +195,11 @@ func RestartNodes(c clientset.Interface, nodes []v1.Node) error { if err := wait.Poll(30*time.Second, framework.RestartNodeReadyAgainTimeout, func() (bool, error) { newNode, err := c.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) if err != nil { - return false, fmt.Errorf("error getting node info after reboot: %s", err) + return false, fmt.Errorf("error getting node info after reboot: %w", err) } return node.Status.NodeInfo.BootID != newNode.Status.NodeInfo.BootID, nil }); err != nil { - return fmt.Errorf("error waiting for node %s boot ID to change: %s", node.Name, err) + return fmt.Errorf("error waiting for node %s boot ID to change: %w", node.Name, err) } } return nil diff --git a/test/e2e/dra/dra.go b/test/e2e/dra/dra.go index fc7529e682c..f1cc0b9ff82 100644 --- a/test/e2e/dra/dra.go +++ b/test/e2e/dra/dra.go @@ -110,7 +110,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu gomega.Consistently(func() error { testPod, err := b.f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{}) if err != nil { - return fmt.Errorf("expected the test pod %s to exist: %v", pod.Name, err) + return fmt.Errorf("expected the test pod %s to exist: %w", pod.Name, err) } if testPod.Status.Phase != v1.PodPending { return fmt.Errorf("pod %s: unexpected status %s, expected status: %s", pod.Name, testPod.Status.Phase, v1.PodPending) diff --git a/test/e2e/dra/test-driver/app/controller.go b/test/e2e/dra/test-driver/app/controller.go index 079e7739dcf..8ddd46ffa53 100644 --- a/test/e2e/dra/test-driver/app/controller.go +++ b/test/e2e/dra/test-driver/app/controller.go @@ -148,7 +148,7 @@ func (c *ExampleController) GetClaimParameters(ctx context.Context, claim *resou func (c *ExampleController) readParametersFromConfigMap(ctx context.Context, namespace, name string) (map[string]string, error) { configMap, err := c.clientset.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { - return nil, fmt.Errorf("get config map: %v", err) + return nil, fmt.Errorf("get config map: %w", err) } return configMap.Data, nil } @@ -221,7 +221,7 @@ func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha toEnvVars("admin", classParameters, p.EnvVars) data, err := json.Marshal(p) if err != nil { - return nil, fmt.Errorf("encode parameters: %v", err) + return nil, fmt.Errorf("encode parameters: %w", err) } allocation.ResourceHandle = string(data) var nodes []string diff --git a/test/e2e/dra/test-driver/app/kubeletplugin.go b/test/e2e/dra/test-driver/app/kubeletplugin.go index 89e9b85734b..e24aeb44a54 100644 --- a/test/e2e/dra/test-driver/app/kubeletplugin.go +++ b/test/e2e/dra/test-driver/app/kubeletplugin.go @@ -97,7 +97,7 @@ func StartPlugin(logger klog.Logger, cdiDir, driverName string, nodeName string, ) d, err := 
kubeletplugin.Start(ex, opts...) if err != nil { - return nil, fmt.Errorf("start kubelet plugin: %v", err) + return nil, fmt.Errorf("start kubelet plugin: %w", err) } ex.d = d @@ -127,7 +127,7 @@ func (ex *ExamplePlugin) NodePrepareResource(ctx context.Context, req *drapbv1.N // Determine environment variables. var p parameters if err := json.Unmarshal([]byte(req.ResourceHandle), &p); err != nil { - return nil, fmt.Errorf("unmarshal resource handle: %v", err) + return nil, fmt.Errorf("unmarshal resource handle: %w", err) } // Sanity check scheduling. @@ -161,7 +161,7 @@ func (ex *ExamplePlugin) NodePrepareResource(ctx context.Context, req *drapbv1.N filePath := ex.getJSONFilePath(req.ClaimUid) buffer, err := json.Marshal(spec) if err != nil { - return nil, fmt.Errorf("marshal spec: %v", err) + return nil, fmt.Errorf("marshal spec: %w", err) } if err := ex.fileOps.Create(filePath, buffer); err != nil { return nil, fmt.Errorf("failed to write CDI file %v", err) @@ -186,7 +186,7 @@ func (ex *ExamplePlugin) NodeUnprepareResource(ctx context.Context, req *drapbv1 filePath := ex.getJSONFilePath(req.ClaimUid) if err := ex.fileOps.Remove(filePath); err != nil { - return nil, fmt.Errorf("error removing CDI file: %v", err) + return nil, fmt.Errorf("error removing CDI file: %w", err) } logger.V(3).Info("CDI file removed", "path", filePath) diff --git a/test/e2e/dra/test-driver/app/server.go b/test/e2e/dra/test-driver/app/server.go index 4be3ea34f32..eae5e425c2d 100644 --- a/test/e2e/dra/test-driver/app/server.go +++ b/test/e2e/dra/test-driver/app/server.go @@ -115,12 +115,12 @@ func NewCommand() *cobra.Command { if *kubeconfig == "" { config, err = rest.InClusterConfig() if err != nil { - return fmt.Errorf("create in-cluster client configuration: %v", err) + return fmt.Errorf("create in-cluster client configuration: %w", err) } } else { config, err = clientcmd.BuildConfigFromFlags("", *kubeconfig) if err != nil { - return fmt.Errorf("create out-of-cluster client configuration: %v", err) + return fmt.Errorf("create out-of-cluster client configuration: %w", err) } } config.QPS = *kubeAPIQPS @@ -128,7 +128,7 @@ func NewCommand() *cobra.Command { clientset, err = kubernetes.NewForConfig(config) if err != nil { - return fmt.Errorf("create client: %v", err) + return fmt.Errorf("create client: %w", err) } if *httpEndpoint != "" { @@ -158,7 +158,7 @@ func NewCommand() *cobra.Command { listener, err := net.Listen("tcp", *httpEndpoint) if err != nil { - return fmt.Errorf("listen on HTTP endpoint: %v", err) + return fmt.Errorf("listen on HTTP endpoint: %w", err) } go func() { @@ -203,12 +203,12 @@ func NewCommand() *cobra.Command { if *resourceConfig != "" { file, err := os.Open(*resourceConfig) if err != nil { - return fmt.Errorf("open resource config: %v", err) + return fmt.Errorf("open resource config: %w", err) } decoder := json.NewDecoder(file) decoder.DisallowUnknownFields() if err := decoder.Decode(&resources); err != nil { - return fmt.Errorf("parse resource config %q: %v", *resourceConfig, err) + return fmt.Errorf("parse resource config %q: %w", *resourceConfig, err) } } @@ -230,7 +230,7 @@ func NewCommand() *cobra.Command { // exceeds the QPS+burst limits. 
leClientset, err := kubernetes.NewForConfig(config) if err != nil { - return fmt.Errorf("create leaderelection client: %v", err) + return fmt.Errorf("create leaderelection client: %w", err) } le := leaderelection.New(leClientset, lockName, @@ -246,7 +246,7 @@ func NewCommand() *cobra.Command { le.PrepareHealthCheck(mux) } if err := le.Run(); err != nil { - return fmt.Errorf("leader election failed: %v", err) + return fmt.Errorf("leader election failed: %w", err) } return nil @@ -275,10 +275,10 @@ func NewCommand() *cobra.Command { // to know early if there is a setup problem that would prevent // creating those directories. if err := os.MkdirAll(*cdiDir, os.FileMode(0750)); err != nil { - return fmt.Errorf("create CDI directory: %v", err) + return fmt.Errorf("create CDI directory: %w", err) } if err := os.MkdirAll(filepath.Dir(*endpoint), 0750); err != nil { - return fmt.Errorf("create socket directory: %v", err) + return fmt.Errorf("create socket directory: %w", err) } plugin, err := StartPlugin(logger, *cdiDir, *driverName, "", FileOperations{}, @@ -287,7 +287,7 @@ func NewCommand() *cobra.Command { kubeletplugin.KubeletPluginSocketPath(*draAddress), ) if err != nil { - return fmt.Errorf("start example plugin: %v", err) + return fmt.Errorf("start example plugin: %w", err) } // Handle graceful shutdown. We need to delete Unix domain diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go index 0d674e83b74..5ad7daebb21 100644 --- a/test/e2e/e2e.go +++ b/test/e2e/e2e.go @@ -245,7 +245,7 @@ func setupSuite(ctx context.Context) { // #41007. To avoid those pods preventing the whole test runs (and just // wasting the whole run), we allow for some not-ready pods (with the // number equal to the number of allowed not-ready nodes). - if err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), timeouts.SystemPodsStartup, map[string]string{}); err != nil { + if err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), timeouts.SystemPodsStartup); err != nil { e2edebug.DumpAllNamespaceInfo(ctx, c, metav1.NamespaceSystem) e2ekubectl.LogFailedContainers(ctx, c, metav1.NamespaceSystem, framework.Logf) framework.Failf("Error waiting for all pods to be running and ready: %v", err) diff --git a/test/e2e/framework/autoscaling/autoscaling_utils.go b/test/e2e/framework/autoscaling/autoscaling_utils.go index f618ba64ca2..0bf99ac062c 100644 --- a/test/e2e/framework/autoscaling/autoscaling_utils.go +++ b/test/e2e/framework/autoscaling/autoscaling_utils.go @@ -1001,7 +1001,7 @@ func CreateCustomSubresourceInstance(ctx context.Context, namespace, name string } createdObjectMeta, err := meta.Accessor(instance) if err != nil { - return nil, fmt.Errorf("Error while creating object meta: %v", err) + return nil, fmt.Errorf("Error while creating object meta: %w", err) } if len(createdObjectMeta.GetUID()) == 0 { return nil, fmt.Errorf("Missing UUID: %v", instance) diff --git a/test/e2e/framework/deployment/fixtures.go b/test/e2e/framework/deployment/fixtures.go index 462bbc6ea08..f22695961f6 100644 --- a/test/e2e/framework/deployment/fixtures.go +++ b/test/e2e/framework/deployment/fixtures.go @@ -75,12 +75,12 @@ func CreateDeployment(ctx context.Context, client clientset.Interface, replicas deploymentSpec := testDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command) deployment, err := 
client.AppsV1().Deployments(namespace).Create(ctx, deploymentSpec, metav1.CreateOptions{}) if err != nil { - return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err) + return nil, fmt.Errorf("deployment %q Create API error: %w", deploymentSpec.Name, err) } framework.Logf("Waiting deployment %q to complete", deploymentSpec.Name) err = WaitForDeploymentComplete(client, deployment) if err != nil { - return nil, fmt.Errorf("deployment %q failed to complete: %v", deploymentSpec.Name, err) + return nil, fmt.Errorf("deployment %q failed to complete: %w", deploymentSpec.Name, err) } return deployment, nil } diff --git a/test/e2e/framework/events/events.go b/test/e2e/framework/events/events.go index 80b931c2837..b38c4a1eea8 100644 --- a/test/e2e/framework/events/events.go +++ b/test/e2e/framework/events/events.go @@ -42,7 +42,7 @@ func eventOccurred(c clientset.Interface, namespace, eventSelector, msg string) return func(ctx context.Context) (bool, error) { events, err := c.CoreV1().Events(namespace).List(ctx, options) if err != nil { - return false, fmt.Errorf("got error while getting events: %v", err) + return false, fmt.Errorf("got error while getting events: %w", err) } for _, event := range events.Items { if strings.Contains(event.Message, msg) { diff --git a/test/e2e/framework/expect.go b/test/e2e/framework/expect.go index 7b7fe7f416a..da0cde08df7 100644 --- a/test/e2e/framework/expect.go +++ b/test/e2e/framework/expect.go @@ -17,12 +17,281 @@ limitations under the License. package framework import ( + "context" + "errors" "fmt" + "strings" + "time" + ginkgotypes "github.com/onsi/ginkgo/v2/types" "github.com/onsi/gomega" "github.com/onsi/gomega/format" + "github.com/onsi/gomega/types" ) +// MakeMatcher builds a gomega.Matcher based on a single callback function. +// That function is passed the actual value that is to be checked. +// There are three possible outcomes of the check: +// - An error is returned, which then is converted into a failure +// by Gomega. +// - A non-nil failure function is returned, which then is called +// by Gomega once a failure string is needed. This is useful +// to avoid unnecessarily preparing a failure string for intermediate +// failures in Eventually or Consistently. +// - Both function and error are nil, which means that the check +// succeeded. +func MakeMatcher[T interface{}](match func(actual T) (failure func() string, err error)) types.GomegaMatcher { + return &matcher[T]{ + match: match, + } +} + +type matcher[T interface{}] struct { + match func(actual T) (func() string, error) + failure func() string +} + +func (m *matcher[T]) Match(actual interface{}) (success bool, err error) { + if actual, ok := actual.(T); ok { + failure, err := m.match(actual) + if err != nil { + return false, err + } + m.failure = failure + if failure != nil { + return false, nil + } + return true, nil + } + var empty T + return false, gomega.StopTrying(fmt.Sprintf("internal error: expected %T, got:\n%s", empty, format.Object(actual, 1))) +} + +func (m *matcher[T]) FailureMessage(actual interface{}) string { + return m.failure() +} + +func (m matcher[T]) NegatedFailureMessage(actual interface{}) string { + return m.failure() +} + +var _ types.GomegaMatcher = &matcher[string]{} + +// Gomega returns an interface that can be used like gomega to express +// assertions. 
The difference is that failed assertions are returned as an +// error: +// +// if err := Gomega().Expect(pod.Status.Phase).To(gomega.BeEqual(v1.Running)); err != nil { +// return fmt.Errorf("test pod not running: %w", err) +// } +// +// This error can get wrapped to provide additional context for the +// failure. The test then should use ExpectNoError to turn a non-nil error into +// a failure. +// +// When using this approach, there is no need for call offsets and extra +// descriptions for the Expect call because the call stack will be dumped when +// ExpectNoError is called and the additional description(s) can be added by +// wrapping the error. +// +// Asynchronous assertions use the framework's Poll interval and PodStart timeout +// by default. +func Gomega() GomegaInstance { + return gomegaInstance{} +} + +type GomegaInstance interface { + Expect(actual interface{}) Assertion + Eventually(ctx context.Context, args ...interface{}) AsyncAssertion + Consistently(ctx context.Context, args ...interface{}) AsyncAssertion +} + +type Assertion interface { + Should(matcher types.GomegaMatcher) error + ShouldNot(matcher types.GomegaMatcher) error + To(matcher types.GomegaMatcher) error + ToNot(matcher types.GomegaMatcher) error + NotTo(matcher types.GomegaMatcher) error +} + +type AsyncAssertion interface { + Should(matcher types.GomegaMatcher) error + ShouldNot(matcher types.GomegaMatcher) error + + WithTimeout(interval time.Duration) AsyncAssertion + WithPolling(interval time.Duration) AsyncAssertion +} + +type gomegaInstance struct{} + +var _ GomegaInstance = gomegaInstance{} + +func (g gomegaInstance) Expect(actual interface{}) Assertion { + return assertion{actual: actual} +} + +func (g gomegaInstance) Eventually(ctx context.Context, args ...interface{}) AsyncAssertion { + return newAsyncAssertion(ctx, args, false) +} + +func (g gomegaInstance) Consistently(ctx context.Context, args ...interface{}) AsyncAssertion { + return newAsyncAssertion(ctx, args, true) +} + +func newG() (*FailureError, gomega.Gomega) { + var failure FailureError + g := gomega.NewGomega(func(msg string, callerSkip ...int) { + failure = FailureError{ + msg: msg, + } + }) + + return &failure, g +} + +type assertion struct { + actual interface{} +} + +func (a assertion) Should(matcher types.GomegaMatcher) error { + err, g := newG() + if !g.Expect(a.actual).Should(matcher) { + err.backtrace() + return *err + } + return nil +} + +func (a assertion) ShouldNot(matcher types.GomegaMatcher) error { + err, g := newG() + if !g.Expect(a.actual).ShouldNot(matcher) { + err.backtrace() + return *err + } + return nil +} + +func (a assertion) To(matcher types.GomegaMatcher) error { + err, g := newG() + if !g.Expect(a.actual).To(matcher) { + err.backtrace() + return *err + } + return nil +} + +func (a assertion) ToNot(matcher types.GomegaMatcher) error { + err, g := newG() + if !g.Expect(a.actual).ToNot(matcher) { + err.backtrace() + return *err + } + return nil +} + +func (a assertion) NotTo(matcher types.GomegaMatcher) error { + err, g := newG() + if !g.Expect(a.actual).NotTo(matcher) { + err.backtrace() + return *err + } + return nil +} + +type asyncAssertion struct { + ctx context.Context + args []interface{} + timeout time.Duration + interval time.Duration + consistently bool +} + +func newAsyncAssertion(ctx context.Context, args []interface{}, consistently bool) asyncAssertion { + return asyncAssertion{ + ctx: ctx, + args: args, + // PodStart is used as default because waiting for a pod is the + // most common operation. 
+ timeout: TestContext.timeouts.PodStart, + interval: TestContext.timeouts.Poll, + } +} + +func (a asyncAssertion) newAsync() (*FailureError, gomega.AsyncAssertion) { + err, g := newG() + var assertion gomega.AsyncAssertion + if a.consistently { + assertion = g.Consistently(a.ctx, a.args...) + } else { + assertion = g.Eventually(a.ctx, a.args...) + } + assertion = assertion.WithTimeout(a.timeout).WithPolling(a.interval) + return err, assertion +} + +func (a asyncAssertion) Should(matcher types.GomegaMatcher) error { + err, assertion := a.newAsync() + if !assertion.Should(matcher) { + err.backtrace() + return *err + } + return nil +} + +func (a asyncAssertion) ShouldNot(matcher types.GomegaMatcher) error { + err, assertion := a.newAsync() + if !assertion.ShouldNot(matcher) { + err.backtrace() + return *err + } + return nil +} + +func (a asyncAssertion) WithTimeout(timeout time.Duration) AsyncAssertion { + a.timeout = timeout + return a +} + +func (a asyncAssertion) WithPolling(interval time.Duration) AsyncAssertion { + a.interval = interval + return a +} + +// FailureError is an error where the error string is meant to be passed to +// ginkgo.Fail directly, i.e. adding some prefix like "unexpected error" is not +// necessary. It is also not necessary to dump the error struct. +type FailureError struct { + msg string + fullStackTrace string +} + +func (f FailureError) Error() string { + return f.msg +} + +func (f FailureError) Backtrace() string { + return f.fullStackTrace +} + +func (f FailureError) Is(target error) bool { + return target == ErrFailure +} + +func (f *FailureError) backtrace() { + f.fullStackTrace = ginkgotypes.NewCodeLocationWithStackTrace(2).FullStackTrace +} + +// ErrFailure is an empty error that can be wrapped to indicate that an error +// is a FailureError. It can also be used to test for a FailureError:. +// +// return fmt.Errorf("some problem%w", ErrFailure) +// ... +// err := someOperation() +// if errors.Is(err, ErrFailure) { +// ... +// } +var ErrFailure error = FailureError{} + // ExpectEqual expects the specified two are the same, otherwise an exception raises func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) { gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...) @@ -72,7 +341,17 @@ func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) { // failures at the same code line might not be matched in // https://go.k8s.io/triage because the error details are too // different. - Logf("Unexpected error: %s\n%s", prefix, format.Object(err, 1)) + // + // Some errors include all relevant information in the Error + // string. For those we can skip the redundant log message. + // For our own failures we only log the additional stack backtrace + // because it is not included in the failure message. + var failure FailureError + if errors.As(err, &failure) && failure.Backtrace() != "" { + Logf("Failed inside E2E framework:\n %s", strings.ReplaceAll(failure.Backtrace(), "\n", "\n ")) + } else if !errors.Is(err, ErrFailure) { + Logf("Unexpected error: %s\n%s", prefix, format.Object(err, 1)) + } Fail(prefix+err.Error(), 1+offset) } diff --git a/test/e2e/framework/expect_test.go b/test/e2e/framework/expect_test.go new file mode 100644 index 00000000000..c64980fd128 --- /dev/null +++ b/test/e2e/framework/expect_test.go @@ -0,0 +1,62 @@ +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package framework + +import ( + "errors" + "testing" + + "github.com/onsi/gomega" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +// This test is sensitive to line numbering. +// The following lines can be removed to compensate for import changes. +// +// +// +// +// +// +// +// +// +// +// This must be line #40. + +func TestNewGomega(t *testing.T) { + if err := Gomega().Expect("hello").To(gomega.Equal("hello")); err != nil { + t.Errorf("unexpected failure: %s", err.Error()) + } + err := Gomega().Expect("hello").ToNot(gomega.Equal("hello")) + require.NotNil(t, err) + assert.Equal(t, `Expected + : hello +not to equal + : hello`, err.Error()) + if !errors.Is(err, ErrFailure) { + t.Errorf("expected error that is ErrFailure, got %T: %+v", err, err) + } + var failure FailureError + if !errors.As(err, &failure) { + t.Errorf("expected error that can be copied to FailureError, got %T: %+v", err, err) + } else { + assert.Regexp(t, `^k8s.io/kubernetes/test/e2e/framework.TestNewGomega\(0x[0-9A-Fa-f]*\) + .*/test/e2e/framework/expect_test.go:46`, failure.Backtrace()) + } +} diff --git a/test/e2e/framework/get.go b/test/e2e/framework/get.go index 27574257faa..ffa26d3078d 100644 --- a/test/e2e/framework/get.go +++ b/test/e2e/framework/get.go @@ -34,6 +34,9 @@ type GetFunc[T any] func(ctx context.Context) (T, error) // APIGetFunc is a get functions as used in client-go. type APIGetFunc[T any] func(ctx context.Context, name string, getOptions metav1.GetOptions) (T, error) +// APIListFunc is a list functions as used in client-go. +type APIListFunc[T any] func(ctx context.Context, listOptions metav1.ListOptions) (T, error) + // GetObject takes a get function like clientset.CoreV1().Pods(ns).Get // and the parameters for it and returns a function that executes that get // operation in a [gomega.Eventually] or [gomega.Consistently]. @@ -47,6 +50,17 @@ func GetObject[T any](get APIGetFunc[T], name string, getOptions metav1.GetOptio }) } +// ListObjects takes a list function like clientset.CoreV1().Pods(ns).List +// and the parameters for it and returns a function that executes that list +// operation in a [gomega.Eventually] or [gomega.Consistently]. +// +// Delays and retries are handled by [HandleRetry]. +func ListObjects[T any](list APIListFunc[T], listOptions metav1.ListOptions) GetFunc[T] { + return HandleRetry(func(ctx context.Context) (T, error) { + return list(ctx, listOptions) + }) +} + // HandleRetry wraps an arbitrary get function. When the wrapped function // returns an error, HandleGetError will decide whether the call should be // retried and if requested, will sleep before doing so. 
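The new helpers in expect.go and get.go are meant to be combined: GetObject/ListObjects produce pollable functions, MakeMatcher builds a matcher from a single callback, and Gomega() returns failed assertions as errors that can be wrapped and later handed to ExpectNoError. A sketch of how a test could put them together; waitForPodRunning, podName, and the two-minute timeout are assumptions for illustration, and the snippet assumes imports of context, fmt, time, v1 "k8s.io/api/core/v1", metav1 "k8s.io/apimachinery/pkg/apis/meta/v1", and "k8s.io/kubernetes/test/e2e/framework".

// Hypothetical usage sketch, not part of the diff.
func waitForPodRunning(ctx context.Context, f *framework.Framework, podName string) error {
	// GetObject wraps the client-go getter so it can be polled by Eventually,
	// with delays and retries handled by the framework's HandleRetry.
	getPod := framework.GetObject(f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get, podName, metav1.GetOptions{})

	// MakeMatcher defers building the failure message until Gomega needs it.
	beRunning := framework.MakeMatcher(func(pod *v1.Pod) (func() string, error) {
		if pod.Status.Phase == v1.PodRunning {
			return nil, nil
		}
		return func() string {
			return fmt.Sprintf("pod %q is in phase %q, expected %q", pod.Name, pod.Status.Phase, v1.PodRunning)
		}, nil
	})

	// A failed assertion comes back as an error instead of aborting the test,
	// so the caller can add context with %w.
	if err := framework.Gomega().Eventually(ctx, getPod).WithTimeout(2 * time.Minute).Should(beRunning); err != nil {
		return fmt.Errorf("pod %q never reached phase %s: %w", podName, v1.PodRunning, err)
	}
	return nil
}

Because the returned error wraps a FailureError, errors.Is(err, framework.ErrFailure) holds, and framework.ExpectNoError takes the new branch above that logs only the backtrace rather than a redundant error dump.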
diff --git a/test/e2e/framework/ingress/ingress_utils.go b/test/e2e/framework/ingress/ingress_utils.go index 06daf645e9b..908f4cbbabf 100644 --- a/test/e2e/framework/ingress/ingress_utils.go +++ b/test/e2e/framework/ingress/ingress_utils.go @@ -309,7 +309,7 @@ func GenerateRSACerts(host string, isCA bool) ([]byte, []byte, error) { } priv, err := rsa.GenerateKey(rand.Reader, rsaBits) if err != nil { - return nil, nil, fmt.Errorf("Failed to generate key: %v", err) + return nil, nil, fmt.Errorf("Failed to generate key: %w", err) } notBefore := time.Now() notAfter := notBefore.Add(validFor) @@ -318,7 +318,7 @@ func GenerateRSACerts(host string, isCA bool) ([]byte, []byte, error) { serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) if err != nil { - return nil, nil, fmt.Errorf("failed to generate serial number: %s", err) + return nil, nil, fmt.Errorf("failed to generate serial number: %w", err) } template := x509.Certificate{ SerialNumber: serialNumber, @@ -351,13 +351,13 @@ func GenerateRSACerts(host string, isCA bool) ([]byte, []byte, error) { var keyOut, certOut bytes.Buffer derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) if err != nil { - return nil, nil, fmt.Errorf("Failed to create certificate: %s", err) + return nil, nil, fmt.Errorf("Failed to create certificate: %w", err) } if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { - return nil, nil, fmt.Errorf("Failed creating cert: %v", err) + return nil, nil, fmt.Errorf("Failed creating cert: %w", err) } if err := pem.Encode(&keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil { - return nil, nil, fmt.Errorf("Failed creating key: %v", err) + return nil, nil, fmt.Errorf("Failed creating key: %w", err) } return certOut.Bytes(), keyOut.Bytes(), nil } @@ -532,11 +532,11 @@ func ingressFromManifest(fileName string) (*networkingv1.Ingress, error) { func ingressToManifest(ing *networkingv1.Ingress, path string) error { serialized, err := marshalToYaml(ing, networkingv1.SchemeGroupVersion) if err != nil { - return fmt.Errorf("failed to marshal ingress %v to YAML: %v", ing, err) + return fmt.Errorf("failed to marshal ingress %v to YAML: %w", ing, err) } if err := os.WriteFile(path, serialized, 0600); err != nil { - return fmt.Errorf("error in writing ingress to file: %s", err) + return fmt.Errorf("error in writing ingress to file: %w", err) } return nil } @@ -1150,17 +1150,17 @@ func (j *TestJig) DeleteTestResource(ctx context.Context, cs clientset.Interface var errs []error if ing != nil { if err := j.runDelete(ctx, ing); err != nil { - errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err)) + errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %w", ing.Namespace, ing.Name, err)) } } if svc != nil { if err := cs.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{}); err != nil { - errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err)) + errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %w", svc.Namespace, svc.Name, err)) } } if deploy != nil { if err := cs.AppsV1().Deployments(deploy.Namespace).Delete(ctx, deploy.Name, metav1.DeleteOptions{}); err != nil { - errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %v", deploy.Namespace, deploy.Name, err)) + errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %w", 
			deploy.Namespace, deploy.Name, err))
 		}
 	}
 	return errs
diff --git a/test/e2e/framework/internal/output/output.go b/test/e2e/framework/internal/output/output.go
index cc26c079677..053c36012a7 100644
--- a/test/e2e/framework/internal/output/output.go
+++ b/test/e2e/framework/internal/output/output.go
@@ -122,6 +122,9 @@ var timePrefix = regexp.MustCompile(`(?m)^[[:alpha:]]{3} +[[:digit:]]{1,2} +[[:d
 // elapsedSuffix matches "Elapsed: 16.189µs"
 var elapsedSuffix = regexp.MustCompile(`Elapsed: [[:digit:]]+(\.[[:digit:]]+)?(µs|ns|ms|s|m)`)
 
+// afterSuffix matches "after 5.001s."
+var afterSuffix = regexp.MustCompile(`after [[:digit:]]+(\.[[:digit:]]+)?(µs|ns|ms|s|m).`)
+
 // timeSuffix matches "@ 09/06/22 15:36:43.44 (5.001s)" as printed by Ginkgo v2 for log output, with the duration being optional.
 var timeSuffix = regexp.MustCompile(`(?m)@[[:space:]][[:digit:]]{2}/[[:digit:]]{2}/[[:digit:]]{2} [[:digit:]]{2}:[[:digit:]]{2}:[[:digit:]]{2}(\.[[:digit:]]{1,3})?( \([[:digit:]]+(\.[[:digit:]]+)?(µs|ns|ms|s|m)\))?$`)
 
@@ -129,6 +132,7 @@ func stripTimes(in string) string {
 	out := timePrefix.ReplaceAllString(in, "")
 	out = elapsedSuffix.ReplaceAllString(out, "Elapsed: ")
 	out = timeSuffix.ReplaceAllString(out, "