Merge pull request #113298 from pohly/e2e-wait-for-pods-with-gomega

e2e: wait for pods with gomega
Commit 85aa0057c6 by Kubernetes Prow Robot, 2023-02-04 05:26:29 -08:00, committed by GitHub (GPG Key ID: 4AEE18F83AFDEB23)
126 changed files with 1426 additions and 1143 deletions
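
For context, here is a minimal sketch of the waiting/assertion style this change moves the e2e suite toward. The helpers it relies on (framework.Gomega, framework.MakeMatcher, the error-returning Should, the trimmed e2epod.WaitForPodsRunningReady signature) are taken from the diff below; the helper function itself, its name, and the surrounding wiring are hypothetical and only illustrate the intended usage, not code from this PR.

package example

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForPodRunning is a hypothetical helper: it polls the pod with
// Gomega().Eventually and returns a plain error instead of failing the
// test immediately, so callers can wrap the error with more context and
// hand it to framework.ExpectNoError at the end.
func waitForPodRunning(ctx context.Context, f *framework.Framework, podName string) error {
	err := framework.Gomega().
		Eventually(ctx, func(ctx context.Context) (*v1.Pod, error) {
			return f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, podName, metav1.GetOptions{})
		}).
		WithTimeout(f.Timeouts.PodStart).
		Should(framework.MakeMatcher(func(pod *v1.Pod) (func() string, error) {
			if pod.Status.Phase == v1.PodRunning {
				// Success: no failure message needs to be built.
				return nil, nil
			}
			// Return a failure func; it is only invoked when Gomega
			// actually has to report a failure.
			return func() string {
				return fmt.Sprintf("expected pod %q to be Running, got %s", pod.Name, pod.Status.Phase)
			}, nil
		}))
	if err != nil {
		// Wrapping keeps the gomega failure text and adds context.
		return fmt.Errorf("waiting for pod %q: %w", podName, err)
	}
	return nil
}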


@@ -73,7 +73,7 @@ func restartAPIServer(ctx context.Context, node *v1.Node) error {
result, err := e2essh.SSH(ctx, cmd, net.JoinHostPort(controlPlaneAddress, e2essh.SSHPort), framework.TestContext.Provider)
if err != nil || result.Code != 0 {
e2essh.LogResult(result)
- return fmt.Errorf("couldn't restart kube-apiserver: %v", err)
+ return fmt.Errorf("couldn't restart kube-apiserver: %w", err)
}
return nil
}


@@ -561,7 +561,7 @@ func setupCRDAndVerifySchemaWithOptions(f *framework.Framework, schema, expect [
})
crd, err := crd.CreateMultiVersionTestCRD(f, group, options...)
if err != nil {
- return nil, fmt.Errorf("failed to create CRD: %v", err)
+ return nil, fmt.Errorf("failed to create CRD: %w", err)
}
for _, v := range crd.Crd.Spec.Versions {
@@ -623,7 +623,7 @@ func waitForDefinition(c k8sclientset.Interface, name string, schema []byte) err
return true, ""
})
if err != nil {
- return fmt.Errorf("failed to wait for definition %q to be served with the right OpenAPI schema: %v", name, err)
+ return fmt.Errorf("failed to wait for definition %q to be served with the right OpenAPI schema: %w", name, err)
}
return nil
}
@@ -637,7 +637,7 @@ func waitForDefinitionCleanup(c k8sclientset.Interface, name string) error {
return true, ""
})
if err != nil {
- return fmt.Errorf("failed to wait for definition %q not to be served anymore: %v", name, err)
+ return fmt.Errorf("failed to wait for definition %q not to be served anymore: %w", name, err)
}
return nil
}
@@ -718,7 +718,7 @@ func dropDefaults(s *spec.Schema) {
func verifyKubectlExplain(ns, name, pattern string) error {
result, err := e2ekubectl.RunKubectl(ns, "explain", name)
if err != nil {
- return fmt.Errorf("failed to explain %s: %v", name, err)
+ return fmt.Errorf("failed to explain %s: %w", name, err)
}
r := regexp.MustCompile(pattern)
if !r.Match([]byte(result)) {


@@ -181,7 +181,7 @@ func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects
case "Pods":
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
- return false, fmt.Errorf("failed to list pods: %v", err)
+ return false, fmt.Errorf("failed to list pods: %w", err)
}
if len(pods.Items) != num {
ret = false
@@ -190,7 +190,7 @@ func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects
case "Deployments":
deployments, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
- return false, fmt.Errorf("failed to list deployments: %v", err)
+ return false, fmt.Errorf("failed to list deployments: %w", err)
}
if len(deployments.Items) != num {
ret = false
@@ -199,7 +199,7 @@ func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects
case "ReplicaSets":
rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
- return false, fmt.Errorf("failed to list rs: %v", err)
+ return false, fmt.Errorf("failed to list rs: %w", err)
}
if len(rs.Items) != num {
ret = false
@@ -208,7 +208,7 @@ func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects
case "ReplicationControllers":
rcs, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
- return false, fmt.Errorf("failed to list replication controllers: %v", err)
+ return false, fmt.Errorf("failed to list replication controllers: %w", err)
}
if len(rcs.Items) != num {
ret = false
@@ -217,7 +217,7 @@ func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects
case "CronJobs":
cronJobs, err := f.ClientSet.BatchV1().CronJobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
- return false, fmt.Errorf("failed to list cronjobs: %v", err)
+ return false, fmt.Errorf("failed to list cronjobs: %w", err)
}
if len(cronJobs.Items) != num {
ret = false
@@ -226,7 +226,7 @@ func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects
case "Jobs":
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
- return false, fmt.Errorf("failed to list jobs: %v", err)
+ return false, fmt.Errorf("failed to list jobs: %w", err)
}
if len(jobs.Items) != num {
ret = false
@@ -325,7 +325,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
pods, err := podClient.List(ctx, metav1.ListOptions{})
if err != nil {
- return false, fmt.Errorf("failed to list pods: %v", err)
+ return false, fmt.Errorf("failed to list pods: %w", err)
}
// We intentionally don't wait the number of pods to reach
// rc.Spec.Replicas. We want to see if the garbage collector and the
@@ -383,7 +383,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
rc, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{})
if err != nil {
- return false, fmt.Errorf("failed to get rc: %v", err)
+ return false, fmt.Errorf("failed to get rc: %w", err)
}
if rc.Status.Replicas == *rc.Spec.Replicas {
return true, nil
@@ -410,7 +410,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.PollWithContext(ctx, 5*time.Second, 120*time.Second+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
rcs, err := rcClient.List(ctx, metav1.ListOptions{})
if err != nil {
- return false, fmt.Errorf("failed to list rcs: %v", err)
+ return false, fmt.Errorf("failed to list rcs: %w", err)
}
if len(rcs.Items) != 0 {
return false, nil
@@ -452,7 +452,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
rc, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{})
if err != nil {
- return false, fmt.Errorf("failed to get rc: %v", err)
+ return false, fmt.Errorf("failed to get rc: %w", err)
}
if rc.Status.Replicas == *rc.Spec.Replicas {
return true, nil
@@ -505,7 +505,7 @@ var _ = SIGDescribe("Garbage collector", func() {
err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) {
rsList, err := rsClient.List(ctx, metav1.ListOptions{})
if err != nil {
- return false, fmt.Errorf("failed to list rs: %v", err)
+ return false, fmt.Errorf("failed to list rs: %w", err)
}
return len(rsList.Items) > 0, nil
@@ -530,7 +530,7 @@ var _ = SIGDescribe("Garbage collector", func() {
errList = append(errList, err)
remainingRSs, err := rsClient.List(ctx, metav1.ListOptions{})
if err != nil {
- errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %v", err))
+ errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %w", err))
} else {
errList = append(errList, fmt.Errorf("remaining rs are: %#v", remainingRSs))
}
@@ -565,7 +565,7 @@ var _ = SIGDescribe("Garbage collector", func() {
err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) {
rsList, err := rsClient.List(ctx, metav1.ListOptions{})
if err != nil {
- return false, fmt.Errorf("failed to list rs: %v", err)
+ return false, fmt.Errorf("failed to list rs: %w", err)
}
if len(rsList.Items) > 0 {
replicaset = rsList.Items[0]
@@ -599,7 +599,7 @@ var _ = SIGDescribe("Garbage collector", func() {
err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
dList, err := deployClient.List(ctx, metav1.ListOptions{})
if err != nil {
- return false, fmt.Errorf("failed to list deployments: %v", err)
+ return false, fmt.Errorf("failed to list deployments: %w", err)
}
return len(dList.Items) == 0, nil
})
@@ -616,13 +616,13 @@ var _ = SIGDescribe("Garbage collector", func() {
errList := make([]error, 0)
remainingRSs, err := rsClient.List(ctx, metav1.ListOptions{})
if err != nil {
- errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %v", err))
+ errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %w", err))
} else {
errList = append(errList, fmt.Errorf("remaining rs post mortem: %#v", remainingRSs))
}
remainingDSs, err := deployClient.List(ctx, metav1.ListOptions{})
if err != nil {
- errList = append(errList, fmt.Errorf("failed to list Deployments post mortem: %v", err))
+ errList = append(errList, fmt.Errorf("failed to list Deployments post mortem: %w", err))
} else {
errList = append(errList, fmt.Errorf("remaining deployment's post mortem: %#v", remainingDSs))
}
@@ -663,7 +663,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
rc, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{})
if err != nil {
- return false, fmt.Errorf("failed to get rc: %v", err)
+ return false, fmt.Errorf("failed to get rc: %w", err)
}
if rc.Status.Replicas == *rc.Spec.Replicas {
return true, nil
@@ -758,7 +758,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
rc1, err := rcClient.Get(ctx, rc1.Name, metav1.GetOptions{})
if err != nil {
- return false, fmt.Errorf("failed to get rc: %v", err)
+ return false, fmt.Errorf("failed to get rc: %w", err)
}
if rc1.Status.Replicas == *rc1.Spec.Replicas {
return true, nil
@@ -889,7 +889,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.PollWithContext(ctx, 5*time.Second, 90*time.Second+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
pods, err2 = podClient.List(ctx, metav1.ListOptions{})
if err2 != nil {
- return false, fmt.Errorf("failed to list pods: %v", err)
+ return false, fmt.Errorf("failed to list pods: %w", err)
}
if len(pods.Items) == 0 {
return true, nil
@@ -1125,7 +1125,7 @@ var _ = SIGDescribe("Garbage collector", func() {
return false, nil
}
if err != nil && !apierrors.IsNotFound(err) {
- return false, fmt.Errorf("failed to get owner: %v", err)
+ return false, fmt.Errorf("failed to get owner: %w", err)
}
return true, nil
}); err != nil {
@@ -1153,7 +1153,7 @@ var _ = SIGDescribe("Garbage collector", func() {
err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 2*time.Minute, func(ctx context.Context) (bool, error) {
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
- return false, fmt.Errorf("failed to list jobs: %v", err)
+ return false, fmt.Errorf("failed to list jobs: %w", err)
}
return len(jobs.Items) > 0, nil
})


@@ -1250,7 +1250,7 @@ func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func(ctx con
if apierrors.IsNotFound(err) {
return true, nil
}
- return false, fmt.Errorf("failed to get failed daemon pod %q: %v", pod.Name, err)
+ return false, fmt.Errorf("failed to get failed daemon pod %q: %w", pod.Name, err)
}
return false, nil
}


@@ -27,7 +27,6 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilrand "k8s.io/apimachinery/pkg/util/rand"
@@ -513,9 +512,9 @@ func TestReplicationControllerServeImageOrFail(ctx context.Context, f *framework
if err != nil {
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
if getErr == nil {
- err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
+ err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %w", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
} else {
- err = fmt.Errorf("pod %q never run: %v", pod.Name, err)
+ err = fmt.Errorf("pod %q never run: %w", pod.Name, err)
}
}
framework.ExpectNoError(err)
@@ -528,13 +527,7 @@ func TestReplicationControllerServeImageOrFail(ctx context.Context, f *framework
// Verify that something is listening.
framework.Logf("Trying to dial the pod")
- retryTimeout := 2 * time.Minute
- retryInterval := 5 * time.Second
- label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
- err = wait.PollWithContext(ctx, retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
- if err != nil {
- framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
- }
+ framework.ExpectNoError(e2epod.WaitForPodsResponding(ctx, f.ClientSet, f.Namespace.Name, name, true, 2*time.Minute, pods))
}
// 1. Create a quota restricting pods in the current namespace to 2.


@@ -210,9 +210,9 @@ func testReplicaSetServeImageOrFail(ctx context.Context, f *framework.Framework,
if err != nil {
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
if getErr == nil {
- err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
+ err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %w", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
} else {
- err = fmt.Errorf("pod %q never run: %v", pod.Name, err)
+ err = fmt.Errorf("pod %q never run: %w", pod.Name, err)
}
}
framework.ExpectNoError(err)
@@ -225,13 +225,7 @@ func testReplicaSetServeImageOrFail(ctx context.Context, f *framework.Framework,
// Verify that something is listening.
framework.Logf("Trying to dial the pod")
- retryTimeout := 2 * time.Minute
- retryInterval := 5 * time.Second
- label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
- err = wait.PollWithContext(ctx, retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
- if err != nil {
- framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
- }
+ framework.ExpectNoError(e2epod.WaitForPodsResponding(ctx, f.ClientSet, f.Namespace.Name, name, true, 2*time.Minute, pods))
}
// 1. Create a quota restricting pods in the current namespace to 2.


@@ -493,9 +493,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
framework.ExpectNoError(err)
framework.Logf("created pod")
- if !e2epod.CheckPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, []string{pod.Name}, time.Minute) {
- framework.Failf("pod %q in ns %q never became ready", pod.Name, f.Namespace.Name)
- }
+ framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name, time.Minute))
framework.Logf("pod is ready")
@@ -509,7 +507,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}
tokenCount, err := ParseInClusterClientLogs(logs)
if err != nil {
- return false, fmt.Errorf("inclusterclient reported an error: %v", err)
+ return false, fmt.Errorf("inclusterclient reported an error: %w", err)
}
if tokenCount < 2 {
framework.Logf("Retrying. Still waiting to see more unique tokens: got=%d, want=2", tokenCount)


@@ -1166,7 +1166,7 @@ func enableAutoscaler(nodePool string, minCount, maxCount int) error {
if err != nil {
klog.Errorf("Failed config update result: %s", output)
- return fmt.Errorf("Failed to enable autoscaling: %v", err)
+ return fmt.Errorf("Failed to enable autoscaling: %w", err)
}
klog.Infof("Config update result: %s", output)
@@ -1190,7 +1190,7 @@ func disableAutoscaler(nodePool string, minCount, maxCount int) error {
if err != nil {
klog.Errorf("Failed config update result: %s", output)
- return fmt.Errorf("Failed to disable autoscaling: %v", err)
+ return fmt.Errorf("Failed to disable autoscaling: %w", err)
}
klog.Infof("Config update result: %s", output)
@@ -1384,7 +1384,7 @@ func waitForCaPodsReadyInNamespace(ctx context.Context, f *framework.Framework,
for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)) && ctx.Err() == nil; time.Sleep(20 * time.Second) {
pods, err := c.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
- return fmt.Errorf("failed to get pods: %v", err)
+ return fmt.Errorf("failed to get pods: %w", err)
}
notready = make([]string, 0)
for _, pod := range pods.Items {


@@ -355,7 +355,7 @@ func waitForDNSReplicasSatisfied(ctx context.Context, c clientset.Interface, get
}
if err = wait.Poll(2*time.Second, timeout, condition); err != nil {
- return fmt.Errorf("err waiting for DNS replicas to satisfy %v, got %v: %v", expected, current, err)
+ return fmt.Errorf("err waiting for DNS replicas to satisfy %v, got %v: %w", expected, current, err)
}
framework.Logf("kube-dns reaches expected replicas: %v", expected)
return nil
@@ -372,7 +372,7 @@ func waitForDNSConfigMapCreated(ctx context.Context, c clientset.Interface, time
}
if err = wait.Poll(time.Second, timeout, condition); err != nil {
- return nil, fmt.Errorf("err waiting for DNS autoscaling ConfigMap got re-created: %v", err)
+ return nil, fmt.Errorf("err waiting for DNS autoscaling ConfigMap got re-created: %w", err)
}
return configMap, nil
}


@@ -375,7 +375,7 @@ func waitForReplicationController(ctx context.Context, c clientset.Interface, na
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
- return fmt.Errorf("error waiting for ReplicationController %s/%s %s: %v", namespace, name, stateMsg[exist], err)
+ return fmt.Errorf("error waiting for ReplicationController %s/%s %s: %w", namespace, name, stateMsg[exist], err)
}
return nil
}
@@ -402,7 +402,7 @@ func waitForServiceWithSelector(ctx context.Context, c clientset.Interface, name
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
- return fmt.Errorf("error waiting for service with %s in namespace %s %s: %v", selector.String(), namespace, stateMsg[exist], err)
+ return fmt.Errorf("error waiting for service with %s in namespace %s %s: %w", selector.String(), namespace, stateMsg[exist], err)
}
return nil
}
@@ -426,7 +426,7 @@ func waitForReplicationControllerWithSelector(ctx context.Context, c clientset.I
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
- return fmt.Errorf("error waiting for ReplicationControllers with %s in namespace %s %s: %v", selector.String(), namespace, stateMsg[exist], err)
+ return fmt.Errorf("error waiting for ReplicationControllers with %s in namespace %s %s: %w", selector.String(), namespace, stateMsg[exist], err)
}
return nil
}
@@ -437,7 +437,7 @@ func getMasterSSHClient() (*ssh.Client, error) {
// Get a signer for the provider.
signer, err := e2essh.GetSigner(framework.TestContext.Provider)
if err != nil {
- return nil, fmt.Errorf("error getting signer for provider %s: '%v'", framework.TestContext.Provider, err)
+ return nil, fmt.Errorf("error getting signer for provider %s: %w", framework.TestContext.Provider, err)
}
sshUser := os.Getenv("KUBE_SSH_USER")
@@ -453,7 +453,7 @@ func getMasterSSHClient() (*ssh.Client, error) {
host := framework.APIAddress() + ":22"
client, err := ssh.Dial("tcp", host, config)
if err != nil {
- return nil, fmt.Errorf("error getting SSH client to host %s: '%v'", host, err)
+ return nil, fmt.Errorf("error getting SSH client to host %s: %w", host, err)
}
return client, err
}
@@ -468,7 +468,7 @@ func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
framework.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
session, err := client.NewSession()
if err != nil {
- return "", "", 0, fmt.Errorf("error creating session to host %s: '%v'", client.RemoteAddr(), err)
+ return "", "", 0, fmt.Errorf("error creating session to host %s: %w", client.RemoteAddr(), err)
}
defer session.Close()
@@ -490,7 +490,7 @@ func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
} else {
// Some other kind of error happened (e.g. an IOError); consider the
// SSH unsuccessful.
- err = fmt.Errorf("failed running `%s` on %s: '%v'", cmd, client.RemoteAddr(), err)
+ err = fmt.Errorf("failed running `%s` on %s: %w", cmd, client.RemoteAddr(), err)
}
}
return bout.String(), berr.String(), code, err
@@ -500,7 +500,7 @@ func writeRemoteFile(sshClient *ssh.Client, data, dir, fileName string, mode os.
framework.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
session, err := sshClient.NewSession()
if err != nil {
- return fmt.Errorf("error creating session to host %s: '%v'", sshClient.RemoteAddr(), err)
+ return fmt.Errorf("error creating session to host %s: %w", sshClient.RemoteAddr(), err)
}
defer session.Close()


@@ -82,7 +82,7 @@ func realVersion(s string) (string, error) {
framework.Logf("Getting real version for %q", s)
v, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/get-build.sh"), "-v", s)
if err != nil {
- return v, fmt.Errorf("error getting real version for %q: %v", s, err)
+ return v, fmt.Errorf("error getting real version for %q: %w", s, err)
}
framework.Logf("Version for %q is %q", s, v)
return strings.TrimPrefix(strings.TrimSpace(v), "v"), nil


@@ -131,7 +131,7 @@ func checkControlPlaneVersion(ctx context.Context, c clientset.Interface, want s
return true, nil
})
if waitErr != nil {
- return fmt.Errorf("CheckControlPlane() couldn't get the control plane version: %v", err)
+ return fmt.Errorf("CheckControlPlane() couldn't get the control plane version: %w", err)
}
// We do prefix trimming and then matching because:
// want looks like: 0.19.3-815-g50e67d4


@@ -97,7 +97,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health.
ginkgo.By("waiting for system pods to successfully restart")
- err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
+ err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout)
framework.ExpectNoError(err)
})


@@ -99,7 +99,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
// Many e2e tests assume that the cluster is fully healthy before they start. Wait until
// the cluster is restored to health.
ginkgo.By("waiting for system pods to successfully restart")
- err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
+ err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout)
framework.ExpectNoError(err)
})
})


@@ -608,7 +608,7 @@ done
})
// verify pods are running and ready
- err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{})
+ err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart)
framework.ExpectNoError(err)
// Shutdown pod. Readiness should change to false
@@ -690,7 +690,7 @@ done
})
// verify pods are running and ready
- err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart, map[string]string{})
+ err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 1, 0, f.Timeouts.PodStart)
framework.ExpectNoError(err)
// Shutdown pod. Readiness should change to false


@@ -39,15 +39,15 @@ import (
func getPatchBytes(oldLease, newLease *coordinationv1.Lease) ([]byte, error) {
oldData, err := json.Marshal(oldLease)
if err != nil {
- return nil, fmt.Errorf("failed to Marshal oldData: %v", err)
+ return nil, fmt.Errorf("failed to Marshal oldData: %w", err)
}
newData, err := json.Marshal(newLease)
if err != nil {
- return nil, fmt.Errorf("failed to Marshal newData: %v", err)
+ return nil, fmt.Errorf("failed to Marshal newData: %w", err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, coordinationv1.Lease{})
if err != nil {
- return nil, fmt.Errorf("failed to CreateTwoWayMergePatch: %v", err)
+ return nil, fmt.Errorf("failed to CreateTwoWayMergePatch: %w", err)
}
return patchBytes, nil
}


@@ -873,7 +873,7 @@ var _ = SIGDescribe("Pods", func() {
// wait as required for all 3 pods to be running
ginkgo.By("waiting for all 3 pods to be running")
- err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 3, 0, f.Timeouts.PodStart, nil)
+ err := e2epod.WaitForPodsRunningReady(ctx, f.ClientSet, f.Namespace.Name, 3, 0, f.Timeouts.PodStart)
framework.ExpectNoError(err, "3 pods not found running.")
// delete Collection of pods with a label in the current namespace


@@ -310,7 +310,7 @@ while true; do sleep 1; done
checkContainerStatus := func(ctx context.Context) error {
status, err := container.GetStatus(ctx)
if err != nil {
- return fmt.Errorf("failed to get container status: %v", err)
+ return fmt.Errorf("failed to get container status: %w", err)
}
// We need to check container state first. The default pod status is pending, If we check pod phase first,
// and the expected pod phase is Pending, the container status may not even show up when we check it.
@@ -335,7 +335,7 @@ while true; do sleep 1; done
// Check pod phase
phase, err := container.GetPhase(ctx)
if err != nil {
- return fmt.Errorf("failed to get pod phase: %v", err)
+ return fmt.Errorf("failed to get pod phase: %w", err)
}
if phase != expectedPhase {
return fmt.Errorf("expected pod phase: %q, got: %q", expectedPhase, phase)


@@ -195,11 +195,11 @@ func RestartNodes(c clientset.Interface, nodes []v1.Node) error {
if err := wait.Poll(30*time.Second, framework.RestartNodeReadyAgainTimeout, func() (bool, error) {
newNode, err := c.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
if err != nil {
- return false, fmt.Errorf("error getting node info after reboot: %s", err)
+ return false, fmt.Errorf("error getting node info after reboot: %w", err)
}
return node.Status.NodeInfo.BootID != newNode.Status.NodeInfo.BootID, nil
}); err != nil {
- return fmt.Errorf("error waiting for node %s boot ID to change: %s", node.Name, err)
+ return fmt.Errorf("error waiting for node %s boot ID to change: %w", node.Name, err)
}
}
return nil


@@ -110,7 +110,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
gomega.Consistently(func() error {
testPod, err := b.f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil {
- return fmt.Errorf("expected the test pod %s to exist: %v", pod.Name, err)
+ return fmt.Errorf("expected the test pod %s to exist: %w", pod.Name, err)
}
if testPod.Status.Phase != v1.PodPending {
return fmt.Errorf("pod %s: unexpected status %s, expected status: %s", pod.Name, testPod.Status.Phase, v1.PodPending)


@@ -148,7 +148,7 @@ func (c *ExampleController) GetClaimParameters(ctx context.Context, claim *resou
func (c *ExampleController) readParametersFromConfigMap(ctx context.Context, namespace, name string) (map[string]string, error) {
configMap, err := c.clientset.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
- return nil, fmt.Errorf("get config map: %v", err)
+ return nil, fmt.Errorf("get config map: %w", err)
}
return configMap.Data, nil
}
@@ -221,7 +221,7 @@ func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha
toEnvVars("admin", classParameters, p.EnvVars)
data, err := json.Marshal(p)
if err != nil {
- return nil, fmt.Errorf("encode parameters: %v", err)
+ return nil, fmt.Errorf("encode parameters: %w", err)
}
allocation.ResourceHandle = string(data)
var nodes []string


@@ -97,7 +97,7 @@ func StartPlugin(logger klog.Logger, cdiDir, driverName string, nodeName string,
)
d, err := kubeletplugin.Start(ex, opts...)
if err != nil {
- return nil, fmt.Errorf("start kubelet plugin: %v", err)
+ return nil, fmt.Errorf("start kubelet plugin: %w", err)
}
ex.d = d
@@ -127,7 +127,7 @@ func (ex *ExamplePlugin) NodePrepareResource(ctx context.Context, req *drapbv1.N
// Determine environment variables.
var p parameters
if err := json.Unmarshal([]byte(req.ResourceHandle), &p); err != nil {
- return nil, fmt.Errorf("unmarshal resource handle: %v", err)
+ return nil, fmt.Errorf("unmarshal resource handle: %w", err)
}
// Sanity check scheduling.
@@ -161,7 +161,7 @@ func (ex *ExamplePlugin) NodePrepareResource(ctx context.Context, req *drapbv1.N
filePath := ex.getJSONFilePath(req.ClaimUid)
buffer, err := json.Marshal(spec)
if err != nil {
- return nil, fmt.Errorf("marshal spec: %v", err)
+ return nil, fmt.Errorf("marshal spec: %w", err)
}
if err := ex.fileOps.Create(filePath, buffer); err != nil {
return nil, fmt.Errorf("failed to write CDI file %v", err)
@@ -186,7 +186,7 @@ func (ex *ExamplePlugin) NodeUnprepareResource(ctx context.Context, req *drapbv1
filePath := ex.getJSONFilePath(req.ClaimUid)
if err := ex.fileOps.Remove(filePath); err != nil {
- return nil, fmt.Errorf("error removing CDI file: %v", err)
+ return nil, fmt.Errorf("error removing CDI file: %w", err)
}
logger.V(3).Info("CDI file removed", "path", filePath)


@@ -115,12 +115,12 @@ func NewCommand() *cobra.Command {
if *kubeconfig == "" {
config, err = rest.InClusterConfig()
if err != nil {
- return fmt.Errorf("create in-cluster client configuration: %v", err)
+ return fmt.Errorf("create in-cluster client configuration: %w", err)
}
} else {
config, err = clientcmd.BuildConfigFromFlags("", *kubeconfig)
if err != nil {
- return fmt.Errorf("create out-of-cluster client configuration: %v", err)
+ return fmt.Errorf("create out-of-cluster client configuration: %w", err)
}
}
config.QPS = *kubeAPIQPS
@@ -128,7 +128,7 @@ func NewCommand() *cobra.Command {
clientset, err = kubernetes.NewForConfig(config)
if err != nil {
- return fmt.Errorf("create client: %v", err)
+ return fmt.Errorf("create client: %w", err)
}
if *httpEndpoint != "" {
@@ -158,7 +158,7 @@ func NewCommand() *cobra.Command {
listener, err := net.Listen("tcp", *httpEndpoint)
if err != nil {
- return fmt.Errorf("listen on HTTP endpoint: %v", err)
+ return fmt.Errorf("listen on HTTP endpoint: %w", err)
}
go func() {
@@ -203,12 +203,12 @@ func NewCommand() *cobra.Command {
if *resourceConfig != "" {
file, err := os.Open(*resourceConfig)
if err != nil {
- return fmt.Errorf("open resource config: %v", err)
+ return fmt.Errorf("open resource config: %w", err)
}
decoder := json.NewDecoder(file)
decoder.DisallowUnknownFields()
if err := decoder.Decode(&resources); err != nil {
- return fmt.Errorf("parse resource config %q: %v", *resourceConfig, err)
+ return fmt.Errorf("parse resource config %q: %w", *resourceConfig, err)
}
}
@@ -230,7 +230,7 @@ func NewCommand() *cobra.Command {
// exceeds the QPS+burst limits.
leClientset, err := kubernetes.NewForConfig(config)
if err != nil {
- return fmt.Errorf("create leaderelection client: %v", err)
+ return fmt.Errorf("create leaderelection client: %w", err)
}
le := leaderelection.New(leClientset, lockName,
@@ -246,7 +246,7 @@ func NewCommand() *cobra.Command {
le.PrepareHealthCheck(mux)
}
if err := le.Run(); err != nil {
- return fmt.Errorf("leader election failed: %v", err)
+ return fmt.Errorf("leader election failed: %w", err)
}
return nil
@@ -275,10 +275,10 @@ func NewCommand() *cobra.Command {
// to know early if there is a setup problem that would prevent
// creating those directories.
if err := os.MkdirAll(*cdiDir, os.FileMode(0750)); err != nil {
- return fmt.Errorf("create CDI directory: %v", err)
+ return fmt.Errorf("create CDI directory: %w", err)
}
if err := os.MkdirAll(filepath.Dir(*endpoint), 0750); err != nil {
- return fmt.Errorf("create socket directory: %v", err)
+ return fmt.Errorf("create socket directory: %w", err)
}
plugin, err := StartPlugin(logger, *cdiDir, *driverName, "", FileOperations{},
@@ -287,7 +287,7 @@ func NewCommand() *cobra.Command {
kubeletplugin.KubeletPluginSocketPath(*draAddress),
)
if err != nil {
- return fmt.Errorf("start example plugin: %v", err)
+ return fmt.Errorf("start example plugin: %w", err)
}
// Handle graceful shutdown. We need to delete Unix domain


@@ -245,7 +245,7 @@ func setupSuite(ctx context.Context) {
// #41007. To avoid those pods preventing the whole test runs (and just
// wasting the whole run), we allow for some not-ready pods (with the
// number equal to the number of allowed not-ready nodes).
- if err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), timeouts.SystemPodsStartup, map[string]string{}); err != nil {
+ if err := e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), timeouts.SystemPodsStartup); err != nil {
e2edebug.DumpAllNamespaceInfo(ctx, c, metav1.NamespaceSystem)
e2ekubectl.LogFailedContainers(ctx, c, metav1.NamespaceSystem, framework.Logf)
framework.Failf("Error waiting for all pods to be running and ready: %v", err)


@@ -1001,7 +1001,7 @@ func CreateCustomSubresourceInstance(ctx context.Context, namespace, name string
}
createdObjectMeta, err := meta.Accessor(instance)
if err != nil {
- return nil, fmt.Errorf("Error while creating object meta: %v", err)
+ return nil, fmt.Errorf("Error while creating object meta: %w", err)
}
if len(createdObjectMeta.GetUID()) == 0 {
return nil, fmt.Errorf("Missing UUID: %v", instance)


@@ -75,12 +75,12 @@ func CreateDeployment(ctx context.Context, client clientset.Interface, replicas
deploymentSpec := testDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command)
deployment, err := client.AppsV1().Deployments(namespace).Create(ctx, deploymentSpec, metav1.CreateOptions{})
if err != nil {
- return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err)
+ return nil, fmt.Errorf("deployment %q Create API error: %w", deploymentSpec.Name, err)
}
framework.Logf("Waiting deployment %q to complete", deploymentSpec.Name)
err = WaitForDeploymentComplete(client, deployment)
if err != nil {
- return nil, fmt.Errorf("deployment %q failed to complete: %v", deploymentSpec.Name, err)
+ return nil, fmt.Errorf("deployment %q failed to complete: %w", deploymentSpec.Name, err)
}
return deployment, nil
}


@@ -42,7 +42,7 @@ func eventOccurred(c clientset.Interface, namespace, eventSelector, msg string)
return func(ctx context.Context) (bool, error) {
events, err := c.CoreV1().Events(namespace).List(ctx, options)
if err != nil {
- return false, fmt.Errorf("got error while getting events: %v", err)
+ return false, fmt.Errorf("got error while getting events: %w", err)
}
for _, event := range events.Items {
if strings.Contains(event.Message, msg) {


@@ -17,12 +17,281 @@ limitations under the License.
package framework
import (
+ "context"
+ "errors"
"fmt"
+ "strings"
+ "time"
+ ginkgotypes "github.com/onsi/ginkgo/v2/types"
"github.com/onsi/gomega"
"github.com/onsi/gomega/format"
+ "github.com/onsi/gomega/types"
)
// MakeMatcher builds a gomega.Matcher based on a single callback function.
// That function is passed the actual value that is to be checked.
// There are three possible outcomes of the check:
// - An error is returned, which then is converted into a failure
// by Gomega.
// - A non-nil failure function is returned, which then is called
// by Gomega once a failure string is needed. This is useful
// to avoid unnecessarily preparing a failure string for intermediate
// failures in Eventually or Consistently.
// - Both function and error are nil, which means that the check
// succeeded.
func MakeMatcher[T interface{}](match func(actual T) (failure func() string, err error)) types.GomegaMatcher {
return &matcher[T]{
match: match,
}
}
type matcher[T interface{}] struct {
match func(actual T) (func() string, error)
failure func() string
}
func (m *matcher[T]) Match(actual interface{}) (success bool, err error) {
if actual, ok := actual.(T); ok {
failure, err := m.match(actual)
if err != nil {
return false, err
}
m.failure = failure
if failure != nil {
return false, nil
}
return true, nil
}
var empty T
return false, gomega.StopTrying(fmt.Sprintf("internal error: expected %T, got:\n%s", empty, format.Object(actual, 1)))
}
func (m *matcher[T]) FailureMessage(actual interface{}) string {
return m.failure()
}
func (m matcher[T]) NegatedFailureMessage(actual interface{}) string {
return m.failure()
}
var _ types.GomegaMatcher = &matcher[string]{}
// Gomega returns an interface that can be used like gomega to express
// assertions. The difference is that failed assertions are returned as an
// error:
//
// if err := Gomega().Expect(pod.Status.Phase).To(gomega.BeEqual(v1.Running)); err != nil {
// return fmt.Errorf("test pod not running: %w", err)
// }
//
// This error can get wrapped to provide additional context for the
// failure. The test then should use ExpectNoError to turn a non-nil error into
// a failure.
//
// When using this approach, there is no need for call offsets and extra
// descriptions for the Expect call because the call stack will be dumped when
// ExpectNoError is called and the additional description(s) can be added by
// wrapping the error.
//
// Asynchronous assertions use the framework's Poll interval and PodStart timeout
// by default.
func Gomega() GomegaInstance {
return gomegaInstance{}
}
type GomegaInstance interface {
Expect(actual interface{}) Assertion
Eventually(ctx context.Context, args ...interface{}) AsyncAssertion
Consistently(ctx context.Context, args ...interface{}) AsyncAssertion
}
type Assertion interface {
Should(matcher types.GomegaMatcher) error
ShouldNot(matcher types.GomegaMatcher) error
To(matcher types.GomegaMatcher) error
ToNot(matcher types.GomegaMatcher) error
NotTo(matcher types.GomegaMatcher) error
}
type AsyncAssertion interface {
Should(matcher types.GomegaMatcher) error
ShouldNot(matcher types.GomegaMatcher) error
WithTimeout(interval time.Duration) AsyncAssertion
WithPolling(interval time.Duration) AsyncAssertion
}
type gomegaInstance struct{}
var _ GomegaInstance = gomegaInstance{}
func (g gomegaInstance) Expect(actual interface{}) Assertion {
return assertion{actual: actual}
}
func (g gomegaInstance) Eventually(ctx context.Context, args ...interface{}) AsyncAssertion {
return newAsyncAssertion(ctx, args, false)
}
func (g gomegaInstance) Consistently(ctx context.Context, args ...interface{}) AsyncAssertion {
return newAsyncAssertion(ctx, args, true)
}
func newG() (*FailureError, gomega.Gomega) {
var failure FailureError
g := gomega.NewGomega(func(msg string, callerSkip ...int) {
failure = FailureError{
msg: msg,
}
})
return &failure, g
}
type assertion struct {
actual interface{}
}
func (a assertion) Should(matcher types.GomegaMatcher) error {
err, g := newG()
if !g.Expect(a.actual).Should(matcher) {
err.backtrace()
return *err
}
return nil
}
func (a assertion) ShouldNot(matcher types.GomegaMatcher) error {
err, g := newG()
if !g.Expect(a.actual).ShouldNot(matcher) {
err.backtrace()
return *err
}
return nil
}
func (a assertion) To(matcher types.GomegaMatcher) error {
err, g := newG()
if !g.Expect(a.actual).To(matcher) {
err.backtrace()
return *err
}
return nil
}
func (a assertion) ToNot(matcher types.GomegaMatcher) error {
err, g := newG()
if !g.Expect(a.actual).ToNot(matcher) {
err.backtrace()
return *err
}
return nil
}
func (a assertion) NotTo(matcher types.GomegaMatcher) error {
err, g := newG()
if !g.Expect(a.actual).NotTo(matcher) {
err.backtrace()
return *err
}
return nil
}
type asyncAssertion struct {
ctx context.Context
args []interface{}
timeout time.Duration
interval time.Duration
consistently bool
}
func newAsyncAssertion(ctx context.Context, args []interface{}, consistently bool) asyncAssertion {
return asyncAssertion{
ctx: ctx,
args: args,
// PodStart is used as default because waiting for a pod is the
// most common operation.
timeout: TestContext.timeouts.PodStart,
interval: TestContext.timeouts.Poll,
}
}
func (a asyncAssertion) newAsync() (*FailureError, gomega.AsyncAssertion) {
err, g := newG()
var assertion gomega.AsyncAssertion
if a.consistently {
assertion = g.Consistently(a.ctx, a.args...)
} else {
assertion = g.Eventually(a.ctx, a.args...)
}
assertion = assertion.WithTimeout(a.timeout).WithPolling(a.interval)
return err, assertion
}
func (a asyncAssertion) Should(matcher types.GomegaMatcher) error {
err, assertion := a.newAsync()
if !assertion.Should(matcher) {
err.backtrace()
return *err
}
return nil
}
func (a asyncAssertion) ShouldNot(matcher types.GomegaMatcher) error {
err, assertion := a.newAsync()
if !assertion.ShouldNot(matcher) {
err.backtrace()
return *err
}
return nil
}
func (a asyncAssertion) WithTimeout(timeout time.Duration) AsyncAssertion {
a.timeout = timeout
return a
}
func (a asyncAssertion) WithPolling(interval time.Duration) AsyncAssertion {
a.interval = interval
return a
}
// FailureError is an error where the error string is meant to be passed to
// ginkgo.Fail directly, i.e. adding some prefix like "unexpected error" is not
// necessary. It is also not necessary to dump the error struct.
type FailureError struct {
msg string
fullStackTrace string
}
func (f FailureError) Error() string {
return f.msg
}
func (f FailureError) Backtrace() string {
return f.fullStackTrace
}
func (f FailureError) Is(target error) bool {
return target == ErrFailure
}
func (f *FailureError) backtrace() {
f.fullStackTrace = ginkgotypes.NewCodeLocationWithStackTrace(2).FullStackTrace
}
// ErrFailure is an empty error that can be wrapped to indicate that an error
// is a FailureError. It can also be used to test for a FailureError:.
//
// return fmt.Errorf("some problem%w", ErrFailure)
// ...
// err := someOperation()
// if errors.Is(err, ErrFailure) {
// ...
// }
var ErrFailure error = FailureError{}
// ExpectEqual expects the specified two are the same, otherwise an exception raises // ExpectEqual expects the specified two are the same, otherwise an exception raises
func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) { func ExpectEqual(actual interface{}, extra interface{}, explain ...interface{}) {
gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...) gomega.ExpectWithOffset(1, actual).To(gomega.Equal(extra), explain...)
@ -72,7 +341,17 @@ func ExpectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
// failures at the same code line might not be matched in // failures at the same code line might not be matched in
// https://go.k8s.io/triage because the error details are too // https://go.k8s.io/triage because the error details are too
// different. // different.
Logf("Unexpected error: %s\n%s", prefix, format.Object(err, 1)) //
// Some errors include all relevant information in the Error
// string. For those we can skip the redundant log message.
// For our own failures we only log the additional stack backtrace
// because it is not included in the failure message.
var failure FailureError
if errors.As(err, &failure) && failure.Backtrace() != "" {
Logf("Failed inside E2E framework:\n %s", strings.ReplaceAll(failure.Backtrace(), "\n", "\n "))
} else if !errors.Is(err, ErrFailure) {
Logf("Unexpected error: %s\n%s", prefix, format.Object(err, 1))
}
Fail(prefix+err.Error(), 1+offset) Fail(prefix+err.Error(), 1+offset)
} }
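A minimal sketch of how a caller benefits from this branch (the quota helper below is hypothetical): an error that wraps ErrFailure already carries a complete message, so ExpectNoError skips the redundant dump and passes the message straight to Fail.

// Hypothetical helper and usage in a test; assumes
// framework = "k8s.io/kubernetes/test/e2e/framework".
func checkQuota(used, limit int) error {
	if used > limit {
		// The message is self-contained, so tag the error as a framework failure.
		return fmt.Errorf("quota exceeded: %d used, only %d allowed%w", used, limit, framework.ErrFailure)
	}
	return nil
}

func verifyQuota() {
	// ExpectNoError sees errors.Is(err, ErrFailure) and fails with the message
	// alone, without the "Unexpected error" object dump.
	framework.ExpectNoError(checkQuota(5, 3))
}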
@ -0,0 +1,62 @@
/*
Copyright 2023 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"errors"
"testing"
"github.com/onsi/gomega"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// This test is sensitive to line numbering.
// The following lines can be removed to compensate for import changes.
//
//
//
//
//
//
//
//
//
//
// This must be line #40.
func TestNewGomega(t *testing.T) {
if err := Gomega().Expect("hello").To(gomega.Equal("hello")); err != nil {
t.Errorf("unexpected failure: %s", err.Error())
}
err := Gomega().Expect("hello").ToNot(gomega.Equal("hello"))
require.NotNil(t, err)
assert.Equal(t, `Expected
<string>: hello
not to equal
<string>: hello`, err.Error())
if !errors.Is(err, ErrFailure) {
t.Errorf("expected error that is ErrFailure, got %T: %+v", err, err)
}
var failure FailureError
if !errors.As(err, &failure) {
t.Errorf("expected error that can be copied to FailureError, got %T: %+v", err, err)
} else {
assert.Regexp(t, `^k8s.io/kubernetes/test/e2e/framework.TestNewGomega\(0x[0-9A-Fa-f]*\)
.*/test/e2e/framework/expect_test.go:46`, failure.Backtrace())
}
}
@ -34,6 +34,9 @@ type GetFunc[T any] func(ctx context.Context) (T, error)
// APIGetFunc is a get functions as used in client-go. // APIGetFunc is a get functions as used in client-go.
type APIGetFunc[T any] func(ctx context.Context, name string, getOptions metav1.GetOptions) (T, error) type APIGetFunc[T any] func(ctx context.Context, name string, getOptions metav1.GetOptions) (T, error)
// APIListFunc is a list functions as used in client-go.
type APIListFunc[T any] func(ctx context.Context, listOptions metav1.ListOptions) (T, error)
// GetObject takes a get function like clientset.CoreV1().Pods(ns).Get // GetObject takes a get function like clientset.CoreV1().Pods(ns).Get
// and the parameters for it and returns a function that executes that get // and the parameters for it and returns a function that executes that get
// operation in a [gomega.Eventually] or [gomega.Consistently]. // operation in a [gomega.Eventually] or [gomega.Consistently].
@ -47,6 +50,17 @@ func GetObject[T any](get APIGetFunc[T], name string, getOptions metav1.GetOptio
}) })
} }
// ListObjects takes a list function like clientset.CoreV1().Pods(ns).List
// and the parameters for it and returns a function that executes that list
// operation in a [gomega.Eventually] or [gomega.Consistently].
//
// Delays and retries are handled by [HandleRetry].
func ListObjects[T any](list APIListFunc[T], listOptions metav1.ListOptions) GetFunc[T] {
return HandleRetry(func(ctx context.Context) (T, error) {
return list(ctx, listOptions)
})
}
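A rough usage sketch (namespace, label selector, and expected count are invented; gomega.HaveField/HaveLen are assumed to be suitable matchers for the returned *v1.PodList): ListObjects pairs with the framework's Eventually wrapper, here waiting until a label selector matches three pods.

// Sketch only. Assumes framework = "k8s.io/kubernetes/test/e2e/framework",
// metav1 = "k8s.io/apimachinery/pkg/apis/meta/v1", gomega = "github.com/onsi/gomega".
func waitForThreeExamplePods(ctx context.Context, f *framework.Framework) {
	listPods := framework.ListObjects(f.ClientSet.CoreV1().Pods(f.Namespace.Name).List,
		metav1.ListOptions{LabelSelector: "app=example"})
	err := framework.Gomega().Eventually(ctx, listPods).
		WithTimeout(2 * time.Minute).
		Should(gomega.HaveField("Items", gomega.HaveLen(3)))
	framework.ExpectNoError(err, "wait for three example pods")
}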
// HandleRetry wraps an arbitrary get function. When the wrapped function // HandleRetry wraps an arbitrary get function. When the wrapped function
// returns an error, HandleGetError will decide whether the call should be // returns an error, HandleGetError will decide whether the call should be
// retried and if requested, will sleep before doing so. // retried and if requested, will sleep before doing so.
@ -309,7 +309,7 @@ func GenerateRSACerts(host string, isCA bool) ([]byte, []byte, error) {
} }
priv, err := rsa.GenerateKey(rand.Reader, rsaBits) priv, err := rsa.GenerateKey(rand.Reader, rsaBits)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("Failed to generate key: %v", err) return nil, nil, fmt.Errorf("Failed to generate key: %w", err)
} }
notBefore := time.Now() notBefore := time.Now()
notAfter := notBefore.Add(validFor) notAfter := notBefore.Add(validFor)
@ -318,7 +318,7 @@ func GenerateRSACerts(host string, isCA bool) ([]byte, []byte, error) {
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("failed to generate serial number: %s", err) return nil, nil, fmt.Errorf("failed to generate serial number: %w", err)
} }
template := x509.Certificate{ template := x509.Certificate{
SerialNumber: serialNumber, SerialNumber: serialNumber,
@ -351,13 +351,13 @@ func GenerateRSACerts(host string, isCA bool) ([]byte, []byte, error) {
var keyOut, certOut bytes.Buffer var keyOut, certOut bytes.Buffer
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("Failed to create certificate: %s", err) return nil, nil, fmt.Errorf("Failed to create certificate: %w", err)
} }
if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil { if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
return nil, nil, fmt.Errorf("Failed creating cert: %v", err) return nil, nil, fmt.Errorf("Failed creating cert: %w", err)
} }
if err := pem.Encode(&keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil { if err := pem.Encode(&keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
return nil, nil, fmt.Errorf("Failed creating key: %v", err) return nil, nil, fmt.Errorf("Failed creating key: %w", err)
} }
return certOut.Bytes(), keyOut.Bytes(), nil return certOut.Bytes(), keyOut.Bytes(), nil
} }
@ -532,11 +532,11 @@ func ingressFromManifest(fileName string) (*networkingv1.Ingress, error) {
func ingressToManifest(ing *networkingv1.Ingress, path string) error { func ingressToManifest(ing *networkingv1.Ingress, path string) error {
serialized, err := marshalToYaml(ing, networkingv1.SchemeGroupVersion) serialized, err := marshalToYaml(ing, networkingv1.SchemeGroupVersion)
if err != nil { if err != nil {
return fmt.Errorf("failed to marshal ingress %v to YAML: %v", ing, err) return fmt.Errorf("failed to marshal ingress %v to YAML: %w", ing, err)
} }
if err := os.WriteFile(path, serialized, 0600); err != nil { if err := os.WriteFile(path, serialized, 0600); err != nil {
return fmt.Errorf("error in writing ingress to file: %s", err) return fmt.Errorf("error in writing ingress to file: %w", err)
} }
return nil return nil
} }
@ -1150,17 +1150,17 @@ func (j *TestJig) DeleteTestResource(ctx context.Context, cs clientset.Interface
var errs []error var errs []error
if ing != nil { if ing != nil {
if err := j.runDelete(ctx, ing); err != nil { if err := j.runDelete(ctx, ing); err != nil {
errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err)) errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %w", ing.Namespace, ing.Name, err))
} }
} }
if svc != nil { if svc != nil {
if err := cs.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{}); err != nil { if err := cs.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{}); err != nil {
errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err)) errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %w", svc.Namespace, svc.Name, err))
} }
} }
if deploy != nil { if deploy != nil {
if err := cs.AppsV1().Deployments(deploy.Namespace).Delete(ctx, deploy.Name, metav1.DeleteOptions{}); err != nil { if err := cs.AppsV1().Deployments(deploy.Namespace).Delete(ctx, deploy.Name, metav1.DeleteOptions{}); err != nil {
errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %v", deploy.Namespace, deploy.Name, err)) errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %w", deploy.Namespace, deploy.Name, err))
} }
} }
return errs return errs
@ -122,6 +122,9 @@ var timePrefix = regexp.MustCompile(`(?m)^[[:alpha:]]{3} +[[:digit:]]{1,2} +[[:d
// elapsedSuffix matches "Elapsed: 16.189µs" // elapsedSuffix matches "Elapsed: 16.189µs"
var elapsedSuffix = regexp.MustCompile(`Elapsed: [[:digit:]]+(\.[[:digit:]]+)?(µs|ns|ms|s|m)`) var elapsedSuffix = regexp.MustCompile(`Elapsed: [[:digit:]]+(\.[[:digit:]]+)?(µs|ns|ms|s|m)`)
// afterSuffix matches "after 5.001s."
var afterSuffix = regexp.MustCompile(`after [[:digit:]]+(\.[[:digit:]]+)?(µs|ns|ms|s|m).`)
// timeSuffix matches "@ 09/06/22 15:36:43.44 (5.001s)" as printed by Ginkgo v2 for log output, with the duration being optional. // timeSuffix matches "@ 09/06/22 15:36:43.44 (5.001s)" as printed by Ginkgo v2 for log output, with the duration being optional.
var timeSuffix = regexp.MustCompile(`(?m)@[[:space:]][[:digit:]]{2}/[[:digit:]]{2}/[[:digit:]]{2} [[:digit:]]{2}:[[:digit:]]{2}:[[:digit:]]{2}(\.[[:digit:]]{1,3})?( \([[:digit:]]+(\.[[:digit:]]+)?(µs|ns|ms|s|m)\))?$`) var timeSuffix = regexp.MustCompile(`(?m)@[[:space:]][[:digit:]]{2}/[[:digit:]]{2}/[[:digit:]]{2} [[:digit:]]{2}:[[:digit:]]{2}:[[:digit:]]{2}(\.[[:digit:]]{1,3})?( \([[:digit:]]+(\.[[:digit:]]+)?(µs|ns|ms|s|m)\))?$`)
@ -129,6 +132,7 @@ func stripTimes(in string) string {
out := timePrefix.ReplaceAllString(in, "") out := timePrefix.ReplaceAllString(in, "")
out = elapsedSuffix.ReplaceAllString(out, "Elapsed: <elapsed>") out = elapsedSuffix.ReplaceAllString(out, "Elapsed: <elapsed>")
out = timeSuffix.ReplaceAllString(out, "<time>") out = timeSuffix.ReplaceAllString(out, "<time>")
out = afterSuffix.ReplaceAllString(out, "after <after>.")
return out return out
} }
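For illustration only (standalone use of the same pattern, not part of the change): the new expression turns Ginkgo's variable "after 5.001s." suffixes into a stable placeholder, just like the elapsed and timestamp patterns above.

// Sketch only; the input string is invented.
func demoAfterSuffix() {
	afterSuffix := regexp.MustCompile(`after [[:digit:]]+(\.[[:digit:]]+)?(µs|ns|ms|s|m).`)
	fmt.Println(afterSuffix.ReplaceAllString("Expected success, but timed out after 5.001s.", "after <after>."))
	// Output: Expected success, but timed out after <after>.
}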
@ -41,7 +41,7 @@ func RestartControllerManager(ctx context.Context) error {
result, err := e2essh.SSH(ctx, cmd, net.JoinHostPort(framework.APIAddress(), e2essh.SSHPort), framework.TestContext.Provider) result, err := e2essh.SSH(ctx, cmd, net.JoinHostPort(framework.APIAddress(), e2essh.SSHPort), framework.TestContext.Provider)
if err != nil || result.Code != 0 { if err != nil || result.Code != 0 {
e2essh.LogResult(result) e2essh.LogResult(result)
return fmt.Errorf("couldn't restart controller-manager: %v", err) return fmt.Errorf("couldn't restart controller-manager: %w", err)
} }
return nil return nil
} }
@ -115,7 +115,7 @@ func DaemonSetFromURL(ctx context.Context, url string) (*appsv1.DaemonSet, error
} }
if err != nil { if err != nil {
return nil, fmt.Errorf("Failed to get url: %v", err) return nil, fmt.Errorf("Failed to get url: %w", err)
} }
if response.StatusCode != 200 { if response.StatusCode != 200 {
return nil, fmt.Errorf("invalid http response status: %v", response.StatusCode) return nil, fmt.Errorf("invalid http response status: %v", response.StatusCode)
@ -124,7 +124,7 @@ func DaemonSetFromURL(ctx context.Context, url string) (*appsv1.DaemonSet, error
data, err := io.ReadAll(response.Body) data, err := io.ReadAll(response.Body)
if err != nil { if err != nil {
return nil, fmt.Errorf("Failed to read html response body: %v", err) return nil, fmt.Errorf("Failed to read html response body: %w", err)
} }
return DaemonSetFromData(data) return DaemonSetFromData(data)
} }
@ -134,12 +134,12 @@ func DaemonSetFromData(data []byte) (*appsv1.DaemonSet, error) {
var ds appsv1.DaemonSet var ds appsv1.DaemonSet
dataJSON, err := utilyaml.ToJSON(data) dataJSON, err := utilyaml.ToJSON(data)
if err != nil { if err != nil {
return nil, fmt.Errorf("Failed to parse data to json: %v", err) return nil, fmt.Errorf("Failed to parse data to json: %w", err)
} }
err = runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), dataJSON, &ds) err = runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), dataJSON, &ds)
if err != nil { if err != nil {
return nil, fmt.Errorf("Failed to decode DaemonSet spec: %v", err) return nil, fmt.Errorf("Failed to decode DaemonSet spec: %w", err)
} }
return &ds, nil return &ds, nil
} }
@ -32,7 +32,6 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/intstr"
utilnet "k8s.io/apimachinery/pkg/util/net" utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
@ -889,7 +888,7 @@ func (config *NetworkingTestConfig) DeleteNetProxyPod(ctx context.Context) {
framework.ExpectNoError(config.getPodClient().Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0))) framework.ExpectNoError(config.getPodClient().Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)))
config.EndpointPods = config.EndpointPods[1:] config.EndpointPods = config.EndpointPods[1:]
// wait for pod being deleted. // wait for pod being deleted.
err := e2epod.WaitForPodToDisappear(ctx, config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout) err := e2epod.WaitForPodNotFoundInNamespace(ctx, config.f.ClientSet, config.Namespace, pod.Name, wait.ForeverTestTimeout)
if err != nil { if err != nil {
framework.Failf("Failed to delete %s pod: %v", pod.Name, err) framework.Failf("Failed to delete %s pod: %v", pod.Name, err)
} }
@ -1027,7 +1026,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
body, err := io.ReadAll(resp.Body) body, err := io.ReadAll(resp.Body)
if err != nil { if err != nil {
ret.Status = HTTPError ret.Status = HTTPError
ret.Error = fmt.Errorf("error reading HTTP body: %v", err) ret.Error = fmt.Errorf("error reading HTTP body: %w", err)
framework.Logf("Poke(%q): %v", url, ret.Error) framework.Logf("Poke(%q): %v", url, ret.Error)
return ret return ret
} }
@ -1192,7 +1191,7 @@ func WaitForService(ctx context.Context, c clientset.Interface, namespace, name
}) })
if err != nil { if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"} stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for service %s/%s %s: %v", namespace, name, stateMsg[exist], err) return fmt.Errorf("error waiting for service %s/%s %s: %w", namespace, name, stateMsg[exist], err)
} }
return nil return nil
} }
@ -107,7 +107,7 @@ func NodeHasTaint(ctx context.Context, c clientset.Interface, nodeName string, t
// default test add-ons. // default test add-ons.
func AllNodesReady(ctx context.Context, c clientset.Interface, timeout time.Duration) error { func AllNodesReady(ctx context.Context, c clientset.Interface, timeout time.Duration) error {
if err := allNodesReady(ctx, c, timeout); err != nil { if err := allNodesReady(ctx, c, timeout); err != nil {
return fmt.Errorf("checking for ready nodes: %v", err) return fmt.Errorf("checking for ready nodes: %w", err)
} }
return nil return nil
} }
@ -296,7 +296,7 @@ func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []stri
func PickIP(ctx context.Context, c clientset.Interface) (string, error) { func PickIP(ctx context.Context, c clientset.Interface) (string, error) {
publicIps, err := GetPublicIps(ctx, c) publicIps, err := GetPublicIps(ctx, c)
if err != nil { if err != nil {
return "", fmt.Errorf("get node public IPs error: %s", err) return "", fmt.Errorf("get node public IPs error: %w", err)
} }
if len(publicIps) == 0 { if len(publicIps) == 0 {
return "", fmt.Errorf("got unexpected number (%d) of public IPs", len(publicIps)) return "", fmt.Errorf("got unexpected number (%d) of public IPs", len(publicIps))
@ -309,7 +309,7 @@ func PickIP(ctx context.Context, c clientset.Interface) (string, error) {
func GetPublicIps(ctx context.Context, c clientset.Interface) ([]string, error) { func GetPublicIps(ctx context.Context, c clientset.Interface) ([]string, error) {
nodes, err := GetReadySchedulableNodes(ctx, c) nodes, err := GetReadySchedulableNodes(ctx, c)
if err != nil { if err != nil {
return nil, fmt.Errorf("get schedulable and ready nodes error: %s", err) return nil, fmt.Errorf("get schedulable and ready nodes error: %w", err)
} }
ips := CollectAddresses(nodes, v1.NodeExternalIP) ips := CollectAddresses(nodes, v1.NodeExternalIP)
if len(ips) == 0 { if len(ips) == 0 {
@ -327,7 +327,7 @@ func GetPublicIps(ctx context.Context, c clientset.Interface) ([]string, error)
func GetReadySchedulableNodes(ctx context.Context, c clientset.Interface) (nodes *v1.NodeList, err error) { func GetReadySchedulableNodes(ctx context.Context, c clientset.Interface) (nodes *v1.NodeList, err error) {
nodes, err = checkWaitListSchedulableNodes(ctx, c) nodes, err = checkWaitListSchedulableNodes(ctx, c)
if err != nil { if err != nil {
return nil, fmt.Errorf("listing schedulable nodes error: %s", err) return nil, fmt.Errorf("listing schedulable nodes error: %w", err)
} }
Filter(nodes, func(node v1.Node) bool { Filter(nodes, func(node v1.Node) bool {
return IsNodeSchedulable(&node) && isNodeUntainted(&node) return IsNodeSchedulable(&node) && isNodeUntainted(&node)
@ -376,7 +376,7 @@ func GetRandomReadySchedulableNode(ctx context.Context, c clientset.Interface) (
func GetReadyNodesIncludingTainted(ctx context.Context, c clientset.Interface) (nodes *v1.NodeList, err error) { func GetReadyNodesIncludingTainted(ctx context.Context, c clientset.Interface) (nodes *v1.NodeList, err error) {
nodes, err = checkWaitListSchedulableNodes(ctx, c) nodes, err = checkWaitListSchedulableNodes(ctx, c)
if err != nil { if err != nil {
return nil, fmt.Errorf("listing schedulable nodes error: %s", err) return nil, fmt.Errorf("listing schedulable nodes error: %w", err)
} }
Filter(nodes, func(node v1.Node) bool { Filter(nodes, func(node v1.Node) bool {
return IsNodeSchedulable(&node) return IsNodeSchedulable(&node)
@ -536,7 +536,7 @@ func PodNodePairs(ctx context.Context, c clientset.Interface, ns string) ([]PodN
func GetClusterZones(ctx context.Context, c clientset.Interface) (sets.String, error) { func GetClusterZones(ctx context.Context, c clientset.Interface) (sets.String, error) {
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err) return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %w", err)
} }
// collect values of zone label from all nodes // collect values of zone label from all nodes
@ -558,7 +558,7 @@ func GetSchedulableClusterZones(ctx context.Context, c clientset.Interface) (set
// GetReadySchedulableNodes already filters our tainted and unschedulable nodes. // GetReadySchedulableNodes already filters our tainted and unschedulable nodes.
nodes, err := GetReadySchedulableNodes(ctx, c) nodes, err := GetReadySchedulableNodes(ctx, c)
if err != nil { if err != nil {
return nil, fmt.Errorf("error getting nodes while attempting to list cluster zones: %v", err) return nil, fmt.Errorf("error getting nodes while attempting to list cluster zones: %w", err)
} }
// collect values of zone label from all nodes // collect values of zone label from all nodes
@ -781,7 +781,7 @@ func removeNodeTaint(ctx context.Context, c clientset.Interface, nodeName string
func patchNodeTaints(ctx context.Context, c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error { func patchNodeTaints(ctx context.Context, c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error {
oldData, err := json.Marshal(oldNode) oldData, err := json.Marshal(oldNode)
if err != nil { if err != nil {
return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err) return fmt.Errorf("failed to marshal old node %#v for node %q: %w", oldNode, nodeName, err)
} }
newTaints := newNode.Spec.Taints newTaints := newNode.Spec.Taints
@ -789,12 +789,12 @@ func patchNodeTaints(ctx context.Context, c clientset.Interface, nodeName string
newNodeClone.Spec.Taints = newTaints newNodeClone.Spec.Taints = newTaints
newData, err := json.Marshal(newNodeClone) newData, err := json.Marshal(newNodeClone)
if err != nil { if err != nil {
return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNodeClone, nodeName, err) return fmt.Errorf("failed to marshal new node %#v for node %q: %w", newNodeClone, nodeName, err)
} }
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{}) patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
if err != nil { if err != nil {
return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err) return fmt.Errorf("failed to create patch for node %q: %w", nodeName, err)
} }
_, err = c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) _, err = c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
@ -56,17 +56,17 @@ func CreateUnschedulablePod(ctx context.Context, client clientset.Interface, nam
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command) pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}) pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err) return nil, fmt.Errorf("pod Create API error: %w", err)
} }
// Waiting for pod to become Unschedulable // Waiting for pod to become Unschedulable
err = WaitForPodNameUnschedulableInNamespace(ctx, client, pod.Name, namespace) err = WaitForPodNameUnschedulableInNamespace(ctx, client, pod.Name, namespace)
if err != nil { if err != nil {
return pod, fmt.Errorf("pod %q is not Unschedulable: %v", pod.Name, err) return pod, fmt.Errorf("pod %q is not Unschedulable: %w", pod.Name, err)
} }
// get fresh pod info // get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err) return pod, fmt.Errorf("pod Get API error: %w", err)
} }
return pod, nil return pod, nil
} }
@ -81,17 +81,17 @@ func CreatePod(ctx context.Context, client clientset.Interface, namespace string
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command) pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}) pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err) return nil, fmt.Errorf("pod Create API error: %w", err)
} }
// Waiting for pod to be running // Waiting for pod to be running
err = WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace) err = WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)
if err != nil { if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err) return pod, fmt.Errorf("pod %q is not Running: %w", pod.Name, err)
} }
// get fresh pod info // get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err) return pod, fmt.Errorf("pod Get API error: %w", err)
} }
return pod, nil return pod, nil
} }
@ -105,23 +105,23 @@ func CreateSecPod(ctx context.Context, client clientset.Interface, podConfig *Co
func CreateSecPodWithNodeSelection(ctx context.Context, client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) { func CreateSecPodWithNodeSelection(ctx context.Context, client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
pod, err := MakeSecPod(podConfig) pod, err := MakeSecPod(podConfig)
if err != nil { if err != nil {
return nil, fmt.Errorf("Unable to create pod: %v", err) return nil, fmt.Errorf("Unable to create pod: %w", err)
} }
pod, err = client.CoreV1().Pods(podConfig.NS).Create(ctx, pod, metav1.CreateOptions{}) pod, err = client.CoreV1().Pods(podConfig.NS).Create(ctx, pod, metav1.CreateOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err) return nil, fmt.Errorf("pod Create API error: %w", err)
} }
// Waiting for pod to be running // Waiting for pod to be running
err = WaitTimeoutForPodRunningInNamespace(ctx, client, pod.Name, podConfig.NS, timeout) err = WaitTimeoutForPodRunningInNamespace(ctx, client, pod.Name, podConfig.NS, timeout)
if err != nil { if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err) return pod, fmt.Errorf("pod %q is not Running: %w", pod.Name, err)
} }
// get fresh pod info // get fresh pod info
pod, err = client.CoreV1().Pods(podConfig.NS).Get(ctx, pod.Name, metav1.GetOptions{}) pod, err = client.CoreV1().Pods(podConfig.NS).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err) return pod, fmt.Errorf("pod Get API error: %w", err)
} }
return pod, nil return pod, nil
} }
@ -65,12 +65,12 @@ func DeletePodWithWaitByName(ctx context.Context, c clientset.Interface, podName
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
return nil // assume pod was already deleted return nil // assume pod was already deleted
} }
return fmt.Errorf("pod Delete API error: %v", err) return fmt.Errorf("pod Delete API error: %w", err)
} }
framework.Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName) framework.Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName)
err = WaitForPodNotFoundInNamespace(ctx, c, podName, podNamespace, PodDeleteTimeout) err = WaitForPodNotFoundInNamespace(ctx, c, podName, podNamespace, PodDeleteTimeout)
if err != nil { if err != nil {
return fmt.Errorf("pod %q was not deleted: %v", podName, err) return fmt.Errorf("pod %q was not deleted: %w", podName, err)
} }
return nil return nil
} }
@ -98,7 +98,7 @@ func DeletePodWithGracePeriodByName(ctx context.Context, c clientset.Interface,
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
return nil // assume pod was already deleted return nil // assume pod was already deleted
} }
return fmt.Errorf("pod Delete API error: %v", err) return fmt.Errorf("pod Delete API error: %w", err)
} }
return nil return nil
} }
@ -87,13 +87,13 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
SubResource("portforward") SubResource("portforward")
transport, upgrader, err := spdy.RoundTripperFor(restConfig) transport, upgrader, err := spdy.RoundTripperFor(restConfig)
if err != nil { if err != nil {
return nil, fmt.Errorf("create round tripper: %v", err) return nil, fmt.Errorf("create round tripper: %w", err)
} }
dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL()) dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL())
streamConn, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name) streamConn, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name)
if err != nil { if err != nil {
return nil, fmt.Errorf("dialer failed: %v", err) return nil, fmt.Errorf("dialer failed: %w", err)
} }
requestID := "1" requestID := "1"
defer func() { defer func() {
@ -112,7 +112,7 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
// This happens asynchronously. // This happens asynchronously.
errorStream, err := streamConn.CreateStream(headers) errorStream, err := streamConn.CreateStream(headers)
if err != nil { if err != nil {
return nil, fmt.Errorf("error creating error stream: %v", err) return nil, fmt.Errorf("error creating error stream: %w", err)
} }
errorStream.Close() errorStream.Close()
go func() { go func() {
@ -129,7 +129,7 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
headers.Set(v1.StreamType, v1.StreamTypeData) headers.Set(v1.StreamType, v1.StreamTypeData)
dataStream, err := streamConn.CreateStream(headers) dataStream, err := streamConn.CreateStream(headers)
if err != nil { if err != nil {
return nil, fmt.Errorf("error creating data stream: %v", err) return nil, fmt.Errorf("error creating data stream: %w", err)
} }
return &stream{ return &stream{
@ -107,7 +107,7 @@ func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration
return out, nil return out, nil
} }
if elapsed := time.Since(start); elapsed > timeout { if elapsed := time.Since(start); elapsed > timeout {
return out, fmt.Errorf("RunHostCmd still failed after %v: %v", elapsed, err) return out, fmt.Errorf("RunHostCmd still failed after %v: %w", elapsed, err)
} }
framework.Logf("Waiting %v to retry failed RunHostCmd: %v", interval, err) framework.Logf("Waiting %v to retry failed RunHostCmd: %v", interval, err)
time.Sleep(interval) time.Sleep(interval)
@ -166,7 +166,7 @@ func MatchContainerOutput(
// Grab its logs. Get host first. // Grab its logs. Get host first.
podStatus, err := podClient.Get(ctx, createdPod.Name, metav1.GetOptions{}) podStatus, err := podClient.Get(ctx, createdPod.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return fmt.Errorf("failed to get pod status: %v", err) return fmt.Errorf("failed to get pod status: %w", err)
} }
if podErr != nil { if podErr != nil {
@ -192,14 +192,14 @@ func MatchContainerOutput(
if err != nil { if err != nil {
framework.Logf("Failed to get logs from node %q pod %q container %q. %v", framework.Logf("Failed to get logs from node %q pod %q container %q. %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err) podStatus.Spec.NodeName, podStatus.Name, containerName, err)
return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err) return fmt.Errorf("failed to get logs from %s for %s: %w", podStatus.Name, containerName, err)
} }
for _, expected := range expectedOutput { for _, expected := range expectedOutput {
m := matcher(expected) m := matcher(expected)
matches, err := m.Match(logs) matches, err := m.Match(logs)
if err != nil { if err != nil {
return fmt.Errorf("expected %q in container output: %v", expected, err) return fmt.Errorf("expected %q in container output: %w", expected, err)
} else if !matches { } else if !matches {
return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs)) return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
} }
@ -27,7 +27,6 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch" "k8s.io/apimachinery/pkg/util/strategicpatch"
@ -134,7 +133,7 @@ func (c *PodClient) Update(ctx context.Context, name string, updateFn func(pod *
framework.ExpectNoError(wait.PollWithContext(ctx, time.Millisecond*500, time.Second*30, func(ctx context.Context) (bool, error) { framework.ExpectNoError(wait.PollWithContext(ctx, time.Millisecond*500, time.Second*30, func(ctx context.Context) (bool, error) {
pod, err := c.PodInterface.Get(ctx, name, metav1.GetOptions{}) pod, err := c.PodInterface.Get(ctx, name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("failed to get pod %q: %v", name, err) return false, fmt.Errorf("failed to get pod %q: %w", name, err)
} }
updateFn(pod) updateFn(pod)
_, err = c.PodInterface.Update(ctx, pod, metav1.UpdateOptions{}) _, err = c.PodInterface.Update(ctx, pod, metav1.UpdateOptions{})
@ -146,7 +145,7 @@ func (c *PodClient) Update(ctx context.Context, name string, updateFn func(pod *
framework.Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err) framework.Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
return false, nil return false, nil
} }
return false, fmt.Errorf("failed to update pod %q: %v", name, err) return false, fmt.Errorf("failed to update pod %q: %w", name, err)
})) }))
} }
@ -182,8 +181,7 @@ func (c *PodClient) DeleteSync(ctx context.Context, name string, options metav1.
if err != nil && !apierrors.IsNotFound(err) { if err != nil && !apierrors.IsNotFound(err) {
framework.Failf("Failed to delete pod %q: %v", name, err) framework.Failf("Failed to delete pod %q: %v", name, err)
} }
gomega.Expect(WaitForPodToDisappear(ctx, c.f.ClientSet, namespace, name, labels.Everything(), framework.ExpectNoError(WaitForPodNotFoundInNamespace(ctx, c.f.ClientSet, namespace, name, timeout), "wait for pod %q to disappear", name)
2*time.Second, timeout)).To(gomega.Succeed(), "wait for pod %q to disappear", name)
} }
// mungeSpec apply test-suite specific transformations to the pod spec. // mungeSpec apply test-suite specific transformations to the pod spec.
@ -263,7 +261,7 @@ func (c *PodClient) WaitForErrorEventOrSuccess(ctx context.Context, pod *v1.Pod)
err := wait.PollWithContext(ctx, framework.Poll, framework.PodStartTimeout, func(ctx context.Context) (bool, error) { err := wait.PollWithContext(ctx, framework.Poll, framework.PodStartTimeout, func(ctx context.Context) (bool, error) {
evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(scheme.Scheme, pod) evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(scheme.Scheme, pod)
if err != nil { if err != nil {
return false, fmt.Errorf("error in listing events: %s", err) return false, fmt.Errorf("error in listing events: %w", err)
} }
for _, e := range evnts.Items { for _, e := range evnts.Items {
switch e.Reason { switch e.Reason {
@ -290,7 +288,7 @@ func (c *PodClient) MatchContainerOutput(ctx context.Context, name string, conta
} }
regex, err := regexp.Compile(expectedRegexp) regex, err := regexp.Compile(expectedRegexp)
if err != nil { if err != nil {
return fmt.Errorf("failed to compile regexp %q: %v", expectedRegexp, err) return fmt.Errorf("failed to compile regexp %q: %w", expectedRegexp, err)
} }
if !regex.MatchString(output) { if !regex.MatchString(output) {
return fmt.Errorf("failed to match regexp %q in output %q", expectedRegexp, output) return fmt.Errorf("failed to match regexp %q in output %q", expectedRegexp, output)
@ -18,7 +18,6 @@ package pod
import ( import (
"context" "context"
"errors"
"fmt" "fmt"
"os" "os"
"path/filepath" "path/filepath"
@ -31,7 +30,6 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2" "k8s.io/klog/v2"
@ -40,14 +38,6 @@ import (
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
) )
// errPodCompleted is returned by PodRunning or PodContainerRunning to indicate that
// the pod has already reached completed state.
var errPodCompleted = FinalError(errors.New("pod ran to completion successfully"))
// errPodFailed is returned by PodRunning or PodContainerRunning to indicate that
// the pod has already reached a permanent failue state.
var errPodFailed = FinalError(errors.New("pod failed permanently"))
// LabelLogOnPodFailure can be used to mark which Pods will have their logs logged in the case of // LabelLogOnPodFailure can be used to mark which Pods will have their logs logged in the case of
// a test failure. By default, if there are no Pods with this label, only the first 5 Pods will // a test failure. By default, if there are no Pods with this label, only the first 5 Pods will
// have their logs fetched. // have their logs fetched.
@ -69,95 +59,6 @@ func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...) gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
} }
func isElementOf(podUID types.UID, pods *v1.PodList) bool {
for _, pod := range pods.Items {
if pod.UID == podUID {
return true
}
}
return false
}
// ProxyResponseChecker is a context for checking pods responses by issuing GETs to them (via the API
// proxy) and verifying that they answer with their own pod name.
type ProxyResponseChecker struct {
c clientset.Interface
ns string
label labels.Selector
controllerName string
respondName bool // Whether the pod should respond with its own name.
pods *v1.PodList
}
// NewProxyResponseChecker returns a context for checking pods responses.
func NewProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *v1.PodList) ProxyResponseChecker {
return ProxyResponseChecker{c, ns, label, controllerName, respondName, pods}
}
// CheckAllResponses issues GETs to all pods in the context and verify they
// reply with their own pod name.
func (r ProxyResponseChecker) CheckAllResponses(ctx context.Context) (done bool, err error) {
successes := 0
options := metav1.ListOptions{LabelSelector: r.label.String()}
currentPods, err := r.c.CoreV1().Pods(r.ns).List(ctx, options)
expectNoError(err, "Failed to get list of currentPods in namespace: %s", r.ns)
for i, pod := range r.pods.Items {
// Check that the replica list remains unchanged, otherwise we have problems.
if !isElementOf(pod.UID, currentPods) {
return false, fmt.Errorf("pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason. Current replica set: %v", pod.UID, currentPods)
}
ctxUntil, cancel := context.WithTimeout(ctx, singleCallTimeout)
defer cancel()
body, err := r.c.CoreV1().RESTClient().Get().
Namespace(r.ns).
Resource("pods").
SubResource("proxy").
Name(string(pod.Name)).
Do(ctxUntil).
Raw()
if err != nil {
if ctxUntil.Err() != nil {
// We may encounter errors here because of a race between the pod readiness and apiserver
// proxy. So, we log the error and retry if this occurs.
framework.Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
return false, nil
}
framework.Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
continue
}
// The response checker expects the pod's name unless !respondName, in
// which case it just checks for a non-empty response.
got := string(body)
what := ""
if r.respondName {
what = "expected"
want := pod.Name
if got != want {
framework.Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
r.controllerName, i+1, pod.Name, want, got)
continue
}
} else {
what = "non-empty"
if len(got) == 0 {
framework.Logf("Controller %s: Replica %d [%s] expected non-empty response",
r.controllerName, i+1, pod.Name)
continue
}
}
successes++
framework.Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
}
if successes < len(r.pods.Items) {
return false, nil
}
return true, nil
}
// PodsCreated returns a pod list matched by the given name. // PodsCreated returns a pod list matched by the given name.
func PodsCreated(ctx context.Context, c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) { func PodsCreated(ctx context.Context, c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
@ -213,10 +114,7 @@ func podRunningMaybeResponding(ctx context.Context, c clientset.Interface, ns, n
return fmt.Errorf("failed to wait for pods running: %v", e) return fmt.Errorf("failed to wait for pods running: %v", e)
} }
if checkResponding { if checkResponding {
err = PodsResponding(ctx, c, ns, name, wantName, pods) return WaitForPodsResponding(ctx, c, ns, name, wantName, podRespondingTimeout, pods)
if err != nil {
return fmt.Errorf("failed to wait for pods responding: %v", err)
}
} }
return nil return nil
} }
@ -635,7 +533,7 @@ func VerifyPodHasConditionWithType(ctx context.Context, f *framework.Framework,
func getNodeTTLAnnotationValue(ctx context.Context, c clientset.Interface) (time.Duration, error) { func getNodeTTLAnnotationValue(ctx context.Context, c clientset.Interface) (time.Duration, error) {
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{}) nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil || len(nodes.Items) == 0 { if err != nil || len(nodes.Items) == 0 {
return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %v", err) return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %w", err)
} }
// Since TTL the kubelet is using is stored in node object, for the timeout // Since TTL the kubelet is using is stored in node object, for the timeout
// purpose we take it from the first node (all of them should be the same). // purpose we take it from the first node (all of them should be the same).
@ -674,15 +572,3 @@ func IsPodActive(p *v1.Pod) bool {
v1.PodFailed != p.Status.Phase && v1.PodFailed != p.Status.Phase &&
p.DeletionTimestamp == nil p.DeletionTimestamp == nil
} }
func podIdentifier(namespace, name string) string {
return fmt.Sprintf("%s/%s", namespace, name)
}
func identifier(pod *v1.Pod) string {
id := podIdentifier(pod.Namespace, pod.Name)
if pod.UID != "" {
id += fmt.Sprintf("(%s)", pod.UID)
}
return id
}
@ -17,12 +17,11 @@ limitations under the License.
package pod package pod
import ( import (
"bytes"
"context" "context"
"errors" "errors"
"fmt" "fmt"
"reflect" "reflect"
"text/tabwriter" "strings"
"time" "time"
"github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2"
@ -30,15 +29,18 @@ import (
"github.com/onsi/gomega/gcustom" "github.com/onsi/gomega/gcustom"
"github.com/onsi/gomega/types" "github.com/onsi/gomega/types"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait" apitypes "k8s.io/apimachinery/pkg/types"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubectl/pkg/util/podutils" "k8s.io/kubectl/pkg/util/podutils"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
"k8s.io/kubernetes/test/utils/format"
) )
const ( const (
@ -66,108 +68,6 @@ const (
type podCondition func(pod *v1.Pod) (bool, error) type podCondition func(pod *v1.Pod) (bool, error)
type timeoutError struct {
msg string
observedObjects []interface{}
}
func (e *timeoutError) Error() string {
return e.msg
}
func TimeoutError(msg string, observedObjects ...interface{}) *timeoutError {
return &timeoutError{
msg: msg,
observedObjects: observedObjects,
}
}
// FinalError constructs an error that indicates to a poll function that
// polling can be stopped immediately because some permanent error has been
// encountered that is not going to go away.
//
// TODO (@pohly): move this into framework once the refactoring from
// https://github.com/kubernetes/kubernetes/pull/112043 allows it. Right now it
// leads to circular dependencies.
func FinalError(err error) error {
return &FinalErr{Err: err}
}
type FinalErr struct {
Err error
}
func (err *FinalErr) Error() string {
if err.Err != nil {
return fmt.Sprintf("final error: %s", err.Err.Error())
}
return "final error, exact problem unknown"
}
func (err *FinalErr) Unwrap() error {
return err.Err
}
// IsFinal checks whether the error was marked as final by wrapping some error
// with FinalError.
func IsFinal(err error) bool {
var finalErr *FinalErr
return errors.As(err, &finalErr)
}
// maybeTimeoutError returns a TimeoutError if err is a timeout. Otherwise, wrap err.
// taskFormat and taskArgs should be the task being performed when the error occurred,
// e.g. "waiting for pod to be running".
func maybeTimeoutError(err error, taskFormat string, taskArgs ...interface{}) error {
if IsTimeout(err) {
return TimeoutError(fmt.Sprintf("timed out while "+taskFormat, taskArgs...))
} else if err != nil {
return fmt.Errorf("error while %s: %w", fmt.Sprintf(taskFormat, taskArgs...), err)
} else {
return nil
}
}
func IsTimeout(err error) bool {
if err == wait.ErrWaitTimeout {
return true
}
if _, ok := err.(*timeoutError); ok {
return true
}
return false
}
// errorBadPodsStates create error message of basic info of bad pods for debugging.
func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration, err error) error {
errStr := fmt.Sprintf("%d / %d pods in namespace %s are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout)
// Print bad pods info only if there are fewer than 10 bad pods
if len(badPods) > 10 {
errStr += "There are too many bad pods. Please check log for details."
} else {
buf := bytes.NewBuffer(nil)
w := tabwriter.NewWriter(buf, 0, 0, 1, ' ', 0)
fmt.Fprintln(w, "POD\tNODE\tPHASE\tGRACE\tCONDITIONS")
for _, badPod := range badPods {
grace := ""
if badPod.DeletionGracePeriodSeconds != nil {
grace = fmt.Sprintf("%ds", *badPod.DeletionGracePeriodSeconds)
}
podInfo := fmt.Sprintf("%s\t%s\t%s\t%s\t%+v",
badPod.ObjectMeta.Name, badPod.Spec.NodeName, badPod.Status.Phase, grace, badPod.Status.Conditions)
fmt.Fprintln(w, podInfo)
}
w.Flush()
errStr += buf.String()
}
if err != nil && !IsTimeout(err) {
return fmt.Errorf("%s\nLast error: %w", errStr, err)
}
return TimeoutError(errStr)
}
// BeRunningNoRetries verifies that a pod starts running. It's a permanent // BeRunningNoRetries verifies that a pod starts running. It's a permanent
// failure when the pod enters some other permanent phase. // failure when the pod enters some other permanent phase.
func BeRunningNoRetries() types.GomegaMatcher { func BeRunningNoRetries() types.GomegaMatcher {
@ -208,263 +108,265 @@ func BeInPhase(phase v1.PodPhase) types.GomegaMatcher {
// example, in cluster startup, because the number of pods increases while // example, in cluster startup, because the number of pods increases while
// waiting. All pods that are in SUCCESS state are not counted. // waiting. All pods that are in SUCCESS state are not counted.
// //
// If ignoreLabels is not empty, pods matching this selector are ignored.
//
// If minPods or allowedNotReadyPods are -1, this method returns immediately // If minPods or allowedNotReadyPods are -1, this method returns immediately
// without waiting. // without waiting.
func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error { func WaitForPodsRunningReady(ctx context.Context, c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration) error {
if minPods == -1 || allowedNotReadyPods == -1 { if minPods == -1 || allowedNotReadyPods == -1 {
return nil return nil
} }
ignoreSelector := labels.SelectorFromSet(map[string]string{}) // We get the new list of pods, replication controllers, and replica
start := time.Now() // sets in every iteration because more pods come online during startup
framework.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready", // and we want to ensure they are also checked.
timeout, minPods, ns) //
var ignoreNotReady bool // This struct gets populated while polling, then gets checked, and in
badPods := []v1.Pod{} // case of a timeout is included in the failure message.
desiredPods := 0 type state struct {
notReady := int32(0) ReplicationControllers []v1.ReplicationController
var lastAPIError error ReplicaSets []appsv1.ReplicaSet
Pods []v1.Pod
}
if wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) { // notReady is -1 for any failure other than a timeout.
// We get the new list of pods, replication controllers, and // Otherwise it is the number of pods that we were still
// replica sets in every iteration because more pods come // waiting for.
// online during startup and we want to ensure they are also notReady := int32(-1)
// checked.
replicas, replicaOk := int32(0), int32(0) err := framework.Gomega().Eventually(ctx, framework.HandleRetry(func(ctx context.Context) (*state, error) {
// Clear API error from the last attempt in case the following calls succeed. // Reset notReady at the start of a poll attempt.
lastAPIError = nil notReady = -1
rcList, err := c.CoreV1().ReplicationControllers(ns).List(ctx, metav1.ListOptions{}) rcList, err := c.CoreV1().ReplicationControllers(ns).List(ctx, metav1.ListOptions{})
lastAPIError = err
if err != nil { if err != nil {
return handleWaitingAPIError(err, false, "listing replication controllers in namespace %s", ns) return nil, fmt.Errorf("listing replication controllers in namespace %s: %w", ns, err)
} }
for _, rc := range rcList.Items { rsList, err := c.AppsV1().ReplicaSets(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("listing replication sets in namespace %s: %w", ns, err)
}
podList, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("listing pods in namespace %s: %w", ns, err)
}
return &state{
ReplicationControllers: rcList.Items,
ReplicaSets: rsList.Items,
Pods: podList.Items,
}, nil
})).WithTimeout(timeout).Should(framework.MakeMatcher(func(s *state) (func() string, error) {
replicas, replicaOk := int32(0), int32(0)
for _, rc := range s.ReplicationControllers {
replicas += *rc.Spec.Replicas replicas += *rc.Spec.Replicas
replicaOk += rc.Status.ReadyReplicas replicaOk += rc.Status.ReadyReplicas
} }
for _, rs := range s.ReplicaSets {
rsList, err := c.AppsV1().ReplicaSets(ns).List(ctx, metav1.ListOptions{})
lastAPIError = err
if err != nil {
return handleWaitingAPIError(err, false, "listing replication sets in namespace %s", ns)
}
for _, rs := range rsList.Items {
replicas += *rs.Spec.Replicas replicas += *rs.Spec.Replicas
replicaOk += rs.Status.ReadyReplicas replicaOk += rs.Status.ReadyReplicas
} }
podList, err := c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
lastAPIError = err
if err != nil {
return handleWaitingAPIError(err, false, "listing pods in namespace %s", ns)
}
nOk := int32(0) nOk := int32(0)
notReady = int32(0) notReady = int32(0)
badPods = []v1.Pod{} failedPods := []v1.Pod{}
desiredPods = len(podList.Items) otherPods := []v1.Pod{}
for _, pod := range podList.Items { succeededPods := []string{}
if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) { for _, pod := range s.Pods {
continue
}
res, err := testutils.PodRunningReady(&pod) res, err := testutils.PodRunningReady(&pod)
switch { switch {
case res && err == nil: case res && err == nil:
nOk++ nOk++
case pod.Status.Phase == v1.PodSucceeded: case pod.Status.Phase == v1.PodSucceeded:
framework.Logf("The status of Pod %s is Succeeded, skipping waiting", pod.ObjectMeta.Name)
// it doesn't make sense to wait for this pod // it doesn't make sense to wait for this pod
continue succeededPods = append(succeededPods, pod.Name)
case pod.Status.Phase != v1.PodFailed: case pod.Status.Phase == v1.PodFailed:
framework.Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
notReady++
badPods = append(badPods, pod)
default:
if metav1.GetControllerOf(&pod) == nil {
framework.Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
badPods = append(badPods, pod)
}
// ignore failed pods that are controlled by some controller // ignore failed pods that are controlled by some controller
if metav1.GetControllerOf(&pod) == nil {
failedPods = append(failedPods, pod)
}
default:
notReady++
otherPods = append(otherPods, pod)
} }
} }
done := replicaOk == replicas && nOk >= minPods && (len(failedPods)+len(otherPods)) == 0
framework.Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)", if done {
nOk, len(podList.Items), ns, int(time.Since(start).Seconds())) return nil, nil
framework.Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
return true, nil
}
ignoreNotReady = (notReady <= allowedNotReadyPods)
LogPodStates(badPods)
return false, nil
}) != nil {
if !ignoreNotReady {
return errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout, lastAPIError)
} }
// Delayed formatting of a failure message.
return func() string {
var buffer strings.Builder
buffer.WriteString(fmt.Sprintf("Expected all pods (need at least %d) in namespace %q to be running and ready (except for %d).\n", minPods, ns, allowedNotReadyPods))
buffer.WriteString(fmt.Sprintf("%d / %d pods were running and ready.\n", nOk, len(s.Pods)))
buffer.WriteString(fmt.Sprintf("Expected %d pod replicas, %d are Running and Ready.\n", replicas, replicaOk))
if len(succeededPods) > 0 {
buffer.WriteString(fmt.Sprintf("Pods that completed successfully:\n%s", format.Object(succeededPods, 1)))
}
if len(failedPods) > 0 {
buffer.WriteString(fmt.Sprintf("Pods that failed and were not controlled by some controller:\n%s", format.Object(failedPods, 1)))
}
if len(otherPods) > 0 {
buffer.WriteString(fmt.Sprintf("Pods that were neither completed nor running:\n%s", format.Object(otherPods, 1)))
}
return buffer.String()
}, nil
}))
// An error might not be fatal.
if err != nil && notReady >= 0 && notReady <= allowedNotReadyPods {
framework.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods) framework.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
return nil
} }
return nil return err
} }
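The MakeMatcher pattern used above (return nil on success, otherwise return a function that builds the failure message only when the wait finally gives up) can be reused for other custom checks. A minimal sketch with an invented threshold and helper name:

// Sketch only. Assumes framework = "k8s.io/kubernetes/test/e2e/framework",
// podutils = "k8s.io/kubectl/pkg/util/podutils", types = "github.com/onsi/gomega/types".
func atLeastReady(min int) types.GomegaMatcher {
	return framework.MakeMatcher(func(pods *v1.PodList) (func() string, error) {
		ready := 0
		for i := range pods.Items {
			if podutils.IsPodReady(&pods.Items[i]) {
				ready++
			}
		}
		if ready >= min {
			return nil, nil // matched; no failure message needed
		}
		return func() string { // evaluated lazily, only for the final report
			return fmt.Sprintf("expected at least %d ready pods, found %d", min, ready)
		}, nil
	})
}

// e.g. framework.Gomega().Eventually(ctx, framework.ListObjects(c.CoreV1().Pods(ns).List, metav1.ListOptions{})).Should(atLeastReady(3))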
// WaitForPodCondition waits a pods to be matched to the given condition. // WaitForPodCondition waits a pods to be matched to the given condition.
// If the condition callback returns an error that matches FinalErr (checked with IsFinal), // The condition callback may use gomega.StopTrying to abort early.
// then polling aborts early.
func WaitForPodCondition(ctx context.Context, c clientset.Interface, ns, podName, conditionDesc string, timeout time.Duration, condition podCondition) error { func WaitForPodCondition(ctx context.Context, c clientset.Interface, ns, podName, conditionDesc string, timeout time.Duration, condition podCondition) error {
framework.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, conditionDesc) return framework.Gomega().
var ( Eventually(ctx, framework.RetryNotFound(framework.GetObject(c.CoreV1().Pods(ns).Get, podName, metav1.GetOptions{}))).
lastPodError error WithTimeout(timeout).
lastPod *v1.Pod Should(framework.MakeMatcher(func(pod *v1.Pod) (func() string, error) {
start = time.Now() done, err := condition(pod)
) if err != nil {
err := wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) { return nil, err
pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
lastPodError = err
if err != nil {
return handleWaitingAPIError(err, true, "getting pod %s", podIdentifier(ns, podName))
}
lastPod = pod // Don't overwrite if an error occurs after successfully retrieving.
// log now so that current pod info is reported before calling `condition()`
framework.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v",
podName, pod.Status.Phase, pod.Status.Reason, podutils.IsPodReady(pod), time.Since(start))
if done, err := condition(pod); done {
if err == nil {
framework.Logf("Pod %q satisfied condition %q", podName, conditionDesc)
} }
return true, err if done {
} else if err != nil { return nil, nil
framework.Logf("Error evaluating pod condition %s: %v", conditionDesc, err)
if IsFinal(err) {
return false, err
} }
} return func() string {
return false, nil return fmt.Sprintf("expected pod to be %s, got instead:\n%s", conditionDesc, format.Object(pod, 1))
}) }, nil
if err == nil { }))
return nil
}
if IsTimeout(err) {
if lastPod != nil {
return TimeoutError(fmt.Sprintf("timed out while waiting for pod %s to be %s", podIdentifier(ns, podName), conditionDesc),
lastPod,
)
} else if lastPodError != nil {
// If the last API call was an error, propagate that instead of the timeout error.
err = lastPodError
}
}
return maybeTimeoutError(err, "waiting for pod %s to be %s", podIdentifier(ns, podName), conditionDesc)
} }
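A usage sketch for the rewritten WaitForPodCondition (hypothetical caller; the pod name and timeout are made up, and it additionally assumes the imports e2epod "k8s.io/kubernetes/test/e2e/framework/pod" and "github.com/onsi/gomega"). The callback can return gomega.StopTrying as its error to abort polling before the timeout:

// waitForMyPodRunning waits for the hypothetical pod "my-pod" to reach the
// Running phase and gives up immediately if the pod fails permanently.
func waitForMyPodRunning(ctx context.Context, c clientset.Interface, ns string) error {
    return e2epod.WaitForPodCondition(ctx, c, ns, "my-pod", "running", 2*time.Minute,
        func(pod *v1.Pod) (bool, error) {
            if pod.Status.Phase == v1.PodFailed {
                // Abort polling right away instead of waiting for the timeout.
                return false, gomega.StopTrying("pod failed permanently")
            }
            return pod.Status.Phase == v1.PodRunning, nil
        })
}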
// WaitForAllPodsCondition waits for the listed pods to match the given condition. // Range determines how many items must exist and how many must match a certain
// To succeed, at least minPods must be listed, and all listed pods must match the condition. // condition. Values <= 0 are ignored.
func WaitForAllPodsCondition(ctx context.Context, c clientset.Interface, ns string, opts metav1.ListOptions, minPods int, conditionDesc string, timeout time.Duration, condition podCondition) (*v1.PodList, error) { // TODO (?): move to test/e2e/framework/range
framework.Logf("Waiting up to %v for at least %d pods in namespace %s to be %s", timeout, minPods, ns, conditionDesc) type Range struct {
var pods *v1.PodList // MinMatching must be <= actual matching items or <= 0.
matched := 0 MinMatching int
err := wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (done bool, err error) { // MaxMatching must be >= actual matching items or <= 0.
pods, err = c.CoreV1().Pods(ns).List(ctx, opts) // To check for "no matching items", set NonMatching.
if err != nil { MaxMatching int
return handleWaitingAPIError(err, true, "listing pods") // NoneMatching indicates that no item must match.
} NoneMatching bool
// AllMatching indicates that all items must match.
AllMatching bool
// MinFound must be <= existing items or <= 0.
MinFound int
}
// Min returns how many items must exist.
func (r Range) Min() int {
min := r.MinMatching
if min < r.MinFound {
min = r.MinFound
}
return min
}
// WaitForPods waits for pods in the given namespace to match the given
// condition. How many pods must exist and how many must match the condition
// is determined by the range parameter. The condition callback may use
// gomega.StopTrying(...).Now() to abort early. The condition description
// will be used with "expected pods to <description>".
func WaitForPods(ctx context.Context, c clientset.Interface, ns string, opts metav1.ListOptions, r Range, timeout time.Duration, conditionDesc string, condition func(*v1.Pod) bool) (*v1.PodList, error) {
var finalPods *v1.PodList
minPods := r.Min()
match := func(pods *v1.PodList) (func() string, error) {
finalPods = pods
if len(pods.Items) < minPods { if len(pods.Items) < minPods {
framework.Logf("found %d pods, waiting for at least %d", len(pods.Items), minPods) return func() string {
return false, nil return fmt.Sprintf("expected at least %d pods, only got %d", minPods, len(pods.Items))
}, nil
} }
nonMatchingPods := []string{} var nonMatchingPods, matchingPods []v1.Pod
for _, pod := range pods.Items { for _, pod := range pods.Items {
done, err := condition(&pod) if condition(&pod) {
if done && err != nil { matchingPods = append(matchingPods, pod)
return false, fmt.Errorf("error evaluating pod %s: %w", identifier(&pod), err) } else {
} nonMatchingPods = append(nonMatchingPods, pod)
if !done {
nonMatchingPods = append(nonMatchingPods, identifier(&pod))
} }
} }
matched = len(pods.Items) - len(nonMatchingPods) matching := len(pods.Items) - len(nonMatchingPods)
if len(nonMatchingPods) <= 0 { if matching < r.MinMatching && r.MinMatching > 0 {
return true, nil // All pods match. return func() string {
return fmt.Sprintf("expected at least %d pods to %s, %d out of %d were not:\n%s",
r.MinMatching, conditionDesc, len(nonMatchingPods), len(pods.Items),
format.Object(nonMatchingPods, 1))
}, nil
} }
framework.Logf("%d pods are not %s: %v", len(nonMatchingPods), conditionDesc, nonMatchingPods) if len(nonMatchingPods) > 0 && r.AllMatching {
return false, nil return func() string {
}) return fmt.Sprintf("expected all pods to %s, %d out of %d were not:\n%s",
return pods, maybeTimeoutError(err, "waiting for at least %d pods to be %s (matched %d)", minPods, conditionDesc, matched) conditionDesc, len(nonMatchingPods), len(pods.Items),
format.Object(nonMatchingPods, 1))
}, nil
}
if matching > r.MaxMatching && r.MaxMatching > 0 {
return func() string {
return fmt.Sprintf("expected at most %d pods to %s, %d out of %d were:\n%s",
r.MaxMatching, conditionDesc, len(matchingPods), len(pods.Items),
format.Object(matchingPods, 1))
}, nil
}
if matching > 0 && r.NoneMatching {
return func() string {
return fmt.Sprintf("expected no pods to %s, %d out of %d were:\n%s",
conditionDesc, len(matchingPods), len(pods.Items),
format.Object(matchingPods, 1))
}, nil
}
return nil, nil
}
err := framework.Gomega().
Eventually(ctx, framework.ListObjects(c.CoreV1().Pods(ns).List, opts)).
WithTimeout(timeout).
Should(framework.MakeMatcher(match))
return finalPods, err
}
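As a usage sketch (hypothetical; the label selector, count and timeout are illustrative assumptions), a test could require at least three "app=frontend" pods to exist and all of them to pass the RunningReady helper defined just below:

// waitForFrontendPods waits until at least three frontend pods exist and all
// of them are running and ready, returning the final list.
func waitForFrontendPods(ctx context.Context, c clientset.Interface, ns string) (*v1.PodList, error) {
    opts := metav1.ListOptions{LabelSelector: "app=frontend"}
    return e2epod.WaitForPods(ctx, c, ns, opts,
        e2epod.Range{MinFound: 3, AllMatching: true},
        5*time.Minute, "be running and ready", e2epod.RunningReady)
}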
// RunningReady checks whether pod p's phase is running and it has a ready
// condition of status true.
func RunningReady(p *v1.Pod) bool {
return p.Status.Phase == v1.PodRunning && podutils.IsPodReady(p)
} }
// WaitForPodsRunning waits for a given `timeout` to evaluate if a certain number of pods in the given `ns` are running. // WaitForPodsRunning waits for a given `timeout` to evaluate if a certain number of pods in the given `ns` are running.
func WaitForPodsRunning(c clientset.Interface, ns string, num int, timeout time.Duration) error { func WaitForPodsRunning(c clientset.Interface, ns string, num int, timeout time.Duration) error {
matched := 0 _, err := WaitForPods(context.TODO(), c, ns, metav1.ListOptions{}, Range{MinMatching: num, MaxMatching: num}, timeout,
err := wait.PollImmediate(framework.PollInterval(), timeout, func() (done bool, err error) { "be running and ready", func(pod *v1.Pod) bool {
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) ready, _ := testutils.PodRunningReady(pod)
if err != nil { return ready
return handleWaitingAPIError(err, true, "listing pods") })
} return err
matched = 0
for _, pod := range pods.Items {
if ready, _ := testutils.PodRunningReady(&pod); ready {
matched++
}
}
if matched == num {
return true, nil
}
framework.Logf("expect %d pods are running, but got %v", num, matched)
return false, nil
})
return maybeTimeoutError(err, "waiting for pods to be running (want %v, matched %d)", num, matched)
} }
// WaitForPodsSchedulingGated waits for a given `timeout` to evaluate if a certain number of pods in the given `ns` stay in the scheduling-gated state. // WaitForPodsSchedulingGated waits for a given `timeout` to evaluate if a certain number of pods in the given `ns` stay in the scheduling-gated state.
func WaitForPodsSchedulingGated(c clientset.Interface, ns string, num int, timeout time.Duration) error { func WaitForPodsSchedulingGated(c clientset.Interface, ns string, num int, timeout time.Duration) error {
matched := 0 _, err := WaitForPods(context.TODO(), c, ns, metav1.ListOptions{}, Range{MinMatching: num, MaxMatching: num}, timeout,
err := wait.PollImmediate(framework.PollInterval(), timeout, func() (done bool, err error) { "be in scheduling gated state", func(pod *v1.Pod) bool {
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
if err != nil {
return handleWaitingAPIError(err, true, "listing pods")
}
matched = 0
for _, pod := range pods.Items {
for _, condition := range pod.Status.Conditions { for _, condition := range pod.Status.Conditions {
if condition.Type == v1.PodScheduled && condition.Reason == v1.PodReasonSchedulingGated { if condition.Type == v1.PodScheduled && condition.Reason == v1.PodReasonSchedulingGated {
matched++ return true
} }
} }
} return false
if matched == num { })
return true, nil return err
}
framework.Logf("expect %d pods in scheduling gated state, but got %v", num, matched)
return false, nil
})
return maybeTimeoutError(err, "waiting for pods to be scheduling gated (want %d, matched %d)", num, matched)
} }
// WaitForPodsWithSchedulingGates waits for a given `timeout` to evaluate if a certain number of pods in the given `ns` // WaitForPodsWithSchedulingGates waits for a given `timeout` to evaluate if a certain number of pods in the given `ns`
// match the given `schedulingGates`. // match the given `schedulingGates`.
func WaitForPodsWithSchedulingGates(c clientset.Interface, ns string, num int, timeout time.Duration, schedulingGates []v1.PodSchedulingGate) error { func WaitForPodsWithSchedulingGates(c clientset.Interface, ns string, num int, timeout time.Duration, schedulingGates []v1.PodSchedulingGate) error {
matched := 0 _, err := WaitForPods(context.TODO(), c, ns, metav1.ListOptions{}, Range{MinMatching: num, MaxMatching: num}, timeout,
err := wait.PollImmediate(framework.PollInterval(), timeout, func() (done bool, err error) { "have certain scheduling gates", func(pod *v1.Pod) bool {
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{}) return reflect.DeepEqual(pod.Spec.SchedulingGates, schedulingGates)
if err != nil { })
return handleWaitingAPIError(err, true, "listing pods") return err
}
matched = 0
for _, pod := range pods.Items {
if reflect.DeepEqual(pod.Spec.SchedulingGates, schedulingGates) {
matched++
}
}
if matched == num {
return true, nil
}
framework.Logf("expect %d pods carry the expected scheduling gates, but got %v", num, matched)
return false, nil
})
return maybeTimeoutError(err, "waiting for pods to carry the expected scheduling gates (want %d, matched %d)", num, matched)
} }
// WaitForPodTerminatedInNamespace returns an error if it takes too long for the pod to terminate, // WaitForPodTerminatedInNamespace returns an error if it takes too long for the pod to terminate,
@ -550,18 +452,12 @@ func WaitForPodRunningInNamespaceSlow(ctx context.Context, c clientset.Interface
} }
// WaitTimeoutForPodRunningInNamespace waits the given timeout duration for the specified pod to become running. // WaitTimeoutForPodRunningInNamespace waits the given timeout duration for the specified pod to become running.
// It does not need to exist yet when this function gets called and the pod is not expected to be recreated
// when it succeeds or fails.
func WaitTimeoutForPodRunningInNamespace(ctx context.Context, c clientset.Interface, podName, namespace string, timeout time.Duration) error { func WaitTimeoutForPodRunningInNamespace(ctx context.Context, c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return WaitForPodCondition(ctx, c, namespace, podName, "running", timeout, func(pod *v1.Pod) (bool, error) { return framework.Gomega().Eventually(ctx, framework.RetryNotFound(framework.GetObject(c.CoreV1().Pods(namespace).Get, podName, metav1.GetOptions{}))).
switch pod.Status.Phase { WithTimeout(timeout).
case v1.PodRunning: Should(BeRunningNoRetries())
return true, nil
case v1.PodFailed:
return false, errPodFailed
case v1.PodSucceeded:
return false, errPodCompleted
}
return false, nil
})
} }
// WaitForPodRunningInNamespace waits default amount of time (podStartTimeout) for the specified pod to become running. // WaitForPodRunningInNamespace waits default amount of time (podStartTimeout) for the specified pod to become running.
@ -595,17 +491,11 @@ func WaitForPodNoLongerRunningInNamespace(ctx context.Context, c clientset.Inter
func WaitTimeoutForPodReadyInNamespace(ctx context.Context, c clientset.Interface, podName, namespace string, timeout time.Duration) error { func WaitTimeoutForPodReadyInNamespace(ctx context.Context, c clientset.Interface, podName, namespace string, timeout time.Duration) error {
return WaitForPodCondition(ctx, c, namespace, podName, "running and ready", timeout, func(pod *v1.Pod) (bool, error) { return WaitForPodCondition(ctx, c, namespace, podName, "running and ready", timeout, func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase { switch pod.Status.Phase {
case v1.PodFailed: case v1.PodFailed, v1.PodSucceeded:
framework.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status) return false, gomega.StopTrying(fmt.Sprintf("The phase of Pod %s is %s which is unexpected.", pod.Name, pod.Status.Phase))
return false, errPodFailed
case v1.PodSucceeded:
framework.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status)
return false, errPodCompleted
case v1.PodRunning: case v1.PodRunning:
framework.Logf("The phase of Pod %s is %s (Ready = %v)", pod.Name, pod.Status.Phase, podutils.IsPodReady(pod))
return podutils.IsPodReady(pod), nil return podutils.IsPodReady(pod), nil
} }
framework.Logf("The phase of Pod %s is %s, waiting for it to be Running (with Ready = true)", pod.Name, pod.Status.Phase)
return false, nil return false, nil
}) })
} }
@ -637,108 +527,140 @@ func WaitForPodSuccessInNamespaceSlow(ctx context.Context, c clientset.Interface
// WaitForPodNotFoundInNamespace returns an error if it takes too long for the pod to fully terminate. // WaitForPodNotFoundInNamespace returns an error if it takes too long for the pod to fully terminate.
// Unlike `waitForPodTerminatedInNamespace`, the pod's Phase and Reason are ignored. If the pod Get // Unlike `waitForPodTerminatedInNamespace`, the pod's Phase and Reason are ignored. If the pod Get
// api returns IsNotFound then the wait stops and nil is returned. If the Get api returns an error other // api returns IsNotFound then the wait stops and nil is returned. If the Get api returns an error other
// than "not found" then that error is returned and the wait stops. // than "not found" and that error is final, that error is returned and the wait stops.
func WaitForPodNotFoundInNamespace(ctx context.Context, c clientset.Interface, podName, ns string, timeout time.Duration) error { func WaitForPodNotFoundInNamespace(ctx context.Context, c clientset.Interface, podName, ns string, timeout time.Duration) error {
var lastPod *v1.Pod err := framework.Gomega().Eventually(ctx, framework.HandleRetry(func(ctx context.Context) (*v1.Pod, error) {
err := wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) {
pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{}) pod, err := c.CoreV1().Pods(ns).Get(ctx, podName, metav1.GetOptions{})
if apierrors.IsNotFound(err) { if apierrors.IsNotFound(err) {
return true, nil // done return nil, nil
} }
if err != nil { return pod, err
return handleWaitingAPIError(err, true, "getting pod %s", podIdentifier(ns, podName)) })).WithTimeout(timeout).Should(gomega.BeNil())
} if err != nil {
lastPod = pod return fmt.Errorf("expected pod to not be found: %w", err)
return false, nil
})
if err == nil {
return nil
} }
if IsTimeout(err) && lastPod != nil { return nil
return TimeoutError(fmt.Sprintf("timed out while waiting for pod %s to be Not Found", podIdentifier(ns, podName)),
lastPod,
)
}
return maybeTimeoutError(err, "waiting for pod %s not found", podIdentifier(ns, podName))
}
// WaitForPodToDisappear waits the given timeout duration for the specified pod to disappear.
func WaitForPodToDisappear(ctx context.Context, c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
var lastPod *v1.Pod
err := wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
framework.Logf("Waiting for pod %s to disappear", podName)
options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(ctx, options)
if err != nil {
return handleWaitingAPIError(err, true, "listing pods")
}
found := false
for i, pod := range pods.Items {
if pod.Name == podName {
framework.Logf("Pod %s still exists", podName)
found = true
lastPod = &(pods.Items[i])
break
}
}
if !found {
framework.Logf("Pod %s no longer exists", podName)
return true, nil
}
return false, nil
})
if err == nil {
return nil
}
if IsTimeout(err) {
return TimeoutError(fmt.Sprintf("timed out while waiting for pod %s to disappear", podIdentifier(ns, podName)),
lastPod,
)
}
return maybeTimeoutError(err, "waiting for pod %s to disappear", podIdentifier(ns, podName))
} }
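The HandleRetry plus gomega.BeNil() pattern above is not specific to pods. A hypothetical sketch (assuming the same imports as above, including apierrors and gomega) that applies it to wait for a PVC to be deleted:

// waitForPVCDeleted waits until the named PVC is gone; "gone" is signalled by
// returning a nil object with a nil error, which satisfies gomega.BeNil().
func waitForPVCDeleted(ctx context.Context, c clientset.Interface, ns, name string, timeout time.Duration) error {
    return framework.Gomega().Eventually(ctx, framework.HandleRetry(func(ctx context.Context) (*v1.PersistentVolumeClaim, error) {
        pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, name, metav1.GetOptions{})
        if apierrors.IsNotFound(err) {
            return nil, nil
        }
        return pvc, err
    })).WithTimeout(timeout).Should(gomega.BeNil())
}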
// PodsResponding waits for the pods to respond. // WaitForPodsResponding waits for the pods to respond.
func PodsResponding(ctx context.Context, c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error { func WaitForPodsResponding(ctx context.Context, c clientset.Interface, ns string, controllerName string, wantName bool, timeout time.Duration, pods *v1.PodList) error {
if timeout == 0 {
timeout = podRespondingTimeout
}
ginkgo.By("trying to dial each unique pod") ginkgo.By("trying to dial each unique pod")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) label := labels.SelectorFromSet(labels.Set(map[string]string{"name": controllerName}))
err := wait.PollImmediateWithContext(ctx, framework.PollInterval(), podRespondingTimeout, NewProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses) options := metav1.ListOptions{LabelSelector: label.String()}
return maybeTimeoutError(err, "waiting for pods to be responsive")
type response struct {
podName string
response string
}
get := func(ctx context.Context) ([]response, error) {
currentPods, err := c.CoreV1().Pods(ns).List(ctx, options)
if err != nil {
return nil, fmt.Errorf("list pods: %w", err)
}
var responses []response
for _, pod := range pods.Items {
// Check that the replica list remains unchanged, otherwise we have problems.
if !isElementOf(pod.UID, currentPods) {
return nil, gomega.StopTrying(fmt.Sprintf("Pod with UID %s is no longer a member of the replica set. Must have been restarted for some reason.\nCurrent replica set:\n%s", pod.UID, format.Object(currentPods, 1)))
}
ctxUntil, cancel := context.WithTimeout(ctx, singleCallTimeout)
defer cancel()
body, err := c.CoreV1().RESTClient().Get().
Namespace(ns).
Resource("pods").
SubResource("proxy").
Name(string(pod.Name)).
Do(ctxUntil).
Raw()
if err != nil {
// We may encounter errors here because of a race between the pod readiness and apiserver
// proxy. So, we log the error and retry if this occurs.
return nil, fmt.Errorf("Controller %s: failed to Get from replica pod %s:\n%s\nPod status:\n%s",
controllerName, pod.Name,
format.Object(err, 1), format.Object(pod.Status, 1))
}
responses = append(responses, response{podName: pod.Name, response: string(body)})
}
return responses, nil
}
match := func(responses []response) (func() string, error) {
// The response checker expects the pod's name unless wantName is false, in
// which case it just checks for a non-empty response.
var unexpected []response
for _, response := range responses {
if wantName {
if response.response != response.podName {
unexpected = append(unexpected, response)
}
} else {
if len(response.response) == 0 {
unexpected = append(unexpected, response)
}
}
}
if len(unexpected) > 0 {
return func() string {
what := "some response"
if wantName {
what = "the pod's own name as response"
}
return fmt.Sprintf("Wanted %s, but the following pods replied with something else:\n%s", what, format.Object(unexpected, 1))
}, nil
}
return nil, nil
}
err := framework.Gomega().
Eventually(ctx, framework.HandleRetry(get)).
WithTimeout(timeout).
Should(framework.MakeMatcher(match))
if err != nil {
return fmt.Errorf("checking pod responses: %w", err)
}
return nil
}
func isElementOf(podUID apitypes.UID, pods *v1.PodList) bool {
for _, pod := range pods.Items {
if pod.UID == podUID {
return true
}
}
return false
} }
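A usage sketch for the renamed helper (hypothetical; the controller name "my-rc" is an assumption, and passing 0 as the timeout falls back to the default podRespondingTimeout):

// checkPodsRespond verifies that every pod in `pods` (for example the pods of
// a replication controller labelled name=my-rc) answers through the apiserver
// proxy with its own name.
func checkPodsRespond(ctx context.Context, c clientset.Interface, ns string, pods *v1.PodList) {
    framework.ExpectNoError(
        e2epod.WaitForPodsResponding(ctx, c, ns, "my-rc", true /* wantName */, 0 /* default timeout */, pods),
        "pods with label name=my-rc did not respond with their names")
}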
// WaitForNumberOfPods waits up to timeout to ensure there are exactly // WaitForNumberOfPods waits up to timeout to ensure there are exactly
// `num` pods in namespace `ns`. // `num` pods in namespace `ns`.
// It returns the matching Pods or a timeout error. // It returns the matching Pods or a timeout error.
func WaitForNumberOfPods(ctx context.Context, c clientset.Interface, ns string, num int, timeout time.Duration) (pods *v1.PodList, err error) { func WaitForNumberOfPods(ctx context.Context, c clientset.Interface, ns string, num int, timeout time.Duration) (pods *v1.PodList, err error) {
actualNum := 0 return WaitForPods(ctx, c, ns, metav1.ListOptions{}, Range{MinMatching: num, MaxMatching: num}, timeout, "exist", func(pod *v1.Pod) bool {
err = wait.PollImmediateWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) { return true
pods, err = c.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return handleWaitingAPIError(err, false, "listing pods")
}
actualNum = len(pods.Items)
return actualNum == num, nil
}) })
return pods, maybeTimeoutError(err, "waiting for there to be exactly %d pods in namespace (last seen %d)", num, actualNum)
} }
// WaitForPodsWithLabelScheduled waits for all matching pods to become scheduled and at least one // WaitForPodsWithLabelScheduled waits for all matching pods to become scheduled and at least one
// matching pod exists. Return the list of matching pods. // matching pod exists. Return the list of matching pods.
func WaitForPodsWithLabelScheduled(ctx context.Context, c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) { func WaitForPodsWithLabelScheduled(ctx context.Context, c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
opts := metav1.ListOptions{LabelSelector: label.String()} opts := metav1.ListOptions{LabelSelector: label.String()}
return WaitForAllPodsCondition(ctx, c, ns, opts, 1, "scheduled", podScheduledBeforeTimeout, func(pod *v1.Pod) (bool, error) { return WaitForPods(ctx, c, ns, opts, Range{MinFound: 1, AllMatching: true}, podScheduledBeforeTimeout, "be scheduled", func(pod *v1.Pod) bool {
if pod.Spec.NodeName == "" { return pod.Spec.NodeName != ""
return false, nil
}
return true, nil
}) })
} }
// WaitForPodsWithLabel waits up to podListTimeout for getting pods with certain label // WaitForPodsWithLabel waits up to podListTimeout for getting pods with certain label
func WaitForPodsWithLabel(ctx context.Context, c clientset.Interface, ns string, label labels.Selector) (*v1.PodList, error) { func WaitForPodsWithLabel(ctx context.Context, c clientset.Interface, ns string, label labels.Selector) (*v1.PodList, error) {
opts := metav1.ListOptions{LabelSelector: label.String()} opts := metav1.ListOptions{LabelSelector: label.String()}
return WaitForAllPodsCondition(ctx, c, ns, opts, 1, "existent", podListTimeout, func(pod *v1.Pod) (bool, error) { return WaitForPods(ctx, c, ns, opts, Range{MinFound: 1}, podListTimeout, "exist", func(pod *v1.Pod) bool {
return true, nil return true
}) })
} }
@ -746,31 +668,39 @@ func WaitForPodsWithLabel(ctx context.Context, c clientset.Interface, ns string,
// Return the list of matching pods. // Return the list of matching pods.
func WaitForPodsWithLabelRunningReady(ctx context.Context, c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) { func WaitForPodsWithLabelRunningReady(ctx context.Context, c clientset.Interface, ns string, label labels.Selector, num int, timeout time.Duration) (pods *v1.PodList, err error) {
opts := metav1.ListOptions{LabelSelector: label.String()} opts := metav1.ListOptions{LabelSelector: label.String()}
return WaitForAllPodsCondition(ctx, c, ns, opts, 1, "running and ready", timeout, testutils.PodRunningReady) return WaitForPods(ctx, c, ns, opts, Range{MinFound: num, AllMatching: true}, timeout, "be running and ready", RunningReady)
} }
// WaitForNRestartablePods tries to list restarting pods using ps until it finds `expect` of them, // WaitForNRestartablePods tries to list restarting pods using ps until it finds `expect` of them,
// returning their names if it can do so before timeout. // returning their names if it can do so before timeout.
func WaitForNRestartablePods(ctx context.Context, ps *testutils.PodStore, expect int, timeout time.Duration) ([]string, error) { func WaitForNRestartablePods(ctx context.Context, ps *testutils.PodStore, expect int, timeout time.Duration) ([]string, error) {
var pods []*v1.Pod var pods []*v1.Pod
var errLast error
found := wait.PollWithContext(ctx, framework.PollInterval(), timeout, func(ctx context.Context) (bool, error) { get := func(ctx context.Context) ([]*v1.Pod, error) {
allPods := ps.List() return ps.List(), nil
}
match := func(allPods []*v1.Pod) (func() string, error) {
pods = FilterNonRestartablePods(allPods) pods = FilterNonRestartablePods(allPods)
if len(pods) != expect { if len(pods) != expect {
errLast = fmt.Errorf("expected to find %d pods but found only %d", expect, len(pods)) return func() string {
framework.Logf("Error getting pods: %v", errLast) return fmt.Sprintf("expected to find non-restartable %d pods, but found %d:\n%s", expect, len(pods), format.Object(pods, 1))
return false, nil }, nil
} }
return true, nil return nil, nil
}) == nil }
err := framework.Gomega().
Eventually(ctx, framework.HandleRetry(get)).
WithTimeout(timeout).
Should(framework.MakeMatcher(match))
if err != nil {
return nil, err
}
podNames := make([]string, len(pods)) podNames := make([]string, len(pods))
for i, p := range pods { for i, p := range pods {
podNames[i] = p.ObjectMeta.Name podNames[i] = p.Name
}
if !found {
return podNames, fmt.Errorf("couldn't find %d pods within %v; last error: %v",
expect, timeout, errLast)
} }
return podNames, nil return podNames, nil
} }
@ -842,23 +772,3 @@ func WaitForContainerRunning(ctx context.Context, c clientset.Interface, namespa
return false, nil return false, nil
}) })
} }
// handleWaitingAPIErrror handles an error from an API request in the context of a Wait function.
// If the error is retryable, sleep the recommended delay and ignore the error.
// If the error is terminal, return it.
func handleWaitingAPIError(err error, retryNotFound bool, taskFormat string, taskArgs ...interface{}) (bool, error) {
taskDescription := fmt.Sprintf(taskFormat, taskArgs...)
if retryNotFound && apierrors.IsNotFound(err) {
framework.Logf("Ignoring NotFound error while " + taskDescription)
return false, nil
}
if retry, delay := framework.ShouldRetry(err); retry {
framework.Logf("Retryable error while %s, retrying after %v: %v", taskDescription, delay, err)
if delay > 0 {
time.Sleep(delay)
}
return false, nil
}
framework.Logf("Encountered non-retryable error while %s: %v", taskDescription, err)
return false, err
}

View File

@ -18,19 +18,23 @@ package pod_test
import ( import (
"context" "context"
"strings" "fmt"
"regexp"
"testing" "testing"
"time" "time"
"github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2"
"github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/reporters"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/internal/output" "k8s.io/kubernetes/test/e2e/framework/internal/output"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
_ "k8s.io/kubernetes/test/utils/format" // activate YAML object dumps
) )
// The line number of the following code is checked in TestFailureOutput below. // The line number of the following code is checked in TestFailureOutput below.
@ -43,36 +47,67 @@ import (
// //
// //
// //
//
//
//
//
// This must be line #50. // This must be line #50.
var _ = ginkgo.Describe("pod", func() { var _ = ginkgo.Describe("pod", func() {
ginkgo.It("not found", func(ctx context.Context) { ginkgo.It("not found, must exist", func(ctx context.Context) {
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, clientSet, "no-such-pod", "default", timeout /* no explanation here to cover that code path */)) gomega.Eventually(ctx, framework.HandleRetry(getNoSuchPod)).WithTimeout(timeout).Should(e2epod.BeInPhase(v1.PodRunning))
})
ginkgo.It("not found, retry", func(ctx context.Context) {
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, clientSet, "no-such-pod", "default", timeout))
})
ginkgo.It("not found, retry with wrappers", func(ctx context.Context) {
gomega.Eventually(ctx, framework.RetryNotFound(framework.HandleRetry(getNoSuchPod))).WithTimeout(timeout).Should(e2epod.BeInPhase(v1.PodRunning))
})
ginkgo.It("not found, retry with inverted wrappers", func(ctx context.Context) {
gomega.Eventually(ctx, framework.HandleRetry(framework.RetryNotFound(getNoSuchPod))).WithTimeout(timeout).Should(e2epod.BeInPhase(v1.PodRunning))
}) })
ginkgo.It("not running", func(ctx context.Context) { ginkgo.It("not running", func(ctx context.Context) {
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, clientSet, podName, podNamespace, timeout), "wait for pod %s running", podName /* tests printf formatting */) ginkgo.By(fmt.Sprintf("waiting for pod %s to run", podName))
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, clientSet, podName, podNamespace, timeout))
}) })
ginkgo.It("failed", func(ctx context.Context) { ginkgo.It("failed", func(ctx context.Context) {
framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, clientSet, failedPodName, podNamespace, timeout)) framework.ExpectNoError(e2epod.WaitTimeoutForPodRunningInNamespace(ctx, clientSet, failedPodName, podNamespace, timeout))
}) })
ginkgo.It("gets reported with API error", func(ctx context.Context) {
called := false
getPod := func(ctx context.Context) (*v1.Pod, error) {
if called {
ginkgo.By("returning fake API error")
return nil, apierrors.NewTooManyRequests("fake API error", 10)
}
called = true
pod, err := clientSet.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
return nil, err
}
ginkgo.By("returning pod")
return pod, err
}
gomega.Eventually(ctx, framework.HandleRetry(getPod)).WithTimeout(5 * timeout).Should(e2epod.BeInPhase(v1.PodRunning))
})
}) })
func getNoSuchPod(ctx context.Context) (*v1.Pod, error) {
return clientSet.CoreV1().Pods("default").Get(ctx, "no-such-pod", metav1.GetOptions{})
}
const ( const (
podName = "pending-pod" podName = "pending-pod"
podNamespace = "default" podNamespace = "default"
failedPodName = "failed-pod" failedPodName = "failed-pod"
timeout = 5 * time.Second timeout = time.Second
) )
var ( var (
clientSet = fake.NewSimpleClientset( clientSet = fake.NewSimpleClientset(
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: podNamespace}}, &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: podNamespace}, Status: v1.PodStatus{Phase: v1.PodPending}},
&v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: failedPodName, Namespace: podNamespace}, Status: v1.PodStatus{Phase: v1.PodFailed}}, &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: failedPodName, Namespace: podNamespace}, Status: v1.PodStatus{Phase: v1.PodFailed}},
) )
) )
@ -80,35 +115,43 @@ var (
func TestFailureOutput(t *testing.T) { func TestFailureOutput(t *testing.T) {
expected := output.TestResult{ expected := output.TestResult{
// "INFO: Ignoring ..." or "INFO: Pod ..." will normally occur NormalizeOutput: func(in string) string {
// every two seconds, but we reduce it to one line because it return regexp.MustCompile(`wait.go:[[:digit:]]*`).ReplaceAllString(in, `wait.go`)
// might occur less often on a loaded system.
NormalizeOutput: func(output string) string {
return trimDuplicateLines(output, "INFO: ")
}, },
Suite: reporters.JUnitTestSuite{ Suite: reporters.JUnitTestSuite{
Tests: 3, Tests: 7,
Failures: 3, Failures: 7,
Errors: 0, Errors: 0,
Disabled: 0, Disabled: 0,
Skipped: 0, Skipped: 0,
TestCases: []reporters.JUnitTestCase{ TestCases: []reporters.JUnitTestCase{
{ {
Name: "[It] pod not found", Name: "[It] pod not found, must exist",
Status: "failed", Status: "failed",
Failure: &reporters.JUnitFailure{ Failure: &reporters.JUnitFailure{
Type: "failed", Type: "failed",
Description: `[FAILED] error while waiting for pod default/no-such-pod to be running: pods "no-such-pod" not found Description: `[FAILED] Told to stop trying after <after>.
Unexpected final error while getting *v1.Pod: pods "no-such-pod" not found
In [It] at: wait_test.go:54 <time> In [It] at: wait_test.go:54 <time>
`, `,
}, },
SystemErr: `> Enter [It] not found - wait_test.go:53 <time> SystemErr: `> Enter [It] not found, must exist - wait_test.go:53 <time>
INFO: Waiting up to 5s for pod "no-such-pod" in namespace "default" to be "running" [FAILED] Told to stop trying after <after>.
INFO: Ignoring NotFound error while getting pod default/no-such-pod Unexpected final error while getting *v1.Pod: pods "no-such-pod" not found
INFO: Unexpected error: In [It] at: wait_test.go:54 <time>
<*fmt.wrapError>: { < Exit [It] not found, must exist - wait_test.go:53 <time>
msg: "error while waiting for pod default/no-such-pod to be running: pods \"no-such-pod\" not found", `,
err: <*errors.StatusError>{ },
{
Name: "[It] pod not found, retry",
Status: "failed",
Failure: &reporters.JUnitFailure{
Type: "failed",
Description: `[FAILED] Timed out after <after>.
The function passed to Eventually returned the following error:
pods "no-such-pod" not found
<framework.transientError>: {
error: <*errors.StatusError>{
ErrStatus: { ErrStatus: {
TypeMeta: {Kind: "", APIVersion: ""}, TypeMeta: {Kind: "", APIVersion: ""},
ListMeta: { ListMeta: {
@ -125,134 +168,280 @@ INFO: Unexpected error:
}, },
}, },
} }
[FAILED] error while waiting for pod default/no-such-pod to be running: pods "no-such-pod" not found In [It] at: wait_test.go:58 <time>
In [It] at: wait_test.go:54 <time> `,
< Exit [It] not found - wait_test.go:53 <time> },
SystemErr: `> Enter [It] not found, retry - wait_test.go:57 <time>
INFO: Failed inside E2E framework:
k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodRunningInNamespace()
wait.go
k8s.io/kubernetes/test/e2e/framework/pod_test.glob..func1.2()
wait_test.go:58
[FAILED] Timed out after <after>.
The function passed to Eventually returned the following error:
pods "no-such-pod" not found
<framework.transientError>: {
error: <*errors.StatusError>{
ErrStatus: {
TypeMeta: {Kind: "", APIVersion: ""},
ListMeta: {
SelfLink: "",
ResourceVersion: "",
Continue: "",
RemainingItemCount: nil,
},
Status: "Failure",
Message: "pods \"no-such-pod\" not found",
Reason: "NotFound",
Details: {Name: "no-such-pod", Group: "", Kind: "pods", UID: "", Causes: nil, RetryAfterSeconds: 0},
Code: 404,
},
},
}
In [It] at: wait_test.go:58 <time>
< Exit [It] not found, retry - wait_test.go:57 <time>
`,
},
{
Name: "[It] pod not found, retry with wrappers",
Status: "failed",
Failure: &reporters.JUnitFailure{
Type: "failed",
Description: `[FAILED] Timed out after <after>.
The function passed to Eventually returned the following error:
pods "no-such-pod" not found
<framework.transientError>: {
error: <*errors.StatusError>{
ErrStatus: {
TypeMeta: {Kind: "", APIVersion: ""},
ListMeta: {
SelfLink: "",
ResourceVersion: "",
Continue: "",
RemainingItemCount: nil,
},
Status: "Failure",
Message: "pods \"no-such-pod\" not found",
Reason: "NotFound",
Details: {Name: "no-such-pod", Group: "", Kind: "pods", UID: "", Causes: nil, RetryAfterSeconds: 0},
Code: 404,
},
},
}
In [It] at: wait_test.go:62 <time>
`,
},
SystemErr: `> Enter [It] not found, retry with wrappers - wait_test.go:61 <time>
[FAILED] Timed out after <after>.
The function passed to Eventually returned the following error:
pods "no-such-pod" not found
<framework.transientError>: {
error: <*errors.StatusError>{
ErrStatus: {
TypeMeta: {Kind: "", APIVersion: ""},
ListMeta: {
SelfLink: "",
ResourceVersion: "",
Continue: "",
RemainingItemCount: nil,
},
Status: "Failure",
Message: "pods \"no-such-pod\" not found",
Reason: "NotFound",
Details: {Name: "no-such-pod", Group: "", Kind: "pods", UID: "", Causes: nil, RetryAfterSeconds: 0},
Code: 404,
},
},
}
In [It] at: wait_test.go:62 <time>
< Exit [It] not found, retry with wrappers - wait_test.go:61 <time>
`,
},
{
Name: "[It] pod not found, retry with inverted wrappers",
Status: "failed",
Failure: &reporters.JUnitFailure{
Type: "failed",
Description: `[FAILED] Timed out after <after>.
The function passed to Eventually returned the following error:
pods "no-such-pod" not found
<framework.transientError>: {
error: <*errors.StatusError>{
ErrStatus: {
TypeMeta: {Kind: "", APIVersion: ""},
ListMeta: {
SelfLink: "",
ResourceVersion: "",
Continue: "",
RemainingItemCount: nil,
},
Status: "Failure",
Message: "pods \"no-such-pod\" not found",
Reason: "NotFound",
Details: {Name: "no-such-pod", Group: "", Kind: "pods", UID: "", Causes: nil, RetryAfterSeconds: 0},
Code: 404,
},
},
}
In [It] at: wait_test.go:66 <time>
`,
},
SystemErr: `> Enter [It] not found, retry with inverted wrappers - wait_test.go:65 <time>
[FAILED] Timed out after <after>.
The function passed to Eventually returned the following error:
pods "no-such-pod" not found
<framework.transientError>: {
error: <*errors.StatusError>{
ErrStatus: {
TypeMeta: {Kind: "", APIVersion: ""},
ListMeta: {
SelfLink: "",
ResourceVersion: "",
Continue: "",
RemainingItemCount: nil,
},
Status: "Failure",
Message: "pods \"no-such-pod\" not found",
Reason: "NotFound",
Details: {Name: "no-such-pod", Group: "", Kind: "pods", UID: "", Causes: nil, RetryAfterSeconds: 0},
Code: 404,
},
},
}
In [It] at: wait_test.go:66 <time>
< Exit [It] not found, retry with inverted wrappers - wait_test.go:65 <time>
`, `,
}, },
{ {
Name: "[It] pod not running", Name: "[It] pod not running",
Status: "failed", Status: "failed",
Failure: &reporters.JUnitFailure{ Failure: &reporters.JUnitFailure{
Description: `[FAILED] wait for pod pending-pod running: timed out while waiting for pod default/pending-pod to be running Description: `[FAILED] Timed out after <after>.
In [It] at: wait_test.go:58 <time> Expected Pod to be in <v1.PodPhase>: "Running"
Got instead:
<*v1.Pod>:
metadata:
creationTimestamp: null
name: pending-pod
namespace: default
spec:
containers: null
status:
phase: Pending
In [It] at: wait_test.go:71 <time>
`, `,
Type: "failed", Type: "failed",
}, },
SystemErr: `> Enter [It] not running - wait_test.go:57 <time> SystemErr: `> Enter [It] not running - wait_test.go:69 <time>
INFO: Waiting up to 5s for pod "pending-pod" in namespace "default" to be "running" STEP: waiting for pod pending-pod to run - wait_test.go:70 <time>
INFO: Pod "pending-pod": Phase="", Reason="", readiness=false. Elapsed: <elapsed> INFO: Failed inside E2E framework:
INFO: Unexpected error: wait for pod pending-pod running: k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodRunningInNamespace()
<*pod.timeoutError>: { wait.go
msg: "timed out while waiting for pod default/pending-pod to be running", k8s.io/kubernetes/test/e2e/framework/pod_test.glob..func1.5()
observedObjects: [ wait_test.go:71
<*v1.Pod>{ [FAILED] Timed out after <after>.
TypeMeta: {Kind: "", APIVersion: ""}, Expected Pod to be in <v1.PodPhase>: "Running"
ObjectMeta: { Got instead:
Name: "pending-pod", <*v1.Pod>:
GenerateName: "", metadata:
Namespace: "default", creationTimestamp: null
SelfLink: "", name: pending-pod
UID: "", namespace: default
ResourceVersion: "", spec:
Generation: 0, containers: null
CreationTimestamp: { status:
Time: {wall: 0, ext: 0, loc: nil}, phase: Pending
}, In [It] at: wait_test.go:71 <time>
DeletionTimestamp: nil, < Exit [It] not running - wait_test.go:69 <time>
DeletionGracePeriodSeconds: nil,
Labels: nil,
Annotations: nil,
OwnerReferences: nil,
Finalizers: nil,
ManagedFields: nil,
},
Spec: {
Volumes: nil,
InitContainers: nil,
Containers: nil,
EphemeralContainers: nil,
RestartPolicy: "",
TerminationGracePeriodSeconds: nil,
ActiveDeadlineSeconds: nil,
DNSPolicy: "",
NodeSelector: nil,
ServiceAccountName: "",
DeprecatedServiceAccount: "",
AutomountServiceAccountToken: nil,
NodeName: "",
HostNetwork: false,
HostPID: false,
HostIPC: false,
ShareProcessNamespace: nil,
SecurityContext: nil,
ImagePullSecrets: nil,
Hostname: "",
Subdomain: "",
Affinity: nil,
SchedulerName: "",
Tolerations: nil,
HostAliases: nil,
PriorityClassName: "",
Priority: nil,
DNSConfig: nil,
ReadinessGates: nil,
RuntimeClassName: nil,
EnableServiceLinks: nil,
PreemptionPolicy: nil,
Overhead: nil,
TopologySpreadConstraints: nil,
SetHostnameAsFQDN: nil,
OS: nil,
HostUsers: nil,
SchedulingGates: nil,
ResourceClaims: nil,
},
Status: {
Phase: "",
Conditions: nil,
Message: "",
Reason: "",
NominatedNodeName: "",
HostIP: "",
PodIP: "",
PodIPs: nil,
StartTime: nil,
InitContainerStatuses: nil,
ContainerStatuses: nil,
QOSClass: "",
EphemeralContainerStatuses: nil,
},
},
],
}
[FAILED] wait for pod pending-pod running: timed out while waiting for pod default/pending-pod to be running
In [It] at: wait_test.go:58 <time>
< Exit [It] not running - wait_test.go:57 <time>
`, `,
}, },
{ {
Name: "[It] pod failed", Name: "[It] pod failed",
Status: "failed", Status: "failed",
Failure: &reporters.JUnitFailure{ Failure: &reporters.JUnitFailure{
Description: `[FAILED] error while waiting for pod default/failed-pod to be running: final error: pod failed permanently Description: `[FAILED] Told to stop trying after <after>.
In [It] at: wait_test.go:62 <time> Expected pod to reach phase "Running", got final phase "Failed" instead.
In [It] at: wait_test.go:75 <time>
`, `,
Type: "failed", Type: "failed",
}, },
SystemErr: `> Enter [It] failed - wait_test.go:61 <time> SystemErr: `> Enter [It] failed - wait_test.go:74 <time>
INFO: Waiting up to 5s for pod "failed-pod" in namespace "default" to be "running" INFO: Failed inside E2E framework:
<*fmt.wrapError>: { k8s.io/kubernetes/test/e2e/framework/pod.WaitTimeoutForPodRunningInNamespace()
msg: "error while waiting for pod default/failed-pod to be running: final error: pod failed permanently", wait.go
err: <*pod.FinalErr>{ k8s.io/kubernetes/test/e2e/framework/pod_test.glob..func1.6()
Err: <*errors.errorString>{ wait_test.go:75
s: "pod failed permanently", [FAILED] Told to stop trying after <after>.
}, Expected pod to reach phase "Running", got final phase "Failed" instead.
}, In [It] at: wait_test.go:75 <time>
< Exit [It] failed - wait_test.go:74 <time>
`,
},
{
Name: "[It] pod gets reported with API error",
Status: "failed",
Failure: &reporters.JUnitFailure{
Description: `[FAILED] Timed out after <after>.
The function passed to Eventually returned the following error:
fake API error
<*errors.StatusError>: {
ErrStatus:
code: 429
details:
retryAfterSeconds: 10
message: fake API error
metadata: {}
reason: TooManyRequests
status: Failure,
} }
[FAILED] error while waiting for pod default/failed-pod to be running: final error: pod failed permanently At one point, however, the function did return successfully.
In [It] at: wait_test.go:62 <time> Yet, Eventually failed because the matcher was not satisfied:
< Exit [It] failed - wait_test.go:61 <time> Expected Pod to be in <v1.PodPhase>: "Running"
Got instead:
<*v1.Pod>:
metadata:
creationTimestamp: null
name: pending-pod
namespace: default
spec:
containers: null
status:
phase: Pending
In [It] at: wait_test.go:93 <time>
`,
Type: "failed",
},
SystemErr: `> Enter [It] gets reported with API error - wait_test.go:78 <time>
STEP: returning pod - wait_test.go:90 <time>
STEP: returning fake API error - wait_test.go:82 <time>
[FAILED] Timed out after <after>.
The function passed to Eventually returned the following error:
fake API error
<*errors.StatusError>: {
ErrStatus:
code: 429
details:
retryAfterSeconds: 10
message: fake API error
metadata: {}
reason: TooManyRequests
status: Failure,
}
At one point, however, the function did return successfully.
Yet, Eventually failed because the matcher was not satisfied:
Expected Pod to be in <v1.PodPhase>: "Running"
Got instead:
<*v1.Pod>:
metadata:
creationTimestamp: null
name: pending-pod
namespace: default
spec:
containers: null
status:
phase: Pending
In [It] at: wait_test.go:93 <time>
< Exit [It] gets reported with API error - wait_test.go:78 <time>
`, `,
}, },
}, },
@ -260,24 +449,3 @@ In [It] at: wait_test.go:62 <time>
} }
output.TestGinkgoOutput(t, expected) output.TestGinkgoOutput(t, expected)
} }
func trimDuplicateLines(output, prefix string) string {
lines := strings.Split(output, "\n")
trimming := false
validLines := 0
for i := 0; i < len(lines); i++ {
if strings.HasPrefix(lines[i], prefix) {
// Keep the first line, and only that one.
if !trimming {
trimming = true
lines[validLines] = lines[i]
validLines++
}
} else {
trimming = false
lines[validLines] = lines[i]
validLines++
}
}
return strings.Join(lines[0:validLines], "\n")
}

View File

@ -68,7 +68,7 @@ func (p *Provider) GroupSize(group string) (int, error) {
client := autoscaling.New(awsSession) client := autoscaling.New(awsSession)
instanceGroup, err := awscloud.DescribeInstanceGroup(client, group) instanceGroup, err := awscloud.DescribeInstanceGroup(client, group)
if err != nil { if err != nil {
return -1, fmt.Errorf("error describing instance group: %v", err) return -1, fmt.Errorf("error describing instance group: %w", err)
} }
if instanceGroup == nil { if instanceGroup == nil {
return -1, fmt.Errorf("instance group not found: %s", group) return -1, fmt.Errorf("instance group not found: %s", group)
@ -157,7 +157,7 @@ func (p *Provider) DeletePD(pdName string) error {
if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" { if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" {
framework.Logf("volume deletion implicitly succeeded because volume %q does not exist.", pdName) framework.Logf("volume deletion implicitly succeeded because volume %q does not exist.", pdName)
} else { } else {
return fmt.Errorf("error deleting EBS volumes: %v", err) return fmt.Errorf("error deleting EBS volumes: %w", err)
} }
} }
return nil return nil

View File

@ -374,22 +374,22 @@ func VerifyFirewallRule(res, exp *compute.Firewall, network string, portsSubset
expPorts := PackProtocolsPortsFromFirewall(exp.Allowed) expPorts := PackProtocolsPortsFromFirewall(exp.Allowed)
if portsSubset { if portsSubset {
if err := isPortsSubset(expPorts, actualPorts); err != nil { if err := isPortsSubset(expPorts, actualPorts); err != nil {
return fmt.Errorf("incorrect allowed protocol ports: %v", err) return fmt.Errorf("incorrect allowed protocol ports: %w", err)
} }
} else { } else {
if err := SameStringArray(actualPorts, expPorts, false); err != nil { if err := SameStringArray(actualPorts, expPorts, false); err != nil {
return fmt.Errorf("incorrect allowed protocols ports: %v", err) return fmt.Errorf("incorrect allowed protocols ports: %w", err)
} }
} }
if err := SameStringArray(res.SourceRanges, exp.SourceRanges, false); err != nil { if err := SameStringArray(res.SourceRanges, exp.SourceRanges, false); err != nil {
return fmt.Errorf("incorrect source ranges %v, expected %v: %v", res.SourceRanges, exp.SourceRanges, err) return fmt.Errorf("incorrect source ranges %v, expected %v: %w", res.SourceRanges, exp.SourceRanges, err)
} }
if err := SameStringArray(res.SourceTags, exp.SourceTags, false); err != nil { if err := SameStringArray(res.SourceTags, exp.SourceTags, false); err != nil {
return fmt.Errorf("incorrect source tags %v, expected %v: %v", res.SourceTags, exp.SourceTags, err) return fmt.Errorf("incorrect source tags %v, expected %v: %w", res.SourceTags, exp.SourceTags, err)
} }
if err := SameStringArray(res.TargetTags, exp.TargetTags, false); err != nil { if err := SameStringArray(res.TargetTags, exp.TargetTags, false); err != nil {
return fmt.Errorf("incorrect target tags %v, expected %v: %v", res.TargetTags, exp.TargetTags, err) return fmt.Errorf("incorrect target tags %v, expected %v: %w", res.TargetTags, exp.TargetTags, err)
} }
return nil return nil
} }

View File

@ -68,7 +68,7 @@ func factory() (framework.ProviderInterface, error) {
if region == "" { if region == "" {
region, err = gcecloud.GetGCERegion(zone) region, err = gcecloud.GetGCERegion(zone)
if err != nil { if err != nil {
return nil, fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err) return nil, fmt.Errorf("error parsing GCE/GKE region from zone %q: %w", zone, err)
} }
} }
managedZones := []string{} // Manage all zones in the region managedZones := []string{} // Manage all zones in the region
@ -95,7 +95,7 @@ func factory() (framework.ProviderInterface, error) {
}) })
if err != nil { if err != nil {
return nil, fmt.Errorf("Error building GCE/GKE provider: %v", err) return nil, fmt.Errorf("Error building GCE/GKE provider: %w", err)
} }
// Arbitrarily pick one of the zones we have nodes in, looking at prepopulated zones first. // Arbitrarily pick one of the zones we have nodes in, looking at prepopulated zones first.
@ -189,7 +189,7 @@ func (p *Provider) EnsureLoadBalancerResourcesDeleted(ctx context.Context, ip, p
project := framework.TestContext.CloudConfig.ProjectID project := framework.TestContext.CloudConfig.ProjectID
region, err := gcecloud.GetGCERegion(framework.TestContext.CloudConfig.Zone) region, err := gcecloud.GetGCERegion(framework.TestContext.CloudConfig.Zone)
if err != nil { if err != nil {
return fmt.Errorf("could not get region for zone %q: %v", framework.TestContext.CloudConfig.Zone, err) return fmt.Errorf("could not get region for zone %q: %w", framework.TestContext.CloudConfig.Zone, err)
} }
return wait.PollWithContext(ctx, 10*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) { return wait.PollWithContext(ctx, 10*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
@ -304,7 +304,7 @@ func (p *Provider) cleanupGCEResources(ctx context.Context, c clientset.Interfac
var err error var err error
region, err = gcecloud.GetGCERegion(zone) region, err = gcecloud.GetGCERegion(zone)
if err != nil { if err != nil {
return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err) return fmt.Errorf("error parsing GCE/GKE region from zone %q: %w", zone, err)
} }
} }
if err := p.gceCloud.DeleteFirewall(gcecloud.MakeFirewallName(loadBalancerName)); err != nil && if err := p.gceCloud.DeleteFirewall(gcecloud.MakeFirewallName(loadBalancerName)); err != nil &&
@ -404,7 +404,7 @@ func GetGCECloud() (*gcecloud.Cloud, error) {
func GetClusterID(ctx context.Context, c clientset.Interface) (string, error) { func GetClusterID(ctx context.Context, c clientset.Interface) (string, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, gcecloud.UIDConfigMapName, metav1.GetOptions{}) cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, gcecloud.UIDConfigMapName, metav1.GetOptions{})
if err != nil || cm == nil { if err != nil || cm == nil {
return "", fmt.Errorf("error getting cluster ID: %v", err) return "", fmt.Errorf("error getting cluster ID: %w", err)
} }
clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster] clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster]
providerID, providerIDExists := cm.Data[gcecloud.UIDProvider] providerID, providerIDExists := cm.Data[gcecloud.UIDProvider]

View File

@ -641,12 +641,12 @@ func (cont *IngressController) verifyBackendMode(svcPorts map[string]v1.ServiceP
gceCloud := cont.Cloud.Provider.(*Provider).gceCloud gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
beList, err := gceCloud.ListGlobalBackendServices() beList, err := gceCloud.ListGlobalBackendServices()
if err != nil { if err != nil {
return fmt.Errorf("failed to list backend services: %v", err) return fmt.Errorf("failed to list backend services: %w", err)
} }
hcList, err := gceCloud.ListHealthChecks() hcList, err := gceCloud.ListHealthChecks()
if err != nil { if err != nil {
return fmt.Errorf("failed to list health checks: %v", err) return fmt.Errorf("failed to list health checks: %w", err)
} }
// Generate short UID // Generate short UID

View File

@ -141,7 +141,7 @@ func PVPVCCleanup(ctx context.Context, c clientset.Interface, ns string, pv *v1.
if pvc != nil { if pvc != nil {
err := DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns) err := DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns)
if err != nil { if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvc.Name, err)) errs = append(errs, fmt.Errorf("failed to delete PVC %q: %w", pvc.Name, err))
} }
} else { } else {
framework.Logf("pvc is nil") framework.Logf("pvc is nil")
@ -149,7 +149,7 @@ func PVPVCCleanup(ctx context.Context, c clientset.Interface, ns string, pv *v1.
if pv != nil { if pv != nil {
err := DeletePersistentVolume(ctx, c, pv.Name) err := DeletePersistentVolume(ctx, c, pv.Name)
if err != nil { if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pv.Name, err)) errs = append(errs, fmt.Errorf("failed to delete PV %q: %w", pv.Name, err))
} }
} else { } else {
framework.Logf("pv is nil") framework.Logf("pv is nil")
@ -166,7 +166,7 @@ func PVPVCMapCleanup(ctx context.Context, c clientset.Interface, ns string, pvol
for pvcKey := range claims { for pvcKey := range claims {
err := DeletePersistentVolumeClaim(ctx, c, pvcKey.Name, ns) err := DeletePersistentVolumeClaim(ctx, c, pvcKey.Name, ns)
if err != nil { if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvcKey.Name, err)) errs = append(errs, fmt.Errorf("failed to delete PVC %q: %w", pvcKey.Name, err))
} else { } else {
delete(claims, pvcKey) delete(claims, pvcKey)
} }
@ -175,7 +175,7 @@ func PVPVCMapCleanup(ctx context.Context, c clientset.Interface, ns string, pvol
for pvKey := range pvols { for pvKey := range pvols {
err := DeletePersistentVolume(ctx, c, pvKey) err := DeletePersistentVolume(ctx, c, pvKey)
if err != nil { if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pvKey, err)) errs = append(errs, fmt.Errorf("failed to delete PV %q: %w", pvKey, err))
} else { } else {
delete(pvols, pvKey) delete(pvols, pvKey)
} }
@ -189,7 +189,7 @@ func DeletePersistentVolume(ctx context.Context, c clientset.Interface, pvName s
framework.Logf("Deleting PersistentVolume %q", pvName) framework.Logf("Deleting PersistentVolume %q", pvName)
err := c.CoreV1().PersistentVolumes().Delete(ctx, pvName, metav1.DeleteOptions{}) err := c.CoreV1().PersistentVolumes().Delete(ctx, pvName, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) { if err != nil && !apierrors.IsNotFound(err) {
return fmt.Errorf("PV Delete API error: %v", err) return fmt.Errorf("PV Delete API error: %w", err)
} }
} }
return nil return nil
@ -201,7 +201,7 @@ func DeletePersistentVolumeClaim(ctx context.Context, c clientset.Interface, pvc
framework.Logf("Deleting PersistentVolumeClaim %q", pvcName) framework.Logf("Deleting PersistentVolumeClaim %q", pvcName)
err := c.CoreV1().PersistentVolumeClaims(ns).Delete(ctx, pvcName, metav1.DeleteOptions{}) err := c.CoreV1().PersistentVolumeClaims(ns).Delete(ctx, pvcName, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) { if err != nil && !apierrors.IsNotFound(err) {
return fmt.Errorf("PVC Delete API error: %v", err) return fmt.Errorf("PVC Delete API error: %w", err)
} }
} }
return nil return nil
@ -222,13 +222,13 @@ func DeletePVCandValidatePV(ctx context.Context, c clientset.Interface, timeouts
framework.Logf("Waiting for reclaim process to complete.") framework.Logf("Waiting for reclaim process to complete.")
err = WaitForPersistentVolumePhase(ctx, expectPVPhase, c, pv.Name, framework.Poll, timeouts.PVReclaim) err = WaitForPersistentVolumePhase(ctx, expectPVPhase, c, pv.Name, framework.Poll, timeouts.PVReclaim)
if err != nil { if err != nil {
return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err) return fmt.Errorf("pv %q phase did not become %v: %w", pv.Name, expectPVPhase, err)
} }
// examine the pv's ClaimRef and UID and compare to expected values // examine the pv's ClaimRef and UID and compare to expected values
pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{}) pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return fmt.Errorf("PV Get API error: %v", err) return fmt.Errorf("PV Get API error: %w", err)
} }
cr := pv.Spec.ClaimRef cr := pv.Spec.ClaimRef
if expectPVPhase == v1.VolumeAvailable { if expectPVPhase == v1.VolumeAvailable {
@ -260,7 +260,7 @@ func DeletePVCandValidatePVGroup(ctx context.Context, c clientset.Interface, tim
for pvName := range pvols { for pvName := range pvols {
pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil { if err != nil {
return fmt.Errorf("PV Get API error: %v", err) return fmt.Errorf("PV Get API error: %w", err)
} }
cr := pv.Spec.ClaimRef cr := pv.Spec.ClaimRef
// if pv is bound then delete the pvc it is bound to // if pv is bound then delete the pvc it is bound to
@ -279,7 +279,7 @@ func DeletePVCandValidatePVGroup(ctx context.Context, c clientset.Interface, tim
return err return err
} }
} else if !apierrors.IsNotFound(err) { } else if !apierrors.IsNotFound(err) {
return fmt.Errorf("PVC Get API error: %v", err) return fmt.Errorf("PVC Get API error: %w", err)
} }
// delete pvckey from map even if apierrors.IsNotFound above is true and thus the // delete pvckey from map even if apierrors.IsNotFound above is true and thus the
// claim was not actually deleted here // claim was not actually deleted here
@ -316,10 +316,10 @@ func createPV(ctx context.Context, c clientset.Interface, timeouts *framework.Ti
}) })
// if we have an error from creating the PV, use that instead of a timeout error // if we have an error from creating the PV, use that instead of a timeout error
if lastCreateErr != nil { if lastCreateErr != nil {
return nil, fmt.Errorf("PV Create API error: %v", err) return nil, fmt.Errorf("PV Create API error: %w", err)
} }
if err != nil { if err != nil {
return nil, fmt.Errorf("PV Create API error: %v", err) return nil, fmt.Errorf("PV Create API error: %w", err)
} }
return resultPV, nil return resultPV, nil
@ -334,7 +334,7 @@ func CreatePV(ctx context.Context, c clientset.Interface, timeouts *framework.Ti
func CreatePVC(ctx context.Context, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) { func CreatePVC(ctx context.Context, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvc, metav1.CreateOptions{}) pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvc, metav1.CreateOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("PVC Create API error: %v", err) return nil, fmt.Errorf("PVC Create API error: %w", err)
} }
return pvc, nil return pvc, nil
} }
@ -464,24 +464,24 @@ func WaitOnPVandPVC(ctx context.Context, c clientset.Interface, timeouts *framew
framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name) framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
err := WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, pvc.Name, framework.Poll, timeouts.ClaimBound) err := WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, pvc.Name, framework.Poll, timeouts.ClaimBound)
if err != nil { if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err) return fmt.Errorf("PVC %q did not become Bound: %w", pvc.Name, err)
} }
// Wait for PersistentVolume.Status.Phase to be Bound, which it should be // Wait for PersistentVolume.Status.Phase to be Bound, which it should be
// since the PVC is already bound. // since the PVC is already bound.
err = WaitForPersistentVolumePhase(ctx, v1.VolumeBound, c, pv.Name, framework.Poll, timeouts.PVBound) err = WaitForPersistentVolumePhase(ctx, v1.VolumeBound, c, pv.Name, framework.Poll, timeouts.PVBound)
if err != nil { if err != nil {
return fmt.Errorf("PV %q did not become Bound: %v", pv.Name, err) return fmt.Errorf("PV %q did not become Bound: %w", pv.Name, err)
} }
// Re-get the pv and pvc objects // Re-get the pv and pvc objects
pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{}) pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return fmt.Errorf("PV Get API error: %v", err) return fmt.Errorf("PV Get API error: %w", err)
} }
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvc.Name, metav1.GetOptions{}) pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvc.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return fmt.Errorf("PVC Get API error: %v", err) return fmt.Errorf("PVC Get API error: %w", err)
} }
// The pv and pvc are both bound, but to each other? // The pv and pvc are both bound, but to each other?
@ -523,12 +523,12 @@ func WaitAndVerifyBinds(ctx context.Context, c clientset.Interface, timeouts *fr
continue continue
} }
if err != nil { if err != nil {
return fmt.Errorf("PV %q did not become Bound: %v", pvName, err) return fmt.Errorf("PV %q did not become Bound: %w", pvName, err)
} }
pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil { if err != nil {
return fmt.Errorf("PV Get API error: %v", err) return fmt.Errorf("PV Get API error: %w", err)
} }
cr := pv.Spec.ClaimRef cr := pv.Spec.ClaimRef
if cr != nil && len(cr.Name) > 0 { if cr != nil && len(cr.Name) > 0 {
@ -541,7 +541,7 @@ func WaitAndVerifyBinds(ctx context.Context, c clientset.Interface, timeouts *fr
err := WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, cr.Name, framework.Poll, timeouts.ClaimBound) err := WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, cr.Name, framework.Poll, timeouts.ClaimBound)
if err != nil { if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", cr.Name, err) return fmt.Errorf("PVC %q did not become Bound: %w", cr.Name, err)
} }
actualBinds++ actualBinds++
} }
@ -665,7 +665,7 @@ func createPDWithRetry(ctx context.Context, zone string) (string, error) {
for start := time.Now(); ; time.Sleep(pdRetryPollTime) { for start := time.Now(); ; time.Sleep(pdRetryPollTime) {
if time.Since(start) >= pdRetryTimeout || if time.Since(start) >= pdRetryTimeout ||
ctx.Err() != nil { ctx.Err() != nil {
return "", fmt.Errorf("timed out while trying to create PD in zone %q, last error: %v", zone, err) return "", fmt.Errorf("timed out while trying to create PD in zone %q, last error: %w", zone, err)
} }
newDiskName, err = createPD(zone) newDiskName, err = createPD(zone)
@ -702,7 +702,7 @@ func DeletePDWithRetry(ctx context.Context, diskName string) error {
for start := time.Now(); ; time.Sleep(pdRetryPollTime) { for start := time.Now(); ; time.Sleep(pdRetryPollTime) {
if time.Since(start) >= pdRetryTimeout || if time.Since(start) >= pdRetryTimeout ||
ctx.Err() != nil { ctx.Err() != nil {
return fmt.Errorf("timed out while trying to delete PD %q, last error: %v", diskName, err) return fmt.Errorf("timed out while trying to delete PD %q, last error: %w", diskName, err)
} }
err = deletePD(diskName) err = deletePD(diskName)
if err != nil { if err != nil {
@ -737,12 +737,12 @@ func WaitForPVClaimBoundPhase(ctx context.Context, client clientset.Interface, p
// Get new copy of the claim // Get new copy of the claim
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{}) claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return persistentvolumes, fmt.Errorf("PVC Get API error: %v", err) return persistentvolumes, fmt.Errorf("PVC Get API error: %w", err)
} }
// Get the bounded PV // Get the bounded PV
persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{}) persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{})
if err != nil { if err != nil {
return persistentvolumes, fmt.Errorf("PV Get API error: %v", err) return persistentvolumes, fmt.Errorf("PV Get API error: %w", err)
} }
} }
return persistentvolumes, nil return persistentvolumes, nil
@ -822,7 +822,7 @@ func DeletePVSource(ctx context.Context, pvSource *v1.PersistentVolumeSource) er
func GetDefaultStorageClassName(ctx context.Context, c clientset.Interface) (string, error) { func GetDefaultStorageClassName(ctx context.Context, c clientset.Interface) (string, error) {
list, err := c.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{}) list, err := c.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return "", fmt.Errorf("Error listing storage classes: %v", err) return "", fmt.Errorf("Error listing storage classes: %w", err)
} }
var scName string var scName string
for _, sc := range list.Items { for _, sc := range list.Items {
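Almost every hunk above makes the same mechanical change: the %v verb in fmt.Errorf becomes %w, so the underlying error is wrapped rather than flattened into text. A minimal standalone sketch of what that buys callers (the file path and function name here are made up for illustration): an error wrapped with %w remains visible to errors.Is and errors.As.

package main

import (
	"errors"
	"fmt"
	"os"
)

// loadConfig wraps the open error with %w so callers can still inspect it.
func loadConfig(path string) error {
	if _, err := os.Open(path); err != nil {
		return fmt.Errorf("failed to load config %q: %w", path, err)
	}
	return nil
}

func main() {
	err := loadConfig("/no/such/file")
	// Prints true: the os.ErrNotExist sentinel survives the wrapping.
	// With %v instead of %w this would print false, because the result
	// would be a plain string error with nothing to unwrap.
	fmt.Println(errors.Is(err, os.ErrNotExist))
}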

View File

@ -57,7 +57,7 @@ func ScaleResource(
) error { ) error {
ginkgo.By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size)) ginkgo.By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size))
if err := testutils.ScaleResourceWithRetries(scalesGetter, ns, name, size, gvr); err != nil { if err := testutils.ScaleResourceWithRetries(scalesGetter, ns, name, size, gvr); err != nil {
return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err) return fmt.Errorf("error while scaling RC %s to %d replicas: %w", name, size, err)
} }
if !wait { if !wait {
return nil return nil
@ -131,7 +131,7 @@ func deleteObjectAndWaitForGC(ctx context.Context, c clientset.Interface, rtObje
if err == nil || apierrors.IsNotFound(err) { if err == nil || apierrors.IsNotFound(err) {
return true, nil return true, nil
} }
return false, fmt.Errorf("failed to delete object with non-retriable error: %v", err) return false, fmt.Errorf("failed to delete object with non-retriable error: %w", err)
}); err != nil { }); err != nil {
return err return err
} }
@ -157,7 +157,7 @@ func deleteObjectAndWaitForGC(ctx context.Context, c clientset.Interface, rtObje
err = waitForPodsInactive(ctx, ps, interval, timeout) err = waitForPodsInactive(ctx, ps, interval, timeout)
if err != nil { if err != nil {
return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err) return fmt.Errorf("error while waiting for pods to become inactive %s: %w", name, err)
} }
terminatePodTime := time.Since(startTime) - deleteTime terminatePodTime := time.Since(startTime) - deleteTime
framework.Logf("Terminating %v %s pods took: %v", description, name, terminatePodTime) framework.Logf("Terminating %v %s pods took: %v", description, name, terminatePodTime)
@ -167,7 +167,7 @@ func deleteObjectAndWaitForGC(ctx context.Context, c clientset.Interface, rtObje
// restart VM in that case and delete the pod. // restart VM in that case and delete the pod.
err = waitForPodsGone(ctx, ps, interval, 20*time.Minute) err = waitForPodsGone(ctx, ps, interval, 20*time.Minute)
if err != nil { if err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err) return fmt.Errorf("error while waiting for pods gone %s: %w", name, err)
} }
return nil return nil
} }
@ -231,7 +231,7 @@ func WaitForControlledPodsRunning(ctx context.Context, c clientset.Interface, ns
} }
err = testutils.WaitForEnoughPodsWithLabelRunning(c, ns, selector, int(replicas)) err = testutils.WaitForEnoughPodsWithLabelRunning(c, ns, selector, int(replicas))
if err != nil { if err != nil {
return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err) return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %w", name, err)
} }
return nil return nil
} }

View File

@ -83,7 +83,7 @@ func GetSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
case *autoscalingv1.Scale: case *autoscalingv1.Scale:
selector, err := metav1.ParseToLabelSelector(typed.Status.Selector) selector, err := metav1.ParseToLabelSelector(typed.Status.Selector)
if err != nil { if err != nil {
return nil, fmt.Errorf("Parsing selector for: %v encountered an error: %v", obj, err) return nil, fmt.Errorf("Parsing selector for: %v encountered an error: %w", obj, err)
} }
return metav1.LabelSelectorAsSelector(selector) return metav1.LabelSelectorAsSelector(selector)
default: default:

View File

@ -115,7 +115,7 @@ func (j *TestJig) CreateTCPServiceWithPort(ctx context.Context, tweak func(svc *
} }
result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{}) result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create TCP Service %q: %v", svc.Name, err) return nil, fmt.Errorf("failed to create TCP Service %q: %w", svc.Name, err)
} }
return j.sanityCheckService(result, svc.Spec.Type) return j.sanityCheckService(result, svc.Spec.Type)
} }
@ -137,7 +137,7 @@ func (j *TestJig) CreateUDPService(ctx context.Context, tweak func(svc *v1.Servi
} }
result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{}) result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create UDP Service %q: %v", svc.Name, err) return nil, fmt.Errorf("failed to create UDP Service %q: %w", svc.Name, err)
} }
return j.sanityCheckService(result, svc.Spec.Type) return j.sanityCheckService(result, svc.Spec.Type)
} }
@ -162,7 +162,7 @@ func (j *TestJig) CreateExternalNameService(ctx context.Context, tweak func(svc
} }
result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{}) result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create ExternalName Service %q: %v", svc.Name, err) return nil, fmt.Errorf("failed to create ExternalName Service %q: %w", svc.Name, err)
} }
return j.sanityCheckService(result, svc.Spec.Type) return j.sanityCheckService(result, svc.Spec.Type)
} }
@ -254,7 +254,7 @@ func (j *TestJig) CreateLoadBalancerService(ctx context.Context, timeout time.Du
} }
_, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{}) _, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %v", svc.Name, err) return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %w", svc.Name, err)
} }
ginkgo.By("waiting for loadbalancer for service " + j.Namespace + "/" + j.Name) ginkgo.By("waiting for loadbalancer for service " + j.Namespace + "/" + j.Name)
@ -521,7 +521,7 @@ func (j *TestJig) UpdateService(ctx context.Context, update func(*v1.Service)) (
for i := 0; i < 3; i++ { for i := 0; i < 3; i++ {
service, err := j.Client.CoreV1().Services(j.Namespace).Get(ctx, j.Name, metav1.GetOptions{}) service, err := j.Client.CoreV1().Services(j.Namespace).Get(ctx, j.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get Service %q: %v", j.Name, err) return nil, fmt.Errorf("failed to get Service %q: %w", j.Name, err)
} }
update(service) update(service)
result, err := j.Client.CoreV1().Services(j.Namespace).Update(ctx, service, metav1.UpdateOptions{}) result, err := j.Client.CoreV1().Services(j.Namespace).Update(ctx, service, metav1.UpdateOptions{})
@ -529,7 +529,7 @@ func (j *TestJig) UpdateService(ctx context.Context, update func(*v1.Service)) (
return j.sanityCheckService(result, service.Spec.Type) return j.sanityCheckService(result, service.Spec.Type)
} }
if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) { if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) {
return nil, fmt.Errorf("failed to update Service %q: %v", j.Name, err) return nil, fmt.Errorf("failed to update Service %q: %w", j.Name, err)
} }
} }
return nil, fmt.Errorf("too many retries updating Service %q", j.Name) return nil, fmt.Errorf("too many retries updating Service %q", j.Name)
@ -706,7 +706,7 @@ func (j *TestJig) CreatePDB(ctx context.Context, rc *v1.ReplicationController) (
return nil, fmt.Errorf("failed to create PDB %q %v", pdb.Name, err) return nil, fmt.Errorf("failed to create PDB %q %v", pdb.Name, err)
} }
if err := j.waitForPdbReady(ctx); err != nil { if err := j.waitForPdbReady(ctx); err != nil {
return nil, fmt.Errorf("failed waiting for PDB to be ready: %v", err) return nil, fmt.Errorf("failed waiting for PDB to be ready: %w", err)
} }
return newPdb, nil return newPdb, nil
@ -743,14 +743,14 @@ func (j *TestJig) Run(ctx context.Context, tweak func(rc *v1.ReplicationControll
} }
result, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).Create(ctx, rc, metav1.CreateOptions{}) result, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).Create(ctx, rc, metav1.CreateOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create RC %q: %v", rc.Name, err) return nil, fmt.Errorf("failed to create RC %q: %w", rc.Name, err)
} }
pods, err := j.waitForPodsCreated(ctx, int(*(rc.Spec.Replicas))) pods, err := j.waitForPodsCreated(ctx, int(*(rc.Spec.Replicas)))
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create pods: %v", err) return nil, fmt.Errorf("failed to create pods: %w", err)
} }
if err := j.waitForPodsReady(ctx, pods); err != nil { if err := j.waitForPodsReady(ctx, pods); err != nil {
return nil, fmt.Errorf("failed waiting for pods to be running: %v", err) return nil, fmt.Errorf("failed waiting for pods to be running: %w", err)
} }
return result, nil return result, nil
} }
@ -760,21 +760,21 @@ func (j *TestJig) Scale(ctx context.Context, replicas int) error {
rc := j.Name rc := j.Name
scale, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).GetScale(ctx, rc, metav1.GetOptions{}) scale, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).GetScale(ctx, rc, metav1.GetOptions{})
if err != nil { if err != nil {
return fmt.Errorf("failed to get scale for RC %q: %v", rc, err) return fmt.Errorf("failed to get scale for RC %q: %w", rc, err)
} }
scale.ResourceVersion = "" // indicate the scale update should be unconditional scale.ResourceVersion = "" // indicate the scale update should be unconditional
scale.Spec.Replicas = int32(replicas) scale.Spec.Replicas = int32(replicas)
_, err = j.Client.CoreV1().ReplicationControllers(j.Namespace).UpdateScale(ctx, rc, scale, metav1.UpdateOptions{}) _, err = j.Client.CoreV1().ReplicationControllers(j.Namespace).UpdateScale(ctx, rc, scale, metav1.UpdateOptions{})
if err != nil { if err != nil {
return fmt.Errorf("failed to scale RC %q: %v", rc, err) return fmt.Errorf("failed to scale RC %q: %w", rc, err)
} }
pods, err := j.waitForPodsCreated(ctx, replicas) pods, err := j.waitForPodsCreated(ctx, replicas)
if err != nil { if err != nil {
return fmt.Errorf("failed waiting for pods: %v", err) return fmt.Errorf("failed waiting for pods: %w", err)
} }
if err := j.waitForPodsReady(ctx, pods); err != nil { if err := j.waitForPodsReady(ctx, pods); err != nil {
return fmt.Errorf("failed waiting for pods to be running: %v", err) return fmt.Errorf("failed waiting for pods to be running: %w", err)
} }
return nil return nil
} }
@ -1063,7 +1063,7 @@ func (j *TestJig) CreateSCTPServiceWithPort(ctx context.Context, tweak func(svc
} }
result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{}) result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create SCTP Service %q: %v", svc.Name, err) return nil, fmt.Errorf("failed to create SCTP Service %q: %w", svc.Name, err)
} }
return j.sanityCheckService(result, svc.Spec.Type) return j.sanityCheckService(result, svc.Spec.Type)
} }
@ -1081,7 +1081,7 @@ func (j *TestJig) CreateLoadBalancerServiceWaitForClusterIPOnly(tweak func(svc *
} }
result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{}) result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %v", svc.Name, err) return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %w", svc.Name, err)
} }
return j.sanityCheckService(result, v1.ServiceTypeLoadBalancer) return j.sanityCheckService(result, v1.ServiceTypeLoadBalancer)

View File

@ -213,11 +213,11 @@ func SkipUnlessSSHKeyPresent() {
func serverVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) (bool, error) { func serverVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) (bool, error) {
serverVersion, err := c.ServerVersion() serverVersion, err := c.ServerVersion()
if err != nil { if err != nil {
return false, fmt.Errorf("Unable to get server version: %v", err) return false, fmt.Errorf("Unable to get server version: %w", err)
} }
sv, err := utilversion.ParseSemantic(serverVersion.GitVersion) sv, err := utilversion.ParseSemantic(serverVersion.GitVersion)
if err != nil { if err != nil {
return false, fmt.Errorf("Unable to parse server version %q: %v", serverVersion.GitVersion, err) return false, fmt.Errorf("Unable to parse server version %q: %w", serverVersion.GitVersion, err)
} }
return sv.AtLeast(v), nil return sv.AtLeast(v), nil
} }

View File

@ -103,12 +103,12 @@ func GetSigner(provider string) (ssh.Signer, error) {
func makePrivateKeySignerFromFile(key string) (ssh.Signer, error) { func makePrivateKeySignerFromFile(key string) (ssh.Signer, error) {
buffer, err := os.ReadFile(key) buffer, err := os.ReadFile(key)
if err != nil { if err != nil {
return nil, fmt.Errorf("error reading SSH key %s: '%v'", key, err) return nil, fmt.Errorf("error reading SSH key %s: %w", key, err)
} }
signer, err := ssh.ParsePrivateKey(buffer) signer, err := ssh.ParsePrivateKey(buffer)
if err != nil { if err != nil {
return nil, fmt.Errorf("error parsing SSH key: '%v'", err) return nil, fmt.Errorf("error parsing SSH key: %w", err)
} }
return signer, err return signer, err
@ -201,7 +201,7 @@ func SSH(ctx context.Context, cmd, host, provider string) (Result, error) {
// Get a signer for the provider. // Get a signer for the provider.
signer, err := GetSigner(provider) signer, err := GetSigner(provider)
if err != nil { if err != nil {
return result, fmt.Errorf("error getting signer for provider %s: '%v'", provider, err) return result, fmt.Errorf("error getting signer for provider %s: %w", provider, err)
} }
// RunSSHCommand will default to Getenv("USER") if user == "", but we're // RunSSHCommand will default to Getenv("USER") if user == "", but we're
@ -250,12 +250,12 @@ func runSSHCommand(ctx context.Context, cmd, user, host string, signer ssh.Signe
}) })
} }
if err != nil { if err != nil {
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: '%v'", user, host, err) return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: %w", user, host, err)
} }
defer client.Close() defer client.Close()
session, err := client.NewSession() session, err := client.NewSession()
if err != nil { if err != nil {
return "", "", 0, fmt.Errorf("error creating session to %s@%s: '%v'", user, host, err) return "", "", 0, fmt.Errorf("error creating session to %s@%s: %w", user, host, err)
} }
defer session.Close() defer session.Close()
@ -275,7 +275,7 @@ func runSSHCommand(ctx context.Context, cmd, user, host string, signer ssh.Signe
} else { } else {
// Some other kind of error happened (e.g. an IOError); consider the // Some other kind of error happened (e.g. an IOError); consider the
// SSH unsuccessful. // SSH unsuccessful.
err = fmt.Errorf("failed running `%s` on %s@%s: '%v'", cmd, user, host, err) err = fmt.Errorf("failed running `%s` on %s@%s: %w", cmd, user, host, err)
} }
} }
return bout.String(), berr.String(), code, err return bout.String(), berr.String(), code, err
@ -304,26 +304,26 @@ func runSSHCommandViaBastion(ctx context.Context, cmd, user, bastion, host strin
}) })
} }
if err != nil { if err != nil {
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: %v", user, bastion, err) return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: %w", user, bastion, err)
} }
defer bastionClient.Close() defer bastionClient.Close()
conn, err := bastionClient.Dial("tcp", host) conn, err := bastionClient.Dial("tcp", host)
if err != nil { if err != nil {
return "", "", 0, fmt.Errorf("error dialing %s from bastion: %v", host, err) return "", "", 0, fmt.Errorf("error dialing %s from bastion: %w", host, err)
} }
defer conn.Close() defer conn.Close()
ncc, chans, reqs, err := ssh.NewClientConn(conn, host, config) ncc, chans, reqs, err := ssh.NewClientConn(conn, host, config)
if err != nil { if err != nil {
return "", "", 0, fmt.Errorf("error creating forwarding connection %s from bastion: %v", host, err) return "", "", 0, fmt.Errorf("error creating forwarding connection %s from bastion: %w", host, err)
} }
client := ssh.NewClient(ncc, chans, reqs) client := ssh.NewClient(ncc, chans, reqs)
defer client.Close() defer client.Close()
session, err := client.NewSession() session, err := client.NewSession()
if err != nil { if err != nil {
return "", "", 0, fmt.Errorf("error creating session to %s@%s from bastion: '%v'", user, host, err) return "", "", 0, fmt.Errorf("error creating session to %s@%s from bastion: %w", user, host, err)
} }
defer session.Close() defer session.Close()
@ -343,7 +343,7 @@ func runSSHCommandViaBastion(ctx context.Context, cmd, user, bastion, host strin
} else { } else {
// Some other kind of error happened (e.g. an IOError); consider the // Some other kind of error happened (e.g. an IOError); consider the
// SSH unsuccessful. // SSH unsuccessful.
err = fmt.Errorf("failed running `%s` on %s@%s: '%v'", cmd, user, host, err) err = fmt.Errorf("failed running `%s` on %s@%s: %w", cmd, user, host, err)
} }
} }
return bout.String(), berr.String(), code, err return bout.String(), berr.String(), code, err

View File

@ -215,7 +215,7 @@ func CheckMount(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulS
fmt.Sprintf("touch %v", filepath.Join(mountPath, fmt.Sprintf("%v", time.Now().UnixNano()))), fmt.Sprintf("touch %v", filepath.Join(mountPath, fmt.Sprintf("%v", time.Now().UnixNano()))),
} { } {
if err := ExecInStatefulPods(ctx, c, ss, cmd); err != nil { if err := ExecInStatefulPods(ctx, c, ss, cmd); err != nil {
return fmt.Errorf("failed to execute %v, error: %v", cmd, err) return fmt.Errorf("failed to execute %v, error: %w", cmd, err)
} }
} }
return nil return nil

View File

@ -73,7 +73,7 @@ func Read(filePath string) ([]byte, error) {
for _, filesource := range filesources { for _, filesource := range filesources {
data, err := filesource.ReadTestFile(filePath) data, err := filesource.ReadTestFile(filePath)
if err != nil { if err != nil {
return nil, fmt.Errorf("fatal error retrieving test file %s: %s", filePath, err) return nil, fmt.Errorf("fatal error retrieving test file %s: %w", filePath, err)
} }
if data != nil { if data != nil {
return data, nil return data, nil

View File

@ -51,7 +51,6 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
clientexec "k8s.io/client-go/util/exec" clientexec "k8s.io/client-go/util/exec"
@ -464,7 +463,7 @@ func runVolumeTesterPod(ctx context.Context, client clientset.Interface, timeout
} }
if err != nil { if err != nil {
e2epod.DeletePodOrFail(ctx, client, clientPod.Namespace, clientPod.Name) e2epod.DeletePodOrFail(ctx, client, clientPod.Namespace, clientPod.Name)
_ = e2epod.WaitForPodToDisappear(ctx, client, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete) _ = e2epod.WaitForPodNotFoundInNamespace(ctx, client, clientPod.Namespace, clientPod.Name, timeouts.PodDelete)
return nil, err return nil, err
} }
return clientPod, nil return clientPod, nil
@ -542,7 +541,7 @@ func testVolumeClient(ctx context.Context, f *framework.Framework, config TestCo
// testVolumeClient might get used more than once per test, therefore // testVolumeClient might get used more than once per test, therefore
// we have to clean up before returning. // we have to clean up before returning.
e2epod.DeletePodOrFail(ctx, f.ClientSet, clientPod.Namespace, clientPod.Name) e2epod.DeletePodOrFail(ctx, f.ClientSet, clientPod.Namespace, clientPod.Name)
framework.ExpectNoError(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, clientPod.Namespace, clientPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)) framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, clientPod.Namespace, clientPod.Name, timeouts.PodDelete))
}() }()
testVolumeContent(f, clientPod, "", fsGroup, fsType, tests) testVolumeContent(f, clientPod, "", fsGroup, fsType, tests)
@ -577,7 +576,7 @@ func InjectContent(ctx context.Context, f *framework.Framework, config TestConfi
// This pod must get deleted before the function returns because the test relies on // This pod must get deleted before the function returns because the test relies on
// the volume not being in use. // the volume not being in use.
e2epod.DeletePodOrFail(ctx, f.ClientSet, injectorPod.Namespace, injectorPod.Name) e2epod.DeletePodOrFail(ctx, f.ClientSet, injectorPod.Namespace, injectorPod.Name)
framework.ExpectNoError(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, injectorPod.Namespace, injectorPod.Name, labels.Everything(), framework.Poll, timeouts.PodDelete)) framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, injectorPod.Namespace, injectorPod.Name, timeouts.PodDelete))
}() }()
ginkgo.By("Writing text file contents in the container.") ginkgo.By("Writing text file contents in the container.")

View File

@ -40,7 +40,7 @@ func (rt *extractRT) RoundTrip(req *http.Request) (*http.Response, error) {
func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []string) (*websocket.Conn, error) { func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []string) (*websocket.Conn, error) {
tlsConfig, err := restclient.TLSConfigFor(config) tlsConfig, err := restclient.TLSConfigFor(config)
if err != nil { if err != nil {
return nil, fmt.Errorf("Failed to create tls config: %v", err) return nil, fmt.Errorf("Failed to create tls config: %w", err)
} }
if url.Scheme == "https" { if url.Scheme == "https" {
url.Scheme = "wss" url.Scheme = "wss"
@ -49,11 +49,11 @@ func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []st
} }
headers, err := headersForConfig(config, url) headers, err := headersForConfig(config, url)
if err != nil { if err != nil {
return nil, fmt.Errorf("Failed to load http headers: %v", err) return nil, fmt.Errorf("Failed to load http headers: %w", err)
} }
cfg, err := websocket.NewConfig(url.String(), "http://localhost") cfg, err := websocket.NewConfig(url.String(), "http://localhost")
if err != nil { if err != nil {
return nil, fmt.Errorf("Failed to create websocket config: %v", err) return nil, fmt.Errorf("Failed to create websocket config: %w", err)
} }
cfg.Header = headers cfg.Header = headers
cfg.TlsConfig = tlsConfig cfg.TlsConfig = tlsConfig

View File

@ -46,7 +46,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
gomega.Eventually(ctx, func() error { gomega.Eventually(ctx, func() error {
grabber, err = e2emetrics.NewMetricsGrabber(ctx, c, ec, f.ClientConfig(), true, true, true, true, true, true) grabber, err = e2emetrics.NewMetricsGrabber(ctx, c, ec, f.ClientConfig(), true, true, true, true, true, true)
if err != nil { if err != nil {
return fmt.Errorf("failed to create metrics grabber: %v", err) return fmt.Errorf("failed to create metrics grabber: %w", err)
} }
return nil return nil
}, 5*time.Minute, 10*time.Second).Should(gomega.BeNil()) }, 5*time.Minute, 10*time.Second).Should(gomega.BeNil())

View File

@ -120,7 +120,7 @@ func verifyPodExists(response []byte, containerName string) (bool, error) {
var metadata Metadata var metadata Metadata
err := json.Unmarshal(response, &metadata) err := json.Unmarshal(response, &metadata)
if err != nil { if err != nil {
return false, fmt.Errorf("Failed to unmarshall: %s", err) return false, fmt.Errorf("Failed to unmarshall: %w", err)
} }
for _, result := range metadata.Results { for _, result := range metadata.Results {
@ -130,7 +130,7 @@ func verifyPodExists(response []byte, containerName string) (bool, error) {
} }
resource, err := parseResource(rawResource) resource, err := parseResource(rawResource)
if err != nil { if err != nil {
return false, fmt.Errorf("No 'resource' label: %s", err) return false, fmt.Errorf("No 'resource' label: %w", err)
} }
if resource.resourceType == "k8s_container" && if resource.resourceType == "k8s_container" &&
resource.resourceLabels["container_name"] == containerName { resource.resourceLabels["container_name"] == containerName {

View File

@ -411,7 +411,7 @@ var _ = SIGDescribe("Kubectl client", func() {
ginkgo.By(fmt.Sprintf("creating the pod from %v", podYaml)) ginkgo.By(fmt.Sprintf("creating the pod from %v", podYaml))
podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in"))) podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pod-with-readiness-probe.yaml.in")))
e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
framework.ExpectEqual(e2epod.CheckPodsRunningReady(ctx, c, ns, []string{simplePodName}, framework.PodStartTimeout), true) framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, c, simplePodName, ns, framework.PodStartTimeout))
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
cleanupKubectlInputs(podYaml, ns, simplePodSelector) cleanupKubectlInputs(podYaml, ns, simplePodSelector)
@ -734,7 +734,7 @@ metadata:
if !strings.Contains(ee.String(), "timed out") { if !strings.Contains(ee.String(), "timed out") {
framework.Failf("Missing expected 'timed out' error, got: %#v", ee) framework.Failf("Missing expected 'timed out' error, got: %#v", ee)
} }
framework.ExpectNoError(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, ns, "failure-3", labels.Everything(), 2*time.Second, 2*v1.DefaultTerminationGracePeriodSeconds*time.Second)) framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, ns, "failure-3", 2*v1.DefaultTerminationGracePeriodSeconds*time.Second))
}) })
ginkgo.It("[Slow] running a failing command with --leave-stdin-open", func(ctx context.Context) { ginkgo.It("[Slow] running a failing command with --leave-stdin-open", func(ctx context.Context) {
@ -798,9 +798,7 @@ metadata:
g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) } g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g) runTestPod, _, err := polymorphichelpers.GetFirstPod(f.ClientSet.CoreV1(), ns, "run=run-test-3", 1*time.Minute, g)
framework.ExpectNoError(err) framework.ExpectNoError(err)
if !e2epod.CheckPodsRunningReady(ctx, c, ns, []string{runTestPod.Name}, time.Minute) { framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, c, runTestPod.Name, ns, time.Minute))
framework.Failf("Pod %q of Job %q should still be running", runTestPod.Name, "run-test-3")
}
gomega.Expect(c.CoreV1().Pods(ns).Delete(ctx, "run-test-3", metav1.DeleteOptions{})).To(gomega.BeNil()) gomega.Expect(c.CoreV1().Pods(ns).Delete(ctx, "run-test-3", metav1.DeleteOptions{})).To(gomega.BeNil())
}) })
@ -1384,7 +1382,7 @@ metadata:
err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) { err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
cj, err := c.BatchV1().CronJobs(ns).List(ctx, metav1.ListOptions{}) cj, err := c.BatchV1().CronJobs(ns).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("Failed getting CronJob %s: %v", ns, err) return false, fmt.Errorf("Failed getting CronJob %s: %w", ns, err)
} }
return len(cj.Items) > 0, nil return len(cj.Items) > 0, nil
}) })
@ -1500,7 +1498,7 @@ metadata:
ginkgo.By("creating the pod") ginkgo.By("creating the pod")
podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in"))) podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("pause-pod.yaml.in")))
e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
framework.ExpectEqual(e2epod.CheckPodsRunningReady(ctx, c, ns, []string{pausePodName}, framework.PodStartTimeout), true) framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, c, pausePodName, ns, framework.PodStartTimeout))
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
cleanupKubectlInputs(podYaml, ns, pausePodSelector) cleanupKubectlInputs(podYaml, ns, pausePodSelector)
@ -1539,7 +1537,7 @@ metadata:
ginkgo.By("creating the pod") ginkgo.By("creating the pod")
podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml.in"))) podYaml = commonutils.SubstituteImageName(string(readTestFileOrDie("busybox-pod.yaml.in")))
e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-") e2ekubectl.RunKubectlOrDieInput(ns, podYaml, "create", "-f", "-")
framework.ExpectEqual(e2epod.CheckPodsRunningReady(ctx, c, ns, []string{busyboxPodName}, framework.PodStartTimeout), true) framework.ExpectNoError(e2epod.WaitTimeoutForPodReadyInNamespace(ctx, c, busyboxPodName, ns, framework.PodStartTimeout))
}) })
ginkgo.AfterEach(func() { ginkgo.AfterEach(func() {
cleanupKubectlInputs(podYaml, ns, busyboxPodSelector) cleanupKubectlInputs(podYaml, ns, busyboxPodSelector)
@ -2028,11 +2026,11 @@ func checkContainersImage(containers []v1.Container, expectImage string) bool {
func getAPIVersions(apiEndpoint string) (*metav1.APIVersions, error) { func getAPIVersions(apiEndpoint string) (*metav1.APIVersions, error) {
body, err := curl(apiEndpoint) body, err := curl(apiEndpoint)
if err != nil { if err != nil {
return nil, fmt.Errorf("Failed http.Get of %s: %v", apiEndpoint, err) return nil, fmt.Errorf("Failed http.Get of %s: %w", apiEndpoint, err)
} }
var apiVersions metav1.APIVersions var apiVersions metav1.APIVersions
if err := json.Unmarshal([]byte(body), &apiVersions); err != nil { if err := json.Unmarshal([]byte(body), &apiVersions); err != nil {
return nil, fmt.Errorf("Failed to parse /api output %s: %v", body, err) return nil, fmt.Errorf("Failed to parse /api output %s: %w", body, err)
} }
return &apiVersions, nil return &apiVersions, nil
} }
@ -2050,7 +2048,7 @@ func startProxyServer(ns string) (int, *exec.Cmd, error) {
buf := make([]byte, 128) buf := make([]byte, 128)
var n int var n int
if n, err = stdout.Read(buf); err != nil { if n, err = stdout.Read(buf); err != nil {
return -1, cmd, fmt.Errorf("Failed to read from kubectl proxy stdout: %v", err) return -1, cmd, fmt.Errorf("Failed to read from kubectl proxy stdout: %w", err)
} }
output := string(buf[:n]) output := string(buf[:n])
match := proxyRegexp.FindStringSubmatch(output) match := proxyRegexp.FindStringSubmatch(output)
@ -2268,17 +2266,17 @@ func newBlockingReader(s string) (io.Reader, io.Closer, error) {
func createApplyCustomResource(resource, namespace, name string, crd *crd.TestCrd) error { func createApplyCustomResource(resource, namespace, name string, crd *crd.TestCrd) error {
ginkgo.By("successfully create CR") ginkgo.By("successfully create CR")
if _, err := e2ekubectl.RunKubectlInput(namespace, resource, "create", "--validate=true", "-f", "-"); err != nil { if _, err := e2ekubectl.RunKubectlInput(namespace, resource, "create", "--validate=true", "-f", "-"); err != nil {
return fmt.Errorf("failed to create CR %s in namespace %s: %v", resource, namespace, err) return fmt.Errorf("failed to create CR %s in namespace %s: %w", resource, namespace, err)
} }
if _, err := e2ekubectl.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil { if _, err := e2ekubectl.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil {
return fmt.Errorf("failed to delete CR %s: %v", name, err) return fmt.Errorf("failed to delete CR %s: %w", name, err)
} }
ginkgo.By("successfully apply CR") ginkgo.By("successfully apply CR")
if _, err := e2ekubectl.RunKubectlInput(namespace, resource, "apply", "--validate=true", "-f", "-"); err != nil { if _, err := e2ekubectl.RunKubectlInput(namespace, resource, "apply", "--validate=true", "-f", "-"); err != nil {
return fmt.Errorf("failed to apply CR %s in namespace %s: %v", resource, namespace, err) return fmt.Errorf("failed to apply CR %s in namespace %s: %w", resource, namespace, err)
} }
if _, err := e2ekubectl.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil { if _, err := e2ekubectl.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil {
return fmt.Errorf("failed to delete CR %s: %v", name, err) return fmt.Errorf("failed to delete CR %s: %w", name, err)
} }
return nil return nil
} }
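The kubectl hunks above replace the boolean e2epod.CheckPodsRunningReady (followed by ExpectEqual or Failf) with a single error-returning wait. Purely to illustrate the resulting call-site shape (the pod name below is a placeholder), a readiness check now reads as one assertion:

// Wait for the pod to become Ready; the spec fails with the wrapped wait error if it never does.
err := e2epod.WaitTimeoutForPodReadyInNamespace(ctx, c, "example-pod", ns, framework.PodStartTimeout)
framework.ExpectNoError(err, "pod %q did not become ready", "example-pod")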

View File

@ -389,7 +389,7 @@ func doTestOverWebSockets(ctx context.Context, bindAddress string, f *framework.
gomega.Eventually(ctx, func() error { gomega.Eventually(ctx, func() error {
channel, msg, err := wsRead(ws) channel, msg, err := wsRead(ws)
if err != nil { if err != nil {
return fmt.Errorf("failed to read completely from websocket %s: %v", url.String(), err) return fmt.Errorf("failed to read completely from websocket %s: %w", url.String(), err)
} }
if channel != 0 { if channel != 0 {
return fmt.Errorf("got message from server that didn't start with channel 0 (data): %v", msg) return fmt.Errorf("got message from server that didn't start with channel 0 (data): %v", msg)
@ -403,7 +403,7 @@ func doTestOverWebSockets(ctx context.Context, bindAddress string, f *framework.
gomega.Eventually(ctx, func() error { gomega.Eventually(ctx, func() error {
channel, msg, err := wsRead(ws) channel, msg, err := wsRead(ws)
if err != nil { if err != nil {
return fmt.Errorf("failed to read completely from websocket %s: %v", url.String(), err) return fmt.Errorf("failed to read completely from websocket %s: %w", url.String(), err)
} }
if channel != 1 { if channel != 1 {
return fmt.Errorf("got message from server that didn't start with channel 1 (error): %v", msg) return fmt.Errorf("got message from server that didn't start with channel 1 (error): %v", msg)
@ -426,7 +426,7 @@ func doTestOverWebSockets(ctx context.Context, bindAddress string, f *framework.
gomega.Eventually(ctx, func() error { gomega.Eventually(ctx, func() error {
channel, msg, err := wsRead(ws) channel, msg, err := wsRead(ws)
if err != nil { if err != nil {
return fmt.Errorf("failed to read completely from websocket %s: %v", url.String(), err) return fmt.Errorf("failed to read completely from websocket %s: %w", url.String(), err)
} }
if channel != 0 { if channel != 0 {
return fmt.Errorf("got message from server that didn't start with channel 0 (data): %v", msg) return fmt.Errorf("got message from server that didn't start with channel 0 (data): %v", msg)

View File

@ -116,7 +116,7 @@ var _ = common.SIGDescribe("ClusterDns [Feature:Example]", func() {
options := metav1.ListOptions{LabelSelector: label.String()} options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns.Name).List(ctx, options) pods, err := c.CoreV1().Pods(ns.Name).List(ctx, options)
framework.ExpectNoError(err, "failed to list pods in namespace: %s", ns.Name) framework.ExpectNoError(err, "failed to list pods in namespace: %s", ns.Name)
err = e2epod.PodsResponding(ctx, c, ns.Name, backendName, false, pods) err = e2epod.WaitForPodsResponding(ctx, c, ns.Name, backendName, false, 0, pods)
framework.ExpectNoError(err, "waiting for all pods to respond") framework.ExpectNoError(err, "waiting for all pods to respond")
framework.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name) framework.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)

View File

@ -233,7 +233,7 @@ var _ = common.SIGDescribe("KubeProxy", func() {
} }
timeoutSeconds, err := strconv.Atoi(line[2]) timeoutSeconds, err := strconv.Atoi(line[2])
if err != nil { if err != nil {
return false, fmt.Errorf("failed to convert matched timeout %s to integer: %v", line[2], err) return false, fmt.Errorf("failed to convert matched timeout %s to integer: %w", line[2], err)
} }
if math.Abs(float64(timeoutSeconds-expectedTimeoutSeconds)) < epsilonSeconds { if math.Abs(float64(timeoutSeconds-expectedTimeoutSeconds)) < epsilonSeconds {
return true, nil return true, nil

View File

@ -81,11 +81,11 @@ func getInternalIP(node *v1.Node) (string, error) {
func getSubnetPrefix(ctx context.Context, c clientset.Interface) (*net.IPNet, error) { func getSubnetPrefix(ctx context.Context, c clientset.Interface) (*net.IPNet, error) {
node, err := getReadySchedulableWorkerNode(ctx, c) node, err := getReadySchedulableWorkerNode(ctx, c)
if err != nil { if err != nil {
return nil, fmt.Errorf("error getting a ready schedulable worker Node, err: %v", err) return nil, fmt.Errorf("error getting a ready schedulable worker Node, err: %w", err)
} }
internalIP, err := getInternalIP(node) internalIP, err := getInternalIP(node)
if err != nil { if err != nil {
return nil, fmt.Errorf("error getting Node internal IP, err: %v", err) return nil, fmt.Errorf("error getting Node internal IP, err: %w", err)
} }
ip := netutils.ParseIPSloppy(internalIP) ip := netutils.ParseIPSloppy(internalIP)
if ip == nil { if ip == nil {

View File

@ -85,12 +85,12 @@ func iperf2ServerDeployment(ctx context.Context, client clientset.Interface, nam
deployment, err := client.AppsV1().Deployments(namespace).Create(ctx, deploymentSpec, metav1.CreateOptions{}) deployment, err := client.AppsV1().Deployments(namespace).Create(ctx, deploymentSpec, metav1.CreateOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err) return nil, fmt.Errorf("deployment %q Create API error: %w", deploymentSpec.Name, err)
} }
framework.Logf("Waiting for deployment %q to complete", deploymentSpec.Name) framework.Logf("Waiting for deployment %q to complete", deploymentSpec.Name)
err = e2edeployment.WaitForDeploymentComplete(client, deployment) err = e2edeployment.WaitForDeploymentComplete(client, deployment)
if err != nil { if err != nil {
return nil, fmt.Errorf("deployment %q failed to complete: %v", deploymentSpec.Name, err) return nil, fmt.Errorf("deployment %q failed to complete: %w", deploymentSpec.Name, err)
} }
return deployment, nil return deployment, nil
@ -119,7 +119,7 @@ func iperf2ClientDaemonSet(ctx context.Context, client clientset.Interface, name
ds, err := client.AppsV1().DaemonSets(namespace).Create(ctx, spec, metav1.CreateOptions{}) ds, err := client.AppsV1().DaemonSets(namespace).Create(ctx, spec, metav1.CreateOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("daemonset %s Create API error: %v", spec.Name, err) return nil, fmt.Errorf("daemonset %s Create API error: %w", spec.Name, err)
} }
return ds, nil return ds, nil
} }

View File

@ -120,7 +120,7 @@ func (f *IngressScaleFramework) PrepareScaleTest(ctx context.Context) error {
Cloud: f.CloudConfig, Cloud: f.CloudConfig,
} }
if err := f.GCEController.Init(ctx); err != nil { if err := f.GCEController.Init(ctx); err != nil {
return fmt.Errorf("failed to initialize GCE controller: %v", err) return fmt.Errorf("failed to initialize GCE controller: %w", err)
} }
f.ScaleTestSvcs = []*v1.Service{} f.ScaleTestSvcs = []*v1.Service{}
@ -137,7 +137,7 @@ func (f *IngressScaleFramework) CleanupScaleTest(ctx context.Context) []error {
for _, ing := range f.ScaleTestIngs { for _, ing := range f.ScaleTestIngs {
if ing != nil { if ing != nil {
if err := f.Clientset.NetworkingV1().Ingresses(ing.Namespace).Delete(ctx, ing.Name, metav1.DeleteOptions{}); err != nil { if err := f.Clientset.NetworkingV1().Ingresses(ing.Namespace).Delete(ctx, ing.Name, metav1.DeleteOptions{}); err != nil {
errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err)) errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %w", ing.Namespace, ing.Name, err))
} }
} }
} }
@ -145,14 +145,14 @@ func (f *IngressScaleFramework) CleanupScaleTest(ctx context.Context) []error {
for _, svc := range f.ScaleTestSvcs { for _, svc := range f.ScaleTestSvcs {
if svc != nil { if svc != nil {
if err := f.Clientset.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{}); err != nil { if err := f.Clientset.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{}); err != nil {
errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err)) errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %w", svc.Namespace, svc.Name, err))
} }
} }
} }
if f.ScaleTestDeploy != nil { if f.ScaleTestDeploy != nil {
f.Logger.Infof("Cleaning up deployment %s...", f.ScaleTestDeploy.Name) f.Logger.Infof("Cleaning up deployment %s...", f.ScaleTestDeploy.Name)
if err := f.Clientset.AppsV1().Deployments(f.ScaleTestDeploy.Namespace).Delete(ctx, f.ScaleTestDeploy.Name, metav1.DeleteOptions{}); err != nil { if err := f.Clientset.AppsV1().Deployments(f.ScaleTestDeploy.Namespace).Delete(ctx, f.ScaleTestDeploy.Name, metav1.DeleteOptions{}); err != nil {
errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %v", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err)) errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %w", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err))
} }
} }
@ -172,7 +172,7 @@ func (f *IngressScaleFramework) RunScaleTest(ctx context.Context) []error {
f.Logger.Infof("Creating deployment %s...", testDeploy.Name) f.Logger.Infof("Creating deployment %s...", testDeploy.Name)
testDeploy, err := f.Jig.Client.AppsV1().Deployments(f.Namespace).Create(ctx, testDeploy, metav1.CreateOptions{}) testDeploy, err := f.Jig.Client.AppsV1().Deployments(f.Namespace).Create(ctx, testDeploy, metav1.CreateOptions{})
if err != nil { if err != nil {
errs = append(errs, fmt.Errorf("failed to create deployment %s: %v", testDeploy.Name, err)) errs = append(errs, fmt.Errorf("failed to create deployment %s: %w", testDeploy.Name, err))
return errs return errs
} }
f.ScaleTestDeploy = testDeploy f.ScaleTestDeploy = testDeploy
@ -180,7 +180,7 @@ func (f *IngressScaleFramework) RunScaleTest(ctx context.Context) []error {
if f.EnableTLS { if f.EnableTLS {
f.Logger.Infof("Ensuring TLS secret %s...", scaleTestSecretName) f.Logger.Infof("Ensuring TLS secret %s...", scaleTestSecretName)
if err := f.Jig.PrepareTLSSecret(ctx, f.Namespace, scaleTestSecretName, scaleTestHostname); err != nil { if err := f.Jig.PrepareTLSSecret(ctx, f.Namespace, scaleTestSecretName, scaleTestHostname); err != nil {
errs = append(errs, fmt.Errorf("failed to prepare TLS secret %s: %v", scaleTestSecretName, err)) errs = append(errs, fmt.Errorf("failed to prepare TLS secret %s: %w", scaleTestSecretName, err))
return errs return errs
} }
} }

View File

@ -4163,7 +4163,7 @@ func translatePodNameToUID(ctx context.Context, c clientset.Interface, ns string
for name, portList := range expectedEndpoints { for name, portList := range expectedEndpoints {
pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{}) pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err) return nil, fmt.Errorf("failed to get pod %s, that's pretty weird. validation failed: %w", name, err)
} }
portsByUID[pod.ObjectMeta.UID] = portList portsByUID[pod.ObjectMeta.UID] = portList
} }
@ -4249,14 +4249,14 @@ func restartApiserver(ctx context.Context, namespace string, cs clientset.Interf
func restartComponent(ctx context.Context, cs clientset.Interface, cName, ns string, matchLabels map[string]string) error { func restartComponent(ctx context.Context, cs clientset.Interface, cName, ns string, matchLabels map[string]string) error {
pods, err := e2epod.GetPods(ctx, cs, ns, matchLabels) pods, err := e2epod.GetPods(ctx, cs, ns, matchLabels)
if err != nil { if err != nil {
return fmt.Errorf("failed to get %s's pods, err: %v", cName, err) return fmt.Errorf("failed to get %s's pods, err: %w", cName, err)
} }
if len(pods) == 0 { if len(pods) == 0 {
return fmt.Errorf("%s pod count is 0", cName) return fmt.Errorf("%s pod count is 0", cName)
} }
if err := e2epod.DeletePodsWithGracePeriod(ctx, cs, pods, 0); err != nil { if err := e2epod.DeletePodsWithGracePeriod(ctx, cs, pods, 0); err != nil {
return fmt.Errorf("failed to restart component: %s, err: %v", cName, err) return fmt.Errorf("failed to restart component: %s, err: %w", cName, err)
} }
_, err = e2epod.PodsCreatedByLabel(ctx, cs, ns, cName, int32(len(pods)), labels.SelectorFromSet(matchLabels)) _, err = e2epod.PodsCreatedByLabel(ctx, cs, ns, cName, int32(len(pods)), labels.SelectorFromSet(matchLabels))
@ -4330,7 +4330,7 @@ func translatePortsByPodNameToPortsByPodUID(c clientset.Interface, ns string, ex
for name, portList := range expectedEndpoints { for name, portList := range expectedEndpoints {
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{}) pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err) return nil, fmt.Errorf("failed to get pod %s, that's pretty weird. validation failed: %w", name, err)
} }
portsByUID[pod.ObjectMeta.UID] = portList portsByUID[pod.ObjectMeta.UID] = portList
} }

View File

@ -869,7 +869,7 @@ func patchPod(cs clientset.Interface, old, new *v1.Pod) (*v1.Pod, error) {
} }
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Pod{}) patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Pod{})
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to create merge patch for Pod %q: %v", old.Name, err) return nil, fmt.Errorf("failed to create merge patch for Pod %q: %w", old.Name, err)
} }
return cs.CoreV1().Pods(new.Namespace).Patch(context.TODO(), new.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) return cs.CoreV1().Pods(new.Namespace).Patch(context.TODO(), new.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
} }

View File

@ -930,7 +930,7 @@ func patchNode(ctx context.Context, client clientset.Interface, old *v1.Node, ne
} }
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Node{}) patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Node{})
if err != nil { if err != nil {
return fmt.Errorf("failed to create merge patch for node %q: %v", old.Name, err) return fmt.Errorf("failed to create merge patch for node %q: %w", old.Name, err)
} }
_, err = client.CoreV1().Nodes().Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status") _, err = client.CoreV1().Nodes().Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
return err return err
@ -948,7 +948,7 @@ func patchPriorityClass(ctx context.Context, cs clientset.Interface, old, new *s
} }
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &schedulingv1.PriorityClass{}) patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &schedulingv1.PriorityClass{})
if err != nil { if err != nil {
return fmt.Errorf("failed to create merge patch for PriorityClass %q: %v", old.Name, err) return fmt.Errorf("failed to create merge patch for PriorityClass %q: %w", old.Name, err)
} }
_, err = cs.SchedulingV1().PriorityClasses().Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}) _, err = cs.SchedulingV1().PriorityClasses().Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
return err return err
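
The patchPod, patchNode, and patchPriorityClass helpers above all follow the same marshal-diff-patch shape. A rough self-contained sketch of that flow for a Pod label change, assuming only the strategicpatch call already visible in the diff; the pod name and label values are made up for illustration.

package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	oldPod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Labels: map[string]string{"app": "v1"}}}
	newPod := oldPod.DeepCopy()
	newPod.Labels["app"] = "v2"

	oldData, err := json.Marshal(oldPod)
	if err != nil {
		panic(err)
	}
	newData, err := json.Marshal(newPod)
	if err != nil {
		panic(err)
	}
	// Same call as in the helpers above; the result is the body for a
	// StrategicMergePatchType Patch request against the API server.
	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Pod{})
	if err != nil {
		panic(fmt.Errorf("failed to create merge patch: %w", err))
	}
	fmt.Println(string(patchBytes)) // roughly {"metadata":{"labels":{"app":"v2"}}}
}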

View File

@ -108,7 +108,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
err = framework.CheckTestingNSDeletedExcept(ctx, cs, ns) err = framework.CheckTestingNSDeletedExcept(ctx, cs, ns)
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = e2epod.WaitForPodsRunningReady(ctx, cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, map[string]string{}) err = e2epod.WaitForPodsRunningReady(ctx, cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// skip if the most utilized node has less than the cri-o minMemLimit available // skip if the most utilized node has less than the cri-o minMemLimit available

View File

@ -180,7 +180,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
err = wait.PollImmediateUntilWithContext(ctx, time.Second, func(ctx context.Context) (done bool, err error) { err = wait.PollImmediateUntilWithContext(ctx, time.Second, func(ctx context.Context) (done bool, err error) {
c, index, err := compareCSICalls(ctx, deterministicCalls, expected, m.driver.GetCalls) c, index, err := compareCSICalls(ctx, deterministicCalls, expected, m.driver.GetCalls)
if err != nil { if err != nil {
return true, fmt.Errorf("error waiting for expected CSI calls: %s", err) return true, fmt.Errorf("error waiting for expected CSI calls: %w", err)
} }
calls = c calls = c
if index == 0 { if index == 0 {

View File

@ -416,7 +416,7 @@ func waitForResizeStatus(pvc *v1.PersistentVolumeClaim, c clientset.Interface, e
updatedPVC, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) updatedPVC, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %v", pvc.Name, err) return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %w", pvc.Name, err)
} }
actualResizeStatus = updatedPVC.Status.ResizeStatus actualResizeStatus = updatedPVC.Status.ResizeStatus
@ -442,7 +442,7 @@ func waitForAllocatedResource(pvc *v1.PersistentVolumeClaim, m *mockDriverSetup,
updatedPVC, err := m.cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) updatedPVC, err := m.cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %v", pvc.Name, err) return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %w", pvc.Name, err)
} }
actualAllocatedSize := updatedPVC.Status.AllocatedResources.Storage() actualAllocatedSize := updatedPVC.Status.AllocatedResources.Storage()
if actualAllocatedSize != nil && actualAllocatedSize.Equal(expectedQuantity) { if actualAllocatedSize != nil && actualAllocatedSize.Equal(expectedQuantity) {

View File

@ -751,7 +751,7 @@ func (m *mockCSIDriver) GetCalls(ctx context.Context) ([]MockCSICall, error) {
// Load logs of driver pod // Load logs of driver pod
log, err := e2epod.GetPodLogs(ctx, m.clientSet, m.driverNamespace.Name, driverPodName, driverContainerName) log, err := e2epod.GetPodLogs(ctx, m.clientSet, m.driverNamespace.Name, driverPodName, driverContainerName)
if err != nil { if err != nil {
return nil, fmt.Errorf("could not load CSI driver logs: %s", err) return nil, fmt.Errorf("could not load CSI driver logs: %w", err)
} }
logLines := strings.Split(log, "\n") logLines := strings.Split(log, "\n")

View File

@ -75,7 +75,7 @@ func (p PodDirIO) CreateFile(path string, content io.Reader) error {
// Therefore the content is now encoded inside the command itself. // Therefore the content is now encoded inside the command itself.
data, err := io.ReadAll(content) data, err := io.ReadAll(content)
if err != nil { if err != nil {
return fmt.Errorf("read content: %v", err) return fmt.Errorf("read content: %w", err)
} }
encoded := make([]byte, base64.StdEncoding.EncodedLen(len(data))) encoded := make([]byte, base64.StdEncoding.EncodedLen(len(data)))
base64.StdEncoding.Encode(encoded, data) base64.StdEncoding.Encode(encoded, data)
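
The CreateFile change above ships the file content to the pod as base64 embedded in the command itself. A small stdlib sketch of that encode step; the decode command shape and target path are assumptions for illustration, not taken from the helper.

package main

import (
	"bytes"
	"encoding/base64"
	"fmt"
	"io"
)

func encodeForCommand(path string, content io.Reader) (string, error) {
	data, err := io.ReadAll(content)
	if err != nil {
		return "", fmt.Errorf("read content: %w", err)
	}
	// Same EncodedLen/Encode pair as in the diff above.
	encoded := make([]byte, base64.StdEncoding.EncodedLen(len(data)))
	base64.StdEncoding.Encode(encoded, data)
	// Hypothetical command shape: decode on the remote side and write the file.
	return fmt.Sprintf("echo '%s' | base64 -d > '%s'", encoded, path), nil
}

func main() {
	cmd, err := encodeForCommand("/tmp/hello.txt", bytes.NewBufferString("hello world\n"))
	if err != nil {
		panic(err)
	}
	fmt.Println(cmd)
}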

View File

@ -97,7 +97,7 @@ func Listen(ctx context.Context, clientset kubernetes.Interface, restConfig *res
SubResource("portforward") SubResource("portforward")
transport, upgrader, err := spdy.RoundTripperFor(restConfig) transport, upgrader, err := spdy.RoundTripperFor(restConfig)
if err != nil { if err != nil {
return nil, fmt.Errorf("create round tripper: %v", err) return nil, fmt.Errorf("create round tripper: %w", err)
} }
dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL()) dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL())
@ -212,7 +212,7 @@ type stream struct {
func dial(ctx context.Context, prefix string, dialer httpstream.Dialer, port int) (s *stream, finalErr error) { func dial(ctx context.Context, prefix string, dialer httpstream.Dialer, port int) (s *stream, finalErr error) {
streamConn, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name) streamConn, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name)
if err != nil { if err != nil {
return nil, fmt.Errorf("dialer failed: %v", err) return nil, fmt.Errorf("dialer failed: %w", err)
} }
requestID := "1" requestID := "1"
defer func() { defer func() {
@ -231,7 +231,7 @@ func dial(ctx context.Context, prefix string, dialer httpstream.Dialer, port int
// This happens asynchronously. // This happens asynchronously.
errorStream, err := streamConn.CreateStream(headers) errorStream, err := streamConn.CreateStream(headers)
if err != nil { if err != nil {
return nil, fmt.Errorf("error creating error stream: %v", err) return nil, fmt.Errorf("error creating error stream: %w", err)
} }
errorStream.Close() errorStream.Close()
go func() { go func() {
@ -248,7 +248,7 @@ func dial(ctx context.Context, prefix string, dialer httpstream.Dialer, port int
headers.Set(v1.StreamType, v1.StreamTypeData) headers.Set(v1.StreamType, v1.StreamTypeData)
dataStream, err := streamConn.CreateStream(headers) dataStream, err := streamConn.CreateStream(headers)
if err != nil { if err != nil {
return nil, fmt.Errorf("error creating data stream: %v", err) return nil, fmt.Errorf("error creating data stream: %w", err)
} }
return &stream{ return &stream{
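
The Listen/dial changes above sit on the SPDY port-forward handshake: upgrade the POST request to a stream connection, open an error stream, then open a data stream on the same connection. A condensed sketch of that sequence using the same client-go calls that appear in the diff; the header constants come from k8s.io/api/core/v1, and error handling plus the asynchronous error-stream reader are simplified compared to the real helper.

package main

import (
	"fmt"
	"net/http"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/httpstream"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/portforward"
	"k8s.io/client-go/transport/spdy"
)

// dialDataStream opens a port-forward data stream to one pod port.
func dialDataStream(clientset kubernetes.Interface, restConfig *rest.Config, namespace, pod string, port int) (httpstream.Stream, error) {
	req := clientset.CoreV1().RESTClient().Post().
		Resource("pods").Namespace(namespace).Name(pod).
		SubResource("portforward")

	transport, upgrader, err := spdy.RoundTripperFor(restConfig)
	if err != nil {
		return nil, fmt.Errorf("create round tripper: %w", err)
	}
	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL())

	streamConn, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name)
	if err != nil {
		return nil, fmt.Errorf("dialer failed: %w", err)
	}

	headers := http.Header{}
	headers.Set(v1.StreamType, v1.StreamTypeError)
	headers.Set(v1.PortHeader, fmt.Sprintf("%d", port))
	headers.Set(v1.PortForwardRequestIDHeader, "1")
	// The error stream reports forwarding problems asynchronously; the real
	// helper reads from it in a goroutine.
	if _, err := streamConn.CreateStream(headers); err != nil {
		return nil, fmt.Errorf("error creating error stream: %w", err)
	}

	headers.Set(v1.StreamType, v1.StreamTypeData)
	dataStream, err := streamConn.CreateStream(headers)
	if err != nil {
		return nil, fmt.Errorf("error creating data stream: %w", err)
	}
	return dataStream, nil
}

func main() {
	// Wiring up a real rest.Config and clientset is omitted; the framework's
	// Listen helper shows how the returned stream is used in the e2e tests.
}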

View File

@ -26,7 +26,6 @@ import (
"github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
@ -197,11 +196,6 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
testFlexVolume(ctx, driverInstallAs, config, f) testFlexVolume(ctx, driverInstallAs, config, f)
ginkgo.By("waiting for flex client pod to terminate")
if err := e2epod.WaitForPodTerminatedInNamespace(ctx, f.ClientSet, config.Prefix+"-client", "", f.Namespace.Name); !apierrors.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
}
ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name)) ginkgo.By(fmt.Sprintf("uninstalling flexvolume %s from node %s", driverInstallAs, node.Name))
uninstallFlex(ctx, cs, node, "k8s", driverInstallAs) uninstallFlex(ctx, cs, node, "k8s", driverInstallAs)
}) })
@ -217,11 +211,6 @@ var _ = utils.SIGDescribe("Flexvolumes", func() {
testFlexVolume(ctx, driverInstallAs, config, f) testFlexVolume(ctx, driverInstallAs, config, f)
ginkgo.By("waiting for flex client pod to terminate")
if err := e2epod.WaitForPodTerminatedInNamespace(ctx, f.ClientSet, config.Prefix+"-client", "", f.Namespace.Name); !apierrors.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
}
// Detach might occur after pod deletion. Wait before deleting driver. // Detach might occur after pod deletion. Wait before deleting driver.
time.Sleep(detachTimeout) time.Sleep(detachTimeout)

View File

@ -178,17 +178,17 @@ func createNginxPod(ctx context.Context, client clientset.Interface, namespace s
pod := makeNginxPod(namespace, nodeSelector, pvclaims) pod := makeNginxPod(namespace, nodeSelector, pvclaims)
pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{}) pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil { if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err) return nil, fmt.Errorf("pod Create API error: %w", err)
} }
// Waiting for pod to be running // Waiting for pod to be running
err = e2epod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace) err = e2epod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)
if err != nil { if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err) return pod, fmt.Errorf("pod %q is not Running: %w", pod.Name, err)
} }
// get fresh pod info // get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{}) pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err) return pod, fmt.Errorf("pod Get API error: %w", err)
} }
return pod, nil return pod, nil
} }

View File

@ -143,7 +143,7 @@ func UpdatePVSize(ctx context.Context, pv *v1.PersistentVolume, size resource.Qu
var err error var err error
pvToUpdate, err = c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{}) pvToUpdate, err = c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("error fetching pv %s: %v", pvName, err) return false, fmt.Errorf("error fetching pv %s: %w", pvName, err)
} }
pvToUpdate.Spec.Capacity[v1.ResourceStorage] = size pvToUpdate.Spec.Capacity[v1.ResourceStorage] = size
pvToUpdate, err = c.CoreV1().PersistentVolumes().Update(ctx, pvToUpdate, metav1.UpdateOptions{}) pvToUpdate, err = c.CoreV1().PersistentVolumes().Update(ctx, pvToUpdate, metav1.UpdateOptions{})

View File

@ -165,7 +165,7 @@ func waitForDeploymentToRecreatePod(ctx context.Context, client clientset.Interf
waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) { waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
podList, err := e2edeployment.GetPodsForDeployment(ctx, client, deployment) podList, err := e2edeployment.GetPodsForDeployment(ctx, client, deployment)
if err != nil { if err != nil {
return false, fmt.Errorf("failed to get pods for deployment: %v", err) return false, fmt.Errorf("failed to get pods for deployment: %w", err)
} }
for _, pod := range podList.Items { for _, pod := range podList.Items {
switch pod.Status.Phase { switch pod.Status.Phase {

View File

@ -35,7 +35,6 @@ import (
"k8s.io/kubernetes/test/e2e/storage/drivers" "k8s.io/kubernetes/test/e2e/storage/drivers"
storageframework "k8s.io/kubernetes/test/e2e/storage/framework" storageframework "k8s.io/kubernetes/test/e2e/storage/framework"
"k8s.io/kubernetes/test/e2e/storage/utils" "k8s.io/kubernetes/test/e2e/storage/utils"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
admissionapi "k8s.io/pod-security-admission/api" admissionapi "k8s.io/pod-security-admission/api"
) )
@ -119,7 +118,7 @@ var _ = utils.SIGDescribe("[Feature:NodeOutOfServiceVolumeDetach] [Disruptive] [
LabelSelector: labelSelectorStr, LabelSelector: labelSelectorStr,
FieldSelector: fields.OneTermNotEqualSelector("spec.nodeName", oldNodeName).String(), FieldSelector: fields.OneTermNotEqualSelector("spec.nodeName", oldNodeName).String(),
} }
_, err = e2epod.WaitForAllPodsCondition(ctx, c, ns, podListOpts, 1, "running and ready", framework.PodStartTimeout, testutils.PodRunningReady) _, err = e2epod.WaitForPods(ctx, c, ns, podListOpts, e2epod.Range{MinMatching: 1}, framework.PodStartTimeout, "be running and ready", e2epod.RunningReady)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Bring the node back online and remove the taint // Bring the node back online and remove the taint

View File

@ -33,7 +33,6 @@ import (
policyv1 "k8s.io/api/policy/v1" policyv1 "k8s.io/api/policy/v1"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid" "k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
@ -196,7 +195,7 @@ var _ = utils.SIGDescribe("Pod Disks [Feature:StorageProvider]", func() {
ginkgo.By("deleting host0Pod") // delete this pod before creating next pod ginkgo.By("deleting host0Pod") // delete this pod before creating next pod
framework.ExpectNoError(podClient.Delete(ctx, host0Pod.Name, podDelOpt), "Failed to delete host0Pod") framework.ExpectNoError(podClient.Delete(ctx, host0Pod.Name, podDelOpt), "Failed to delete host0Pod")
framework.Logf("deleted host0Pod %q", host0Pod.Name) framework.Logf("deleted host0Pod %q", host0Pod.Name)
e2epod.WaitForPodToDisappear(ctx, cs, host0Pod.Namespace, host0Pod.Name, labels.Everything(), framework.Poll, f.Timeouts.PodDelete) e2epod.WaitForPodNotFoundInNamespace(ctx, cs, host0Pod.Namespace, host0Pod.Name, f.Timeouts.PodDelete)
framework.Logf("deleted host0Pod %q disappeared", host0Pod.Name) framework.Logf("deleted host0Pod %q disappeared", host0Pod.Name)
} }
@ -527,7 +526,7 @@ func detachPD(nodeName types.NodeName, pdName string) error {
} else if framework.TestContext.Provider == "aws" { } else if framework.TestContext.Provider == "aws" {
awsSession, err := session.NewSession() awsSession, err := session.NewSession()
if err != nil { if err != nil {
return fmt.Errorf("error creating session: %v", err) return fmt.Errorf("error creating session: %w", err)
} }
client := ec2.New(awsSession) client := ec2.New(awsSession)
tokens := strings.Split(pdName, "/") tokens := strings.Split(pdName, "/")
@ -537,7 +536,7 @@ func detachPD(nodeName types.NodeName, pdName string) error {
} }
_, err = client.DetachVolume(&request) _, err = client.DetachVolume(&request)
if err != nil { if err != nil {
return fmt.Errorf("error detaching EBS volume: %v", err) return fmt.Errorf("error detaching EBS volume: %w", err)
} }
return nil return nil
@ -562,7 +561,7 @@ func attachPD(nodeName types.NodeName, pdName string) error {
} else if framework.TestContext.Provider == "aws" { } else if framework.TestContext.Provider == "aws" {
awsSession, err := session.NewSession() awsSession, err := session.NewSession()
if err != nil { if err != nil {
return fmt.Errorf("error creating session: %v", err) return fmt.Errorf("error creating session: %w", err)
} }
client := ec2.New(awsSession) client := ec2.New(awsSession)
tokens := strings.Split(pdName, "/") tokens := strings.Split(pdName, "/")
@ -570,7 +569,7 @@ func attachPD(nodeName types.NodeName, pdName string) error {
ebsUtil := utils.NewEBSUtil(client) ebsUtil := utils.NewEBSUtil(client)
err = ebsUtil.AttachDisk(awsVolumeID, string(nodeName)) err = ebsUtil.AttachDisk(awsVolumeID, string(nodeName))
if err != nil { if err != nil {
return fmt.Errorf("error attaching volume %s to node %s: %v", awsVolumeID, nodeName, err) return fmt.Errorf("error attaching volume %s to node %s: %w", awsVolumeID, nodeName, err)
} }
return nil return nil
} else { } else {

View File

@ -959,7 +959,7 @@ func createLocalPVCsPVs(ctx context.Context, config *localTestConfig, volumes []
for _, volume := range volumes { for _, volume := range volumes {
pvc, err := config.client.CoreV1().PersistentVolumeClaims(volume.pvc.Namespace).Get(ctx, volume.pvc.Name, metav1.GetOptions{}) pvc, err := config.client.CoreV1().PersistentVolumeClaims(volume.pvc.Namespace).Get(ctx, volume.pvc.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("failed to get PVC %s/%s: %v", volume.pvc.Namespace, volume.pvc.Name, err) return false, fmt.Errorf("failed to get PVC %s/%s: %w", volume.pvc.Namespace, volume.pvc.Name, err)
} }
if pvc.Status.Phase != v1.ClaimPending { if pvc.Status.Phase != v1.ClaimPending {
return true, nil return true, nil

View File

@ -70,7 +70,7 @@ func completeMultiTest(ctx context.Context, f *framework.Framework, c clientset.
for pvcKey := range claims { for pvcKey := range claims {
pvc, err := c.CoreV1().PersistentVolumeClaims(pvcKey.Namespace).Get(ctx, pvcKey.Name, metav1.GetOptions{}) pvc, err := c.CoreV1().PersistentVolumeClaims(pvcKey.Namespace).Get(ctx, pvcKey.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return fmt.Errorf("error getting pvc %q: %v", pvcKey.Name, err) return fmt.Errorf("error getting pvc %q: %w", pvcKey.Name, err)
} }
if len(pvc.Spec.VolumeName) == 0 { if len(pvc.Spec.VolumeName) == 0 {
continue // claim is not bound continue // claim is not bound
@ -450,7 +450,7 @@ func createWaitAndDeletePod(ctx context.Context, c clientset.Interface, t *frame
pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, command) pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, command)
runPod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}) runPod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
if err != nil { if err != nil {
return fmt.Errorf("pod Create API error: %v", err) return fmt.Errorf("pod Create API error: %w", err)
} }
defer func() { defer func() {
delErr := e2epod.DeletePodWithWait(ctx, c, runPod) delErr := e2epod.DeletePodWithWait(ctx, c, runPod)
@ -461,7 +461,7 @@ func createWaitAndDeletePod(ctx context.Context, c clientset.Interface, t *frame
err = testPodSuccessOrFail(ctx, c, t, ns, runPod) err = testPodSuccessOrFail(ctx, c, t, ns, runPod)
if err != nil { if err != nil {
return fmt.Errorf("pod %q did not exit with Success: %v", runPod.Name, err) return fmt.Errorf("pod %q did not exit with Success: %w", runPod.Name, err)
} }
return // note: named return value return // note: named return value
} }
@ -470,7 +470,7 @@ func createWaitAndDeletePod(ctx context.Context, c clientset.Interface, t *frame
func testPodSuccessOrFail(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string, pod *v1.Pod) error { func testPodSuccessOrFail(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string, pod *v1.Pod) error {
framework.Logf("Pod should terminate with exitcode 0 (success)") framework.Logf("Pod should terminate with exitcode 0 (success)")
if err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, ns, t.PodStart); err != nil { if err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, ns, t.PodStart); err != nil {
return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err) return fmt.Errorf("pod %q failed to reach Success: %w", pod.Name, err)
} }
framework.Logf("Pod %v succeeded ", pod.Name) framework.Logf("Pod %v succeeded ", pod.Name)
return nil return nil

View File

@ -176,7 +176,7 @@ func waitForPVCStorageClass(ctx context.Context, c clientset.Interface, namespac
}) })
if err != nil { if err != nil {
return watchedPVC, fmt.Errorf("error waiting for claim %s to have StorageClass set to %s: %v", pvcName, scName, err) return watchedPVC, fmt.Errorf("error waiting for claim %s to have StorageClass set to %s: %w", pvcName, scName, err)
} }
return watchedPVC, nil return watchedPVC, nil

View File

@ -33,7 +33,6 @@ import (
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic" "k8s.io/client-go/dynamic"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
@ -846,7 +845,7 @@ func (t StorageClassTest) TestBindingWaitForFirstConsumerMultiPVC(ctx context.Co
framework.ExpectNoError(err) framework.ExpectNoError(err)
ginkgo.DeferCleanup(func(ctx context.Context) error { ginkgo.DeferCleanup(func(ctx context.Context) error {
e2epod.DeletePodOrFail(ctx, t.Client, pod.Namespace, pod.Name) e2epod.DeletePodOrFail(ctx, t.Client, pod.Namespace, pod.Name)
return e2epod.WaitForPodToDisappear(ctx, t.Client, pod.Namespace, pod.Name, labels.Everything(), framework.Poll, t.Timeouts.PodDelete) return e2epod.WaitForPodNotFoundInNamespace(ctx, t.Client, pod.Namespace, pod.Name, t.Timeouts.PodDelete)
}) })
if expectUnschedulable { if expectUnschedulable {
// Verify that no claims are provisioned. // Verify that no claims are provisioned.

View File

@ -303,7 +303,7 @@ func ExpandPVCSize(ctx context.Context, origPVC *v1.PersistentVolumeClaim, size
var err error var err error
updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Get(ctx, pvcName, metav1.GetOptions{}) updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Get(ctx, pvcName, metav1.GetOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("error fetching pvc %q for resizing: %v", pvcName, err) return false, fmt.Errorf("error fetching pvc %q for resizing: %w", pvcName, err)
} }
updatedPVC.Spec.Resources.Requests[v1.ResourceStorage] = size updatedPVC.Spec.Resources.Requests[v1.ResourceStorage] = size
@ -331,7 +331,7 @@ func WaitForResizingCondition(ctx context.Context, pvc *v1.PersistentVolumeClaim
updatedPVC, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) updatedPVC, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %v", pvc.Name, err) return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %w", pvc.Name, err)
} }
pvcConditions := updatedPVC.Status.Conditions pvcConditions := updatedPVC.Status.Conditions
@ -381,7 +381,7 @@ func WaitForPendingFSResizeCondition(ctx context.Context, pvc *v1.PersistentVolu
updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %v", pvc.Name, err) return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %w", pvc.Name, err)
} }
inProgressConditions := updatedPVC.Status.Conditions inProgressConditions := updatedPVC.Status.Conditions
@ -409,7 +409,7 @@ func WaitForFSResize(ctx context.Context, pvc *v1.PersistentVolumeClaim, c clien
updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{}) updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %v", pvc.Name, err) return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %w", pvc.Name, err)
} }
pvcSize := updatedPVC.Spec.Resources.Requests[v1.ResourceStorage] pvcSize := updatedPVC.Spec.Resources.Requests[v1.ResourceStorage]

View File

@ -263,7 +263,7 @@ func verifyFile(f *framework.Framework, pod *v1.Pod, fpath string, expectSize in
} }
size, err := strconv.Atoi(strings.TrimSuffix(rtnstr, "\n")) size, err := strconv.Atoi(strings.TrimSuffix(rtnstr, "\n"))
if err != nil { if err != nil {
return fmt.Errorf("unable to convert string %q to int: %v", rtnstr, err) return fmt.Errorf("unable to convert string %q to int: %w", rtnstr, err)
} }
if int64(size) != expectSize { if int64(size) != expectSize {
return fmt.Errorf("size of file %s is %d, expected %d", fpath, size, expectSize) return fmt.Errorf("size of file %s is %d, expected %d", fpath, size, expectSize)
@ -320,7 +320,7 @@ func testVolumeIO(ctx context.Context, f *framework.Framework, cs clientset.Inte
podsNamespacer := cs.CoreV1().Pods(config.Namespace) podsNamespacer := cs.CoreV1().Pods(config.Namespace)
clientPod, err = podsNamespacer.Create(ctx, clientPod, metav1.CreateOptions{}) clientPod, err = podsNamespacer.Create(ctx, clientPod, metav1.CreateOptions{})
if err != nil { if err != nil {
return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err) return fmt.Errorf("failed to create client pod %q: %w", clientPod.Name, err)
} }
ginkgo.DeferCleanup(func(ctx context.Context) { ginkgo.DeferCleanup(func(ctx context.Context) {
deleteFile(f, clientPod, ddInput) deleteFile(f, clientPod, ddInput)
@ -339,7 +339,7 @@ func testVolumeIO(ctx context.Context, f *framework.Framework, cs clientset.Inte
err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, clientPod.Name, clientPod.Namespace, f.Timeouts.PodStart) err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, clientPod.Name, clientPod.Namespace, f.Timeouts.PodStart)
if err != nil { if err != nil {
return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err) return fmt.Errorf("client pod %q not running: %w", clientPod.Name, err)
} }
// create files of the passed-in file sizes and verify test file size and content // create files of the passed-in file sizes and verify test file size and content

View File

@ -342,7 +342,7 @@ func waitForAllPVCsBound(ctx context.Context, cs clientset.Interface, timeout ti
return true, nil return true, nil
}) })
if err != nil { if err != nil {
return nil, fmt.Errorf("error waiting for all PVCs to be bound: %v", err) return nil, fmt.Errorf("error waiting for all PVCs to be bound: %w", err)
} }
return pvNames, nil return pvNames, nil
} }
@ -411,7 +411,7 @@ func getCSINodeLimits(ctx context.Context, cs clientset.Interface, config *stora
return true, nil return true, nil
}) })
if err != nil { if err != nil {
return 0, fmt.Errorf("could not get CSINode limit for driver %s: %v", driverInfo.Name, err) return 0, fmt.Errorf("could not get CSINode limit for driver %s: %w", driverInfo.Name, err)
} }
return limit, nil return limit, nil
} }

View File

@ -506,7 +506,7 @@ func listPodDirectory(ctx context.Context, h storageutils.HostExec, path string,
cmd := fmt.Sprintf("find %s -mindepth 2 -maxdepth 2", path) cmd := fmt.Sprintf("find %s -mindepth 2 -maxdepth 2", path)
out, err := h.IssueCommandWithResult(ctx, cmd, node) out, err := h.IssueCommandWithResult(ctx, cmd, node)
if err != nil { if err != nil {
return nil, fmt.Errorf("error checking directory %s on node %s: %s", path, node.Name, err) return nil, fmt.Errorf("error checking directory %s on node %s: %w", path, node.Name, err)
} }
return strings.Split(out, "\n"), nil return strings.Split(out, "\n"), nil
} }

View File

@ -24,7 +24,6 @@ import (
"github.com/onsi/ginkgo/v2" "github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors" utilerrors "k8s.io/apimachinery/pkg/util/errors"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -101,7 +100,7 @@ func PodsUseStaticPVsOrFail(ctx context.Context, f *framework.Framework, podCoun
go func(config *staticPVTestConfig) { go func(config *staticPVTestConfig) {
defer ginkgo.GinkgoRecover() defer ginkgo.GinkgoRecover()
defer wg.Done() defer wg.Done()
err := e2epod.WaitForPodToDisappear(ctx, c, ns, config.pod.Name, labels.Everything(), framework.Poll, f.Timeouts.PodDelete) err := e2epod.WaitForPodNotFoundInNamespace(ctx, c, ns, config.pod.Name, f.Timeouts.PodDelete)
framework.ExpectNoError(err, "while waiting for pod to disappear") framework.ExpectNoError(err, "while waiting for pod to disappear")
errs := e2epv.PVPVCCleanup(ctx, c, ns, config.pv, config.pvc) errs := e2epv.PVPVCCleanup(ctx, c, ns, config.pv, config.pvc)
framework.ExpectNoError(utilerrors.NewAggregate(errs), "while cleaning up PVs and PVCs") framework.ExpectNoError(utilerrors.NewAggregate(errs), "while cleaning up PVs and PVCs")

View File

@ -64,16 +64,16 @@ func NewEBSUtil(client *ec2.EC2) *EBSUtil {
func (ebs *EBSUtil) AttachDisk(volumeID string, nodeName string) error { func (ebs *EBSUtil) AttachDisk(volumeID string, nodeName string) error {
instance, err := findInstanceByNodeName(nodeName, ebs.client) instance, err := findInstanceByNodeName(nodeName, ebs.client)
if err != nil { if err != nil {
return fmt.Errorf("error finding node %s: %v", nodeName, err) return fmt.Errorf("error finding node %s: %w", nodeName, err)
} }
err = ebs.waitForAvailable(volumeID) err = ebs.waitForAvailable(volumeID)
if err != nil { if err != nil {
return fmt.Errorf("error waiting volume %s to be available: %v", volumeID, err) return fmt.Errorf("error waiting volume %s to be available: %w", volumeID, err)
} }
device, err := ebs.findFreeDevice(instance) device, err := ebs.findFreeDevice(instance)
if err != nil { if err != nil {
return fmt.Errorf("error finding free device on node %s: %v", nodeName, err) return fmt.Errorf("error finding free device on node %s: %w", nodeName, err)
} }
hostDevice := "/dev/xvd" + string(device) hostDevice := "/dev/xvd" + string(device)
attachInput := &ec2.AttachVolumeInput{ attachInput := &ec2.AttachVolumeInput{
@ -83,7 +83,7 @@ func (ebs *EBSUtil) AttachDisk(volumeID string, nodeName string) error {
} }
_, err = ebs.client.AttachVolume(attachInput) _, err = ebs.client.AttachVolume(attachInput)
if err != nil { if err != nil {
return fmt.Errorf("error attaching volume %s to node %s: %v", volumeID, nodeName, err) return fmt.Errorf("error attaching volume %s to node %s: %w", volumeID, nodeName, err)
} }
return ebs.waitForAttach(volumeID) return ebs.waitForAttach(volumeID)
} }
@ -245,7 +245,7 @@ func describeInstances(request *ec2.DescribeInstancesInput, cloud *ec2.EC2) ([]*
for { for {
response, err := cloud.DescribeInstances(request) response, err := cloud.DescribeInstances(request)
if err != nil { if err != nil {
return nil, fmt.Errorf("error listing AWS instances: %v", err) return nil, fmt.Errorf("error listing AWS instances: %w", err)
} }
for _, reservation := range response.Reservations { for _, reservation := range response.Reservations {

View File

@ -72,7 +72,7 @@ func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool)
awsSession, err := session.NewSession() awsSession, err := session.NewSession()
if err != nil { if err != nil {
return fmt.Errorf("error creating session: %v", err) return fmt.Errorf("error creating session: %w", err)
} }
if len(zone) > 0 { if len(zone) > 0 {
@ -90,7 +90,7 @@ func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool)
} }
info, err := client.DescribeVolumes(request) info, err := client.DescribeVolumes(request)
if err != nil { if err != nil {
return fmt.Errorf("error querying ec2 for volume %q: %v", volumeID, err) return fmt.Errorf("error querying ec2 for volume %q: %w", volumeID, err)
} }
if len(info.Volumes) == 0 { if len(info.Volumes) == 0 {
return fmt.Errorf("no volumes found for volume %q", volumeID) return fmt.Errorf("no volumes found for volume %q", volumeID)
@ -737,7 +737,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
err = wait.Poll(time.Second, framework.ClaimProvisionTimeout, func() (bool, error) { err = wait.Poll(time.Second, framework.ClaimProvisionTimeout, func() (bool, error) {
events, err := c.CoreV1().Events(claim.Namespace).List(ctx, metav1.ListOptions{}) events, err := c.CoreV1().Events(claim.Namespace).List(ctx, metav1.ListOptions{})
if err != nil { if err != nil {
return false, fmt.Errorf("could not list PVC events in %s: %v", claim.Namespace, err) return false, fmt.Errorf("could not list PVC events in %s: %w", claim.Namespace, err)
} }
for _, event := range events.Items { for _, event := range events.Items {
if strings.Contains(event.Message, "failed to create encrypted volume: the volume disappeared after creation, most likely due to inaccessible KMS encryption key") { if strings.Contains(event.Message, "failed to create encrypted volume: the volume disappeared after creation, most likely due to inaccessible KMS encryption key") {
@ -894,7 +894,7 @@ func waitForProvisionedVolumesDeleted(ctx context.Context, c clientset.Interface
return true, nil // No PVs remain return true, nil // No PVs remain
}) })
if err != nil { if err != nil {
return remainingPVs, fmt.Errorf("Error waiting for PVs to be deleted: %v", err) return remainingPVs, fmt.Errorf("Error waiting for PVs to be deleted: %w", err)
} }
return nil, nil return nil, nil
} }

View File

@ -782,7 +782,7 @@ func invokeVCenterServiceControl(ctx context.Context, command, service, host str
result, err := e2essh.SSH(ctx, sshCmd, host, framework.TestContext.Provider) result, err := e2essh.SSH(ctx, sshCmd, host, framework.TestContext.Provider)
if err != nil || result.Code != 0 { if err != nil || result.Code != 0 {
e2essh.LogResult(result) e2essh.LogResult(result)
return fmt.Errorf("couldn't execute command: %s on vCenter host: %v", sshCmd, err) return fmt.Errorf("couldn't execute command: %s on vCenter host: %w", sshCmd, err)
} }
return nil return nil
} }

View File

@ -84,7 +84,7 @@ func restartKubelet(ctx context.Context, host string) error {
result, err := e2essh.SSH(ctx, cmd, host, framework.TestContext.Provider) result, err := e2essh.SSH(ctx, cmd, host, framework.TestContext.Provider)
if err != nil || result.Code != 0 { if err != nil || result.Code != 0 {
e2essh.LogResult(result) e2essh.LogResult(result)
return fmt.Errorf("couldn't restart kubelet: %v", err) return fmt.Errorf("couldn't restart kubelet: %w", err)
} }
return nil return nil
} }

View File

@ -49,22 +49,22 @@ func gatherTestSuiteMetrics(ctx context.Context) error {
framework.Logf("Gathering metrics") framework.Logf("Gathering metrics")
config, err := framework.LoadConfig() config, err := framework.LoadConfig()
if err != nil { if err != nil {
return fmt.Errorf("error loading client config: %v", err) return fmt.Errorf("error loading client config: %w", err)
} }
c, err := clientset.NewForConfig(config) c, err := clientset.NewForConfig(config)
if err != nil { if err != nil {
return fmt.Errorf("error creating client: %v", err) return fmt.Errorf("error creating client: %w", err)
} }
// Grab metrics for apiserver, scheduler, controller-manager, kubelet (for non-kubemark case) and cluster autoscaler (optionally). // Grab metrics for apiserver, scheduler, controller-manager, kubelet (for non-kubemark case) and cluster autoscaler (optionally).
grabber, err := e2emetrics.NewMetricsGrabber(ctx, c, nil, config, !framework.ProviderIs("kubemark"), true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics, false) grabber, err := e2emetrics.NewMetricsGrabber(ctx, c, nil, config, !framework.ProviderIs("kubemark"), true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
if err != nil { if err != nil {
return fmt.Errorf("failed to create MetricsGrabber: %v", err) return fmt.Errorf("failed to create MetricsGrabber: %w", err)
} }
received, err := grabber.Grab(ctx) received, err := grabber.Grab(ctx)
if err != nil { if err != nil {
return fmt.Errorf("failed to grab metrics: %v", err) return fmt.Errorf("failed to grab metrics: %w", err)
} }
metricsForE2E := (*e2emetrics.ComponentCollection)(&received) metricsForE2E := (*e2emetrics.ComponentCollection)(&received)
@ -72,7 +72,7 @@ func gatherTestSuiteMetrics(ctx context.Context) error {
if framework.TestContext.ReportDir != "" { if framework.TestContext.ReportDir != "" {
filePath := path.Join(framework.TestContext.ReportDir, "MetricsForE2ESuite_"+time.Now().Format(time.RFC3339)+".json") filePath := path.Join(framework.TestContext.ReportDir, "MetricsForE2ESuite_"+time.Now().Format(time.RFC3339)+".json")
if err := os.WriteFile(filePath, []byte(metricsJSON), 0644); err != nil { if err := os.WriteFile(filePath, []byte(metricsJSON), 0644); err != nil {
return fmt.Errorf("error writing to %q: %v", filePath, err) return fmt.Errorf("error writing to %q: %w", filePath, err)
} }
} else { } else {
framework.Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON) framework.Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON)

View File

@ -184,7 +184,7 @@ func waitForDeploymentRevision(ctx context.Context, c clientset.Interface, d *ap
return revision == targetRevision, nil return revision == targetRevision, nil
}) })
if err != nil { if err != nil {
return fmt.Errorf("error waiting for revision to become %q for deployment %q: %v", targetRevision, d.Name, err) return fmt.Errorf("error waiting for revision to become %q for deployment %q: %w", targetRevision, d.Name, err)
} }
return nil return nil
} }

View File

@ -100,7 +100,7 @@ func inClusterClientMustWork(ctx context.Context, f *framework.Framework, pod *v
numTokens, err := e2eauth.ParseInClusterClientLogs(logs) numTokens, err := e2eauth.ParseInClusterClientLogs(logs)
if err != nil { if err != nil {
framework.Logf("Error parsing inclusterclient logs: %v", err) framework.Logf("Error parsing inclusterclient logs: %v", err)
return false, fmt.Errorf("inclusterclient reported an error: %v", err) return false, fmt.Errorf("inclusterclient reported an error: %w", err)
} }
if numTokens == 0 { if numTokens == 0 {
framework.Logf("No authenticated API calls found") framework.Logf("No authenticated API calls found")

View File

@ -143,7 +143,7 @@ func waitForKubeProxyStaticPodsRunning(ctx context.Context, c clientset.Interfac
} }
if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil { if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil {
return fmt.Errorf("error waiting for kube-proxy static pods running: %v", err) return fmt.Errorf("error waiting for kube-proxy static pods running: %w", err)
} }
return nil return nil
} }
@ -166,7 +166,7 @@ func waitForKubeProxyStaticPodsDisappear(ctx context.Context, c clientset.Interf
} }
if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil { if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil {
return fmt.Errorf("error waiting for kube-proxy static pods disappear: %v", err) return fmt.Errorf("error waiting for kube-proxy static pods disappear: %w", err)
} }
return nil return nil
} }
@ -190,7 +190,7 @@ func waitForKubeProxyDaemonSetRunning(ctx context.Context, f *framework.Framewor
} }
if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil { if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil {
return fmt.Errorf("error waiting for kube-proxy DaemonSet running: %v", err) return fmt.Errorf("error waiting for kube-proxy DaemonSet running: %w", err)
} }
return nil return nil
} }
@ -213,7 +213,7 @@ func waitForKubeProxyDaemonSetDisappear(ctx context.Context, c clientset.Interfa
} }
if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil { if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil {
return fmt.Errorf("error waiting for kube-proxy DaemonSet disappear: %v", err) return fmt.Errorf("error waiting for kube-proxy DaemonSet disappear: %w", err)
} }
return nil return nil
} }
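
All four waitForKubeProxy* helpers above wrap wait.PollImmediate around a condition closure and now wrap the resulting timeout error with %w. A generic sketch of that polling shape with a stand-in condition; waiting for a file to appear is an assumption for illustration, not the kube-proxy check.

package main

import (
	"fmt"
	"os"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func waitForFile(path string, timeout time.Duration) error {
	condition := func() (bool, error) {
		if _, err := os.Stat(path); err == nil {
			return true, nil // done
		}
		return false, nil // keep polling
	}
	if err := wait.PollImmediate(time.Second, timeout, condition); err != nil {
		// Wrapping with %w lets callers detect the timeout (e.g. wait.ErrWaitTimeout) with errors.Is.
		return fmt.Errorf("error waiting for %s to appear: %w", path, err)
	}
	return nil
}

func main() {
	if err := waitForFile("/tmp/ready", 5*time.Second); err != nil {
		fmt.Println(err)
	}
}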

Some files were not shown because too many files have changed in this diff