e2e: use error wrapping with %w

The recently introduced failure handling in ExpectNoError depends on error
wrapping: if an error prefix gets added with `fmt.Errorf("foo: %v", err)`,
ExpectNoError cannot detect that the root cause is an assertion failure; it
then adds another useless "unexpected error" prefix and does not dump the
additional failure information (currently the backtrace inside the E2E
framework).
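
For illustration only (not part of this commit), a minimal sketch of the
difference: errors.Is can follow a chain built with %w back to the root
cause, while %v merely copies the error's text into the new message:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // errAssertionFailed stands in for the framework's internal failure
    // error; the real type checked by ExpectNoError may differ.
    var errAssertionFailed = errors.New("assertion failed")

    func main() {
    	wrapped := fmt.Errorf("failed to list pods: %w", errAssertionFailed)
    	flattened := fmt.Errorf("failed to list pods: %v", errAssertionFailed)

    	fmt.Println(errors.Is(wrapped, errAssertionFailed))   // true: chain preserved
    	fmt.Println(errors.Is(flattened, errAssertionFailed)) // false: only the text survived
    }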

Instead of manually deciding on a case-by-case basis where %w is needed, all
error wrapping was updated automatically with

    sed -i "s/fmt.Errorf\(.*\): '*\(%s\|%v\)'*\",\(.* err)\)/fmt.Errorf\1: %w\",\3/" $(git grep -l 'fmt.Errorf' test/e2e*)

This may be unnecessary in some cases, but it's not wrong.
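
As a quick follow-up check (an illustration, not part of the commit), any
remaining `%s`/`%v` formatting of an `err` value can be located with

    git grep -nE 'fmt\.Errorf\(.*%[sv]", .*err\)' test/e2e*

and reviewed manually; hits there are candidates for the same rewrite.
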
Patrick Ohly 2023-01-31 08:22:39 +01:00
parent 5973e2c8cb
commit 222f655062
104 changed files with 374 additions and 374 deletions


@ -73,7 +73,7 @@ func restartAPIServer(ctx context.Context, node *v1.Node) error {
result, err := e2essh.SSH(ctx, cmd, net.JoinHostPort(controlPlaneAddress, e2essh.SSHPort), framework.TestContext.Provider)
if err != nil || result.Code != 0 {
e2essh.LogResult(result)
return fmt.Errorf("couldn't restart kube-apiserver: %v", err)
return fmt.Errorf("couldn't restart kube-apiserver: %w", err)
}
return nil
}


@ -561,7 +561,7 @@ func setupCRDAndVerifySchemaWithOptions(f *framework.Framework, schema, expect [
})
crd, err := crd.CreateMultiVersionTestCRD(f, group, options...)
if err != nil {
return nil, fmt.Errorf("failed to create CRD: %v", err)
return nil, fmt.Errorf("failed to create CRD: %w", err)
}
for _, v := range crd.Crd.Spec.Versions {
@ -623,7 +623,7 @@ func waitForDefinition(c k8sclientset.Interface, name string, schema []byte) err
return true, ""
})
if err != nil {
return fmt.Errorf("failed to wait for definition %q to be served with the right OpenAPI schema: %v", name, err)
return fmt.Errorf("failed to wait for definition %q to be served with the right OpenAPI schema: %w", name, err)
}
return nil
}
@ -637,7 +637,7 @@ func waitForDefinitionCleanup(c k8sclientset.Interface, name string) error {
return true, ""
})
if err != nil {
return fmt.Errorf("failed to wait for definition %q not to be served anymore: %v", name, err)
return fmt.Errorf("failed to wait for definition %q not to be served anymore: %w", name, err)
}
return nil
}
@ -718,7 +718,7 @@ func dropDefaults(s *spec.Schema) {
func verifyKubectlExplain(ns, name, pattern string) error {
result, err := e2ekubectl.RunKubectl(ns, "explain", name)
if err != nil {
return fmt.Errorf("failed to explain %s: %v", name, err)
return fmt.Errorf("failed to explain %s: %w", name, err)
}
r := regexp.MustCompile(pattern)
if !r.Match([]byte(result)) {


@ -181,7 +181,7 @@ func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects
case "Pods":
pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list pods: %v", err)
return false, fmt.Errorf("failed to list pods: %w", err)
}
if len(pods.Items) != num {
ret = false
@ -190,7 +190,7 @@ func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects
case "Deployments":
deployments, err := f.ClientSet.AppsV1().Deployments(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list deployments: %v", err)
return false, fmt.Errorf("failed to list deployments: %w", err)
}
if len(deployments.Items) != num {
ret = false
@ -199,7 +199,7 @@ func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects
case "ReplicaSets":
rs, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list rs: %v", err)
return false, fmt.Errorf("failed to list rs: %w", err)
}
if len(rs.Items) != num {
ret = false
@ -208,7 +208,7 @@ func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects
case "ReplicationControllers":
rcs, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list replication controllers: %v", err)
return false, fmt.Errorf("failed to list replication controllers: %w", err)
}
if len(rcs.Items) != num {
ret = false
@ -217,7 +217,7 @@ func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects
case "CronJobs":
cronJobs, err := f.ClientSet.BatchV1().CronJobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list cronjobs: %v", err)
return false, fmt.Errorf("failed to list cronjobs: %w", err)
}
if len(cronJobs.Items) != num {
ret = false
@ -226,7 +226,7 @@ func verifyRemainingObjects(ctx context.Context, f *framework.Framework, objects
case "Jobs":
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list jobs: %v", err)
return false, fmt.Errorf("failed to list jobs: %w", err)
}
if len(jobs.Items) != num {
ret = false
@ -325,7 +325,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
pods, err := podClient.List(ctx, metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list pods: %v", err)
return false, fmt.Errorf("failed to list pods: %w", err)
}
// We intentionally don't wait the number of pods to reach
// rc.Spec.Replicas. We want to see if the garbage collector and the
@ -383,7 +383,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
rc, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get rc: %v", err)
return false, fmt.Errorf("failed to get rc: %w", err)
}
if rc.Status.Replicas == *rc.Spec.Replicas {
return true, nil
@ -410,7 +410,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.PollWithContext(ctx, 5*time.Second, 120*time.Second+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
rcs, err := rcClient.List(ctx, metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list rcs: %v", err)
return false, fmt.Errorf("failed to list rcs: %w", err)
}
if len(rcs.Items) != 0 {
return false, nil
@ -452,7 +452,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
rc, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get rc: %v", err)
return false, fmt.Errorf("failed to get rc: %w", err)
}
if rc.Status.Replicas == *rc.Spec.Replicas {
return true, nil
@ -505,7 +505,7 @@ var _ = SIGDescribe("Garbage collector", func() {
err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) {
rsList, err := rsClient.List(ctx, metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list rs: %v", err)
return false, fmt.Errorf("failed to list rs: %w", err)
}
return len(rsList.Items) > 0, nil
@ -530,7 +530,7 @@ var _ = SIGDescribe("Garbage collector", func() {
errList = append(errList, err)
remainingRSs, err := rsClient.List(ctx, metav1.ListOptions{})
if err != nil {
errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %v", err))
errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %w", err))
} else {
errList = append(errList, fmt.Errorf("remaining rs are: %#v", remainingRSs))
}
@ -565,7 +565,7 @@ var _ = SIGDescribe("Garbage collector", func() {
err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) {
rsList, err := rsClient.List(ctx, metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list rs: %v", err)
return false, fmt.Errorf("failed to list rs: %w", err)
}
if len(rsList.Items) > 0 {
replicaset = rsList.Items[0]
@ -599,7 +599,7 @@ var _ = SIGDescribe("Garbage collector", func() {
err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 1*time.Minute+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
dList, err := deployClient.List(ctx, metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list deployments: %v", err)
return false, fmt.Errorf("failed to list deployments: %w", err)
}
return len(dList.Items) == 0, nil
})
@ -616,13 +616,13 @@ var _ = SIGDescribe("Garbage collector", func() {
errList := make([]error, 0)
remainingRSs, err := rsClient.List(ctx, metav1.ListOptions{})
if err != nil {
errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %v", err))
errList = append(errList, fmt.Errorf("failed to list RSs post mortem: %w", err))
} else {
errList = append(errList, fmt.Errorf("remaining rs post mortem: %#v", remainingRSs))
}
remainingDSs, err := deployClient.List(ctx, metav1.ListOptions{})
if err != nil {
errList = append(errList, fmt.Errorf("failed to list Deployments post mortem: %v", err))
errList = append(errList, fmt.Errorf("failed to list Deployments post mortem: %w", err))
} else {
errList = append(errList, fmt.Errorf("remaining deployment's post mortem: %#v", remainingDSs))
}
@ -663,7 +663,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
rc, err := rcClient.Get(ctx, rc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get rc: %v", err)
return false, fmt.Errorf("failed to get rc: %w", err)
}
if rc.Status.Replicas == *rc.Spec.Replicas {
return true, nil
@ -758,7 +758,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.PollWithContext(ctx, 5*time.Second, 30*time.Second, func(ctx context.Context) (bool, error) {
rc1, err := rcClient.Get(ctx, rc1.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get rc: %v", err)
return false, fmt.Errorf("failed to get rc: %w", err)
}
if rc1.Status.Replicas == *rc1.Spec.Replicas {
return true, nil
@ -889,7 +889,7 @@ var _ = SIGDescribe("Garbage collector", func() {
if err := wait.PollWithContext(ctx, 5*time.Second, 90*time.Second+gcInformerResyncRetryTimeout, func(ctx context.Context) (bool, error) {
pods, err2 = podClient.List(ctx, metav1.ListOptions{})
if err2 != nil {
return false, fmt.Errorf("failed to list pods: %v", err)
return false, fmt.Errorf("failed to list pods: %w", err)
}
if len(pods.Items) == 0 {
return true, nil
@ -1125,7 +1125,7 @@ var _ = SIGDescribe("Garbage collector", func() {
return false, nil
}
if err != nil && !apierrors.IsNotFound(err) {
return false, fmt.Errorf("failed to get owner: %v", err)
return false, fmt.Errorf("failed to get owner: %w", err)
}
return true, nil
}); err != nil {
@ -1153,7 +1153,7 @@ var _ = SIGDescribe("Garbage collector", func() {
err = wait.PollImmediateWithContext(ctx, 500*time.Millisecond, 2*time.Minute, func(ctx context.Context) (bool, error) {
jobs, err := f.ClientSet.BatchV1().Jobs(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("failed to list jobs: %v", err)
return false, fmt.Errorf("failed to list jobs: %w", err)
}
return len(jobs.Items) > 0, nil
})


@ -1250,7 +1250,7 @@ func waitFailedDaemonPodDeleted(c clientset.Interface, pod *v1.Pod) func(ctx con
if apierrors.IsNotFound(err) {
return true, nil
}
return false, fmt.Errorf("failed to get failed daemon pod %q: %v", pod.Name, err)
return false, fmt.Errorf("failed to get failed daemon pod %q: %w", pod.Name, err)
}
return false, nil
}


@ -512,9 +512,9 @@ func TestReplicationControllerServeImageOrFail(ctx context.Context, f *framework
if err != nil {
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
if getErr == nil {
err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %w", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
} else {
err = fmt.Errorf("pod %q never run: %v", pod.Name, err)
err = fmt.Errorf("pod %q never run: %w", pod.Name, err)
}
}
framework.ExpectNoError(err)


@ -210,9 +210,9 @@ func testReplicaSetServeImageOrFail(ctx context.Context, f *framework.Framework,
if err != nil {
updatePod, getErr := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
if getErr == nil {
err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
err = fmt.Errorf("pod %q never run (phase: %s, conditions: %+v): %w", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
} else {
err = fmt.Errorf("pod %q never run: %v", pod.Name, err)
err = fmt.Errorf("pod %q never run: %w", pod.Name, err)
}
}
framework.ExpectNoError(err)


@ -507,7 +507,7 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}
tokenCount, err := ParseInClusterClientLogs(logs)
if err != nil {
return false, fmt.Errorf("inclusterclient reported an error: %v", err)
return false, fmt.Errorf("inclusterclient reported an error: %w", err)
}
if tokenCount < 2 {
framework.Logf("Retrying. Still waiting to see more unique tokens: got=%d, want=2", tokenCount)


@ -1166,7 +1166,7 @@ func enableAutoscaler(nodePool string, minCount, maxCount int) error {
if err != nil {
klog.Errorf("Failed config update result: %s", output)
return fmt.Errorf("Failed to enable autoscaling: %v", err)
return fmt.Errorf("Failed to enable autoscaling: %w", err)
}
klog.Infof("Config update result: %s", output)
@ -1190,7 +1190,7 @@ func disableAutoscaler(nodePool string, minCount, maxCount int) error {
if err != nil {
klog.Errorf("Failed config update result: %s", output)
return fmt.Errorf("Failed to disable autoscaling: %v", err)
return fmt.Errorf("Failed to disable autoscaling: %w", err)
}
klog.Infof("Config update result: %s", output)
@ -1384,7 +1384,7 @@ func waitForCaPodsReadyInNamespace(ctx context.Context, f *framework.Framework,
for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)) && ctx.Err() == nil; time.Sleep(20 * time.Second) {
pods, err := c.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
return fmt.Errorf("failed to get pods: %v", err)
return fmt.Errorf("failed to get pods: %w", err)
}
notready = make([]string, 0)
for _, pod := range pods.Items {


@ -355,7 +355,7 @@ func waitForDNSReplicasSatisfied(ctx context.Context, c clientset.Interface, get
}
if err = wait.Poll(2*time.Second, timeout, condition); err != nil {
return fmt.Errorf("err waiting for DNS replicas to satisfy %v, got %v: %v", expected, current, err)
return fmt.Errorf("err waiting for DNS replicas to satisfy %v, got %v: %w", expected, current, err)
}
framework.Logf("kube-dns reaches expected replicas: %v", expected)
return nil
@ -372,7 +372,7 @@ func waitForDNSConfigMapCreated(ctx context.Context, c clientset.Interface, time
}
if err = wait.Poll(time.Second, timeout, condition); err != nil {
return nil, fmt.Errorf("err waiting for DNS autoscaling ConfigMap got re-created: %v", err)
return nil, fmt.Errorf("err waiting for DNS autoscaling ConfigMap got re-created: %w", err)
}
return configMap, nil
}


@ -375,7 +375,7 @@ func waitForReplicationController(ctx context.Context, c clientset.Interface, na
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for ReplicationController %s/%s %s: %v", namespace, name, stateMsg[exist], err)
return fmt.Errorf("error waiting for ReplicationController %s/%s %s: %w", namespace, name, stateMsg[exist], err)
}
return nil
}
@ -402,7 +402,7 @@ func waitForServiceWithSelector(ctx context.Context, c clientset.Interface, name
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for service with %s in namespace %s %s: %v", selector.String(), namespace, stateMsg[exist], err)
return fmt.Errorf("error waiting for service with %s in namespace %s %s: %w", selector.String(), namespace, stateMsg[exist], err)
}
return nil
}
@ -426,7 +426,7 @@ func waitForReplicationControllerWithSelector(ctx context.Context, c clientset.I
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for ReplicationControllers with %s in namespace %s %s: %v", selector.String(), namespace, stateMsg[exist], err)
return fmt.Errorf("error waiting for ReplicationControllers with %s in namespace %s %s: %w", selector.String(), namespace, stateMsg[exist], err)
}
return nil
}
@ -437,7 +437,7 @@ func getMasterSSHClient() (*ssh.Client, error) {
// Get a signer for the provider.
signer, err := e2essh.GetSigner(framework.TestContext.Provider)
if err != nil {
return nil, fmt.Errorf("error getting signer for provider %s: '%v'", framework.TestContext.Provider, err)
return nil, fmt.Errorf("error getting signer for provider %s: %w", framework.TestContext.Provider, err)
}
sshUser := os.Getenv("KUBE_SSH_USER")
@ -453,7 +453,7 @@ func getMasterSSHClient() (*ssh.Client, error) {
host := framework.APIAddress() + ":22"
client, err := ssh.Dial("tcp", host, config)
if err != nil {
return nil, fmt.Errorf("error getting SSH client to host %s: '%v'", host, err)
return nil, fmt.Errorf("error getting SSH client to host %s: %w", host, err)
}
return client, err
}
@ -468,7 +468,7 @@ func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
framework.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
session, err := client.NewSession()
if err != nil {
return "", "", 0, fmt.Errorf("error creating session to host %s: '%v'", client.RemoteAddr(), err)
return "", "", 0, fmt.Errorf("error creating session to host %s: %w", client.RemoteAddr(), err)
}
defer session.Close()
@ -490,7 +490,7 @@ func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
} else {
// Some other kind of error happened (e.g. an IOError); consider the
// SSH unsuccessful.
err = fmt.Errorf("failed running `%s` on %s: '%v'", cmd, client.RemoteAddr(), err)
err = fmt.Errorf("failed running `%s` on %s: %w", cmd, client.RemoteAddr(), err)
}
}
return bout.String(), berr.String(), code, err
@ -500,7 +500,7 @@ func writeRemoteFile(sshClient *ssh.Client, data, dir, fileName string, mode os.
framework.Logf(fmt.Sprintf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr()))
session, err := sshClient.NewSession()
if err != nil {
return fmt.Errorf("error creating session to host %s: '%v'", sshClient.RemoteAddr(), err)
return fmt.Errorf("error creating session to host %s: %w", sshClient.RemoteAddr(), err)
}
defer session.Close()


@ -82,7 +82,7 @@ func realVersion(s string) (string, error) {
framework.Logf("Getting real version for %q", s)
v, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/get-build.sh"), "-v", s)
if err != nil {
return v, fmt.Errorf("error getting real version for %q: %v", s, err)
return v, fmt.Errorf("error getting real version for %q: %w", s, err)
}
framework.Logf("Version for %q is %q", s, v)
return strings.TrimPrefix(strings.TrimSpace(v), "v"), nil


@ -131,7 +131,7 @@ func checkControlPlaneVersion(ctx context.Context, c clientset.Interface, want s
return true, nil
})
if waitErr != nil {
return fmt.Errorf("CheckControlPlane() couldn't get the control plane version: %v", err)
return fmt.Errorf("CheckControlPlane() couldn't get the control plane version: %w", err)
}
// We do prefix trimming and then matching because:
// want looks like: 0.19.3-815-g50e67d4


@ -39,15 +39,15 @@ import (
func getPatchBytes(oldLease, newLease *coordinationv1.Lease) ([]byte, error) {
oldData, err := json.Marshal(oldLease)
if err != nil {
return nil, fmt.Errorf("failed to Marshal oldData: %v", err)
return nil, fmt.Errorf("failed to Marshal oldData: %w", err)
}
newData, err := json.Marshal(newLease)
if err != nil {
return nil, fmt.Errorf("failed to Marshal newData: %v", err)
return nil, fmt.Errorf("failed to Marshal newData: %w", err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, coordinationv1.Lease{})
if err != nil {
return nil, fmt.Errorf("failed to CreateTwoWayMergePatch: %v", err)
return nil, fmt.Errorf("failed to CreateTwoWayMergePatch: %w", err)
}
return patchBytes, nil
}


@ -310,7 +310,7 @@ while true; do sleep 1; done
checkContainerStatus := func(ctx context.Context) error {
status, err := container.GetStatus(ctx)
if err != nil {
return fmt.Errorf("failed to get container status: %v", err)
return fmt.Errorf("failed to get container status: %w", err)
}
// We need to check container state first. The default pod status is pending, If we check pod phase first,
// and the expected pod phase is Pending, the container status may not even show up when we check it.
@ -335,7 +335,7 @@ while true; do sleep 1; done
// Check pod phase
phase, err := container.GetPhase(ctx)
if err != nil {
return fmt.Errorf("failed to get pod phase: %v", err)
return fmt.Errorf("failed to get pod phase: %w", err)
}
if phase != expectedPhase {
return fmt.Errorf("expected pod phase: %q, got: %q", expectedPhase, phase)


@ -195,11 +195,11 @@ func RestartNodes(c clientset.Interface, nodes []v1.Node) error {
if err := wait.Poll(30*time.Second, framework.RestartNodeReadyAgainTimeout, func() (bool, error) {
newNode, err := c.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error getting node info after reboot: %s", err)
return false, fmt.Errorf("error getting node info after reboot: %w", err)
}
return node.Status.NodeInfo.BootID != newNode.Status.NodeInfo.BootID, nil
}); err != nil {
return fmt.Errorf("error waiting for node %s boot ID to change: %s", node.Name, err)
return fmt.Errorf("error waiting for node %s boot ID to change: %w", node.Name, err)
}
}
return nil


@ -110,7 +110,7 @@ var _ = ginkgo.Describe("[sig-node] DRA [Feature:DynamicResourceAllocation]", fu
gomega.Consistently(func() error {
testPod, err := b.f.ClientSet.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("expected the test pod %s to exist: %v", pod.Name, err)
return fmt.Errorf("expected the test pod %s to exist: %w", pod.Name, err)
}
if testPod.Status.Phase != v1.PodPending {
return fmt.Errorf("pod %s: unexpected status %s, expected status: %s", pod.Name, testPod.Status.Phase, v1.PodPending)


@ -148,7 +148,7 @@ func (c *ExampleController) GetClaimParameters(ctx context.Context, claim *resou
func (c *ExampleController) readParametersFromConfigMap(ctx context.Context, namespace, name string) (map[string]string, error) {
configMap, err := c.clientset.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("get config map: %v", err)
return nil, fmt.Errorf("get config map: %w", err)
}
return configMap.Data, nil
}
@ -221,7 +221,7 @@ func (c *ExampleController) allocate(ctx context.Context, claim *resourcev1alpha
toEnvVars("admin", classParameters, p.EnvVars)
data, err := json.Marshal(p)
if err != nil {
return nil, fmt.Errorf("encode parameters: %v", err)
return nil, fmt.Errorf("encode parameters: %w", err)
}
allocation.ResourceHandle = string(data)
var nodes []string


@ -97,7 +97,7 @@ func StartPlugin(logger klog.Logger, cdiDir, driverName string, nodeName string,
)
d, err := kubeletplugin.Start(ex, opts...)
if err != nil {
return nil, fmt.Errorf("start kubelet plugin: %v", err)
return nil, fmt.Errorf("start kubelet plugin: %w", err)
}
ex.d = d
@ -127,7 +127,7 @@ func (ex *ExamplePlugin) NodePrepareResource(ctx context.Context, req *drapbv1.N
// Determine environment variables.
var p parameters
if err := json.Unmarshal([]byte(req.ResourceHandle), &p); err != nil {
return nil, fmt.Errorf("unmarshal resource handle: %v", err)
return nil, fmt.Errorf("unmarshal resource handle: %w", err)
}
// Sanity check scheduling.
@ -161,7 +161,7 @@ func (ex *ExamplePlugin) NodePrepareResource(ctx context.Context, req *drapbv1.N
filePath := ex.getJSONFilePath(req.ClaimUid)
buffer, err := json.Marshal(spec)
if err != nil {
return nil, fmt.Errorf("marshal spec: %v", err)
return nil, fmt.Errorf("marshal spec: %w", err)
}
if err := ex.fileOps.Create(filePath, buffer); err != nil {
return nil, fmt.Errorf("failed to write CDI file %v", err)
@ -186,7 +186,7 @@ func (ex *ExamplePlugin) NodeUnprepareResource(ctx context.Context, req *drapbv1
filePath := ex.getJSONFilePath(req.ClaimUid)
if err := ex.fileOps.Remove(filePath); err != nil {
return nil, fmt.Errorf("error removing CDI file: %v", err)
return nil, fmt.Errorf("error removing CDI file: %w", err)
}
logger.V(3).Info("CDI file removed", "path", filePath)


@ -115,12 +115,12 @@ func NewCommand() *cobra.Command {
if *kubeconfig == "" {
config, err = rest.InClusterConfig()
if err != nil {
return fmt.Errorf("create in-cluster client configuration: %v", err)
return fmt.Errorf("create in-cluster client configuration: %w", err)
}
} else {
config, err = clientcmd.BuildConfigFromFlags("", *kubeconfig)
if err != nil {
return fmt.Errorf("create out-of-cluster client configuration: %v", err)
return fmt.Errorf("create out-of-cluster client configuration: %w", err)
}
}
config.QPS = *kubeAPIQPS
@ -128,7 +128,7 @@ func NewCommand() *cobra.Command {
clientset, err = kubernetes.NewForConfig(config)
if err != nil {
return fmt.Errorf("create client: %v", err)
return fmt.Errorf("create client: %w", err)
}
if *httpEndpoint != "" {
@ -158,7 +158,7 @@ func NewCommand() *cobra.Command {
listener, err := net.Listen("tcp", *httpEndpoint)
if err != nil {
return fmt.Errorf("listen on HTTP endpoint: %v", err)
return fmt.Errorf("listen on HTTP endpoint: %w", err)
}
go func() {
@ -203,12 +203,12 @@ func NewCommand() *cobra.Command {
if *resourceConfig != "" {
file, err := os.Open(*resourceConfig)
if err != nil {
return fmt.Errorf("open resource config: %v", err)
return fmt.Errorf("open resource config: %w", err)
}
decoder := json.NewDecoder(file)
decoder.DisallowUnknownFields()
if err := decoder.Decode(&resources); err != nil {
return fmt.Errorf("parse resource config %q: %v", *resourceConfig, err)
return fmt.Errorf("parse resource config %q: %w", *resourceConfig, err)
}
}
@ -230,7 +230,7 @@ func NewCommand() *cobra.Command {
// exceeds the QPS+burst limits.
leClientset, err := kubernetes.NewForConfig(config)
if err != nil {
return fmt.Errorf("create leaderelection client: %v", err)
return fmt.Errorf("create leaderelection client: %w", err)
}
le := leaderelection.New(leClientset, lockName,
@ -246,7 +246,7 @@ func NewCommand() *cobra.Command {
le.PrepareHealthCheck(mux)
}
if err := le.Run(); err != nil {
return fmt.Errorf("leader election failed: %v", err)
return fmt.Errorf("leader election failed: %w", err)
}
return nil
@ -275,10 +275,10 @@ func NewCommand() *cobra.Command {
// to know early if there is a setup problem that would prevent
// creating those directories.
if err := os.MkdirAll(*cdiDir, os.FileMode(0750)); err != nil {
return fmt.Errorf("create CDI directory: %v", err)
return fmt.Errorf("create CDI directory: %w", err)
}
if err := os.MkdirAll(filepath.Dir(*endpoint), 0750); err != nil {
return fmt.Errorf("create socket directory: %v", err)
return fmt.Errorf("create socket directory: %w", err)
}
plugin, err := StartPlugin(logger, *cdiDir, *driverName, "", FileOperations{},
@ -287,7 +287,7 @@ func NewCommand() *cobra.Command {
kubeletplugin.KubeletPluginSocketPath(*draAddress),
)
if err != nil {
return fmt.Errorf("start example plugin: %v", err)
return fmt.Errorf("start example plugin: %w", err)
}
// Handle graceful shutdown. We need to delete Unix domain


@ -1001,7 +1001,7 @@ func CreateCustomSubresourceInstance(ctx context.Context, namespace, name string
}
createdObjectMeta, err := meta.Accessor(instance)
if err != nil {
return nil, fmt.Errorf("Error while creating object meta: %v", err)
return nil, fmt.Errorf("Error while creating object meta: %w", err)
}
if len(createdObjectMeta.GetUID()) == 0 {
return nil, fmt.Errorf("Missing UUID: %v", instance)


@ -75,12 +75,12 @@ func CreateDeployment(ctx context.Context, client clientset.Interface, replicas
deploymentSpec := testDeployment(replicas, podLabels, nodeSelector, namespace, pvclaims, false, command)
deployment, err := client.AppsV1().Deployments(namespace).Create(ctx, deploymentSpec, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err)
return nil, fmt.Errorf("deployment %q Create API error: %w", deploymentSpec.Name, err)
}
framework.Logf("Waiting deployment %q to complete", deploymentSpec.Name)
err = WaitForDeploymentComplete(client, deployment)
if err != nil {
return nil, fmt.Errorf("deployment %q failed to complete: %v", deploymentSpec.Name, err)
return nil, fmt.Errorf("deployment %q failed to complete: %w", deploymentSpec.Name, err)
}
return deployment, nil
}


@ -42,7 +42,7 @@ func eventOccurred(c clientset.Interface, namespace, eventSelector, msg string)
return func(ctx context.Context) (bool, error) {
events, err := c.CoreV1().Events(namespace).List(ctx, options)
if err != nil {
return false, fmt.Errorf("got error while getting events: %v", err)
return false, fmt.Errorf("got error while getting events: %w", err)
}
for _, event := range events.Items {
if strings.Contains(event.Message, msg) {


@ -309,7 +309,7 @@ func GenerateRSACerts(host string, isCA bool) ([]byte, []byte, error) {
}
priv, err := rsa.GenerateKey(rand.Reader, rsaBits)
if err != nil {
return nil, nil, fmt.Errorf("Failed to generate key: %v", err)
return nil, nil, fmt.Errorf("Failed to generate key: %w", err)
}
notBefore := time.Now()
notAfter := notBefore.Add(validFor)
@ -318,7 +318,7 @@ func GenerateRSACerts(host string, isCA bool) ([]byte, []byte, error) {
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, nil, fmt.Errorf("failed to generate serial number: %s", err)
return nil, nil, fmt.Errorf("failed to generate serial number: %w", err)
}
template := x509.Certificate{
SerialNumber: serialNumber,
@ -351,13 +351,13 @@ func GenerateRSACerts(host string, isCA bool) ([]byte, []byte, error) {
var keyOut, certOut bytes.Buffer
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
if err != nil {
return nil, nil, fmt.Errorf("Failed to create certificate: %s", err)
return nil, nil, fmt.Errorf("Failed to create certificate: %w", err)
}
if err := pem.Encode(&certOut, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}); err != nil {
return nil, nil, fmt.Errorf("Failed creating cert: %v", err)
return nil, nil, fmt.Errorf("Failed creating cert: %w", err)
}
if err := pem.Encode(&keyOut, &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil {
return nil, nil, fmt.Errorf("Failed creating key: %v", err)
return nil, nil, fmt.Errorf("Failed creating key: %w", err)
}
return certOut.Bytes(), keyOut.Bytes(), nil
}
@ -532,11 +532,11 @@ func ingressFromManifest(fileName string) (*networkingv1.Ingress, error) {
func ingressToManifest(ing *networkingv1.Ingress, path string) error {
serialized, err := marshalToYaml(ing, networkingv1.SchemeGroupVersion)
if err != nil {
return fmt.Errorf("failed to marshal ingress %v to YAML: %v", ing, err)
return fmt.Errorf("failed to marshal ingress %v to YAML: %w", ing, err)
}
if err := os.WriteFile(path, serialized, 0600); err != nil {
return fmt.Errorf("error in writing ingress to file: %s", err)
return fmt.Errorf("error in writing ingress to file: %w", err)
}
return nil
}
@ -1150,17 +1150,17 @@ func (j *TestJig) DeleteTestResource(ctx context.Context, cs clientset.Interface
var errs []error
if ing != nil {
if err := j.runDelete(ctx, ing); err != nil {
errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err))
errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %w", ing.Namespace, ing.Name, err))
}
}
if svc != nil {
if err := cs.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{}); err != nil {
errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err))
errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %w", svc.Namespace, svc.Name, err))
}
}
if deploy != nil {
if err := cs.AppsV1().Deployments(deploy.Namespace).Delete(ctx, deploy.Name, metav1.DeleteOptions{}); err != nil {
errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %v", deploy.Namespace, deploy.Name, err))
errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %w", deploy.Namespace, deploy.Name, err))
}
}
return errs


@ -41,7 +41,7 @@ func RestartControllerManager(ctx context.Context) error {
result, err := e2essh.SSH(ctx, cmd, net.JoinHostPort(framework.APIAddress(), e2essh.SSHPort), framework.TestContext.Provider)
if err != nil || result.Code != 0 {
e2essh.LogResult(result)
return fmt.Errorf("couldn't restart controller-manager: %v", err)
return fmt.Errorf("couldn't restart controller-manager: %w", err)
}
return nil
}


@ -115,7 +115,7 @@ func DaemonSetFromURL(ctx context.Context, url string) (*appsv1.DaemonSet, error
}
if err != nil {
return nil, fmt.Errorf("Failed to get url: %v", err)
return nil, fmt.Errorf("Failed to get url: %w", err)
}
if response.StatusCode != 200 {
return nil, fmt.Errorf("invalid http response status: %v", response.StatusCode)
@ -124,7 +124,7 @@ func DaemonSetFromURL(ctx context.Context, url string) (*appsv1.DaemonSet, error
data, err := io.ReadAll(response.Body)
if err != nil {
return nil, fmt.Errorf("Failed to read html response body: %v", err)
return nil, fmt.Errorf("Failed to read html response body: %w", err)
}
return DaemonSetFromData(data)
}
@ -134,12 +134,12 @@ func DaemonSetFromData(data []byte) (*appsv1.DaemonSet, error) {
var ds appsv1.DaemonSet
dataJSON, err := utilyaml.ToJSON(data)
if err != nil {
return nil, fmt.Errorf("Failed to parse data to json: %v", err)
return nil, fmt.Errorf("Failed to parse data to json: %w", err)
}
err = runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), dataJSON, &ds)
if err != nil {
return nil, fmt.Errorf("Failed to decode DaemonSet spec: %v", err)
return nil, fmt.Errorf("Failed to decode DaemonSet spec: %w", err)
}
return &ds, nil
}


@ -1026,7 +1026,7 @@ func PokeHTTP(host string, port int, path string, params *HTTPPokeParams) HTTPPo
body, err := io.ReadAll(resp.Body)
if err != nil {
ret.Status = HTTPError
ret.Error = fmt.Errorf("error reading HTTP body: %v", err)
ret.Error = fmt.Errorf("error reading HTTP body: %w", err)
framework.Logf("Poke(%q): %v", url, ret.Error)
return ret
}
@ -1191,7 +1191,7 @@ func WaitForService(ctx context.Context, c clientset.Interface, namespace, name
})
if err != nil {
stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
return fmt.Errorf("error waiting for service %s/%s %s: %v", namespace, name, stateMsg[exist], err)
return fmt.Errorf("error waiting for service %s/%s %s: %w", namespace, name, stateMsg[exist], err)
}
return nil
}


@ -107,7 +107,7 @@ func NodeHasTaint(ctx context.Context, c clientset.Interface, nodeName string, t
// default test add-ons.
func AllNodesReady(ctx context.Context, c clientset.Interface, timeout time.Duration) error {
if err := allNodesReady(ctx, c, timeout); err != nil {
return fmt.Errorf("checking for ready nodes: %v", err)
return fmt.Errorf("checking for ready nodes: %w", err)
}
return nil
}


@ -296,7 +296,7 @@ func CollectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []stri
func PickIP(ctx context.Context, c clientset.Interface) (string, error) {
publicIps, err := GetPublicIps(ctx, c)
if err != nil {
return "", fmt.Errorf("get node public IPs error: %s", err)
return "", fmt.Errorf("get node public IPs error: %w", err)
}
if len(publicIps) == 0 {
return "", fmt.Errorf("got unexpected number (%d) of public IPs", len(publicIps))
@ -309,7 +309,7 @@ func PickIP(ctx context.Context, c clientset.Interface) (string, error) {
func GetPublicIps(ctx context.Context, c clientset.Interface) ([]string, error) {
nodes, err := GetReadySchedulableNodes(ctx, c)
if err != nil {
return nil, fmt.Errorf("get schedulable and ready nodes error: %s", err)
return nil, fmt.Errorf("get schedulable and ready nodes error: %w", err)
}
ips := CollectAddresses(nodes, v1.NodeExternalIP)
if len(ips) == 0 {
@ -327,7 +327,7 @@ func GetPublicIps(ctx context.Context, c clientset.Interface) ([]string, error)
func GetReadySchedulableNodes(ctx context.Context, c clientset.Interface) (nodes *v1.NodeList, err error) {
nodes, err = checkWaitListSchedulableNodes(ctx, c)
if err != nil {
return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
return nil, fmt.Errorf("listing schedulable nodes error: %w", err)
}
Filter(nodes, func(node v1.Node) bool {
return IsNodeSchedulable(&node) && isNodeUntainted(&node)
@ -376,7 +376,7 @@ func GetRandomReadySchedulableNode(ctx context.Context, c clientset.Interface) (
func GetReadyNodesIncludingTainted(ctx context.Context, c clientset.Interface) (nodes *v1.NodeList, err error) {
nodes, err = checkWaitListSchedulableNodes(ctx, c)
if err != nil {
return nil, fmt.Errorf("listing schedulable nodes error: %s", err)
return nil, fmt.Errorf("listing schedulable nodes error: %w", err)
}
Filter(nodes, func(node v1.Node) bool {
return IsNodeSchedulable(&node)
@ -536,7 +536,7 @@ func PodNodePairs(ctx context.Context, c clientset.Interface, ns string) ([]PodN
func GetClusterZones(ctx context.Context, c clientset.Interface) (sets.String, error) {
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err)
return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %w", err)
}
// collect values of zone label from all nodes
@ -558,7 +558,7 @@ func GetSchedulableClusterZones(ctx context.Context, c clientset.Interface) (set
// GetReadySchedulableNodes already filters our tainted and unschedulable nodes.
nodes, err := GetReadySchedulableNodes(ctx, c)
if err != nil {
return nil, fmt.Errorf("error getting nodes while attempting to list cluster zones: %v", err)
return nil, fmt.Errorf("error getting nodes while attempting to list cluster zones: %w", err)
}
// collect values of zone label from all nodes
@ -781,7 +781,7 @@ func removeNodeTaint(ctx context.Context, c clientset.Interface, nodeName string
func patchNodeTaints(ctx context.Context, c clientset.Interface, nodeName string, oldNode *v1.Node, newNode *v1.Node) error {
oldData, err := json.Marshal(oldNode)
if err != nil {
return fmt.Errorf("failed to marshal old node %#v for node %q: %v", oldNode, nodeName, err)
return fmt.Errorf("failed to marshal old node %#v for node %q: %w", oldNode, nodeName, err)
}
newTaints := newNode.Spec.Taints
@ -789,12 +789,12 @@ func patchNodeTaints(ctx context.Context, c clientset.Interface, nodeName string
newNodeClone.Spec.Taints = newTaints
newData, err := json.Marshal(newNodeClone)
if err != nil {
return fmt.Errorf("failed to marshal new node %#v for node %q: %v", newNodeClone, nodeName, err)
return fmt.Errorf("failed to marshal new node %#v for node %q: %w", newNodeClone, nodeName, err)
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
if err != nil {
return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err)
return fmt.Errorf("failed to create patch for node %q: %w", nodeName, err)
}
_, err = c.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})


@ -56,17 +56,17 @@ func CreateUnschedulablePod(ctx context.Context, client clientset.Interface, nam
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
return nil, fmt.Errorf("pod Create API error: %w", err)
}
// Waiting for pod to become Unschedulable
err = WaitForPodNameUnschedulableInNamespace(ctx, client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Unschedulable: %v", pod.Name, err)
return pod, fmt.Errorf("pod %q is not Unschedulable: %w", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
return pod, fmt.Errorf("pod Get API error: %w", err)
}
return pod, nil
}
@ -81,17 +81,17 @@ func CreatePod(ctx context.Context, client clientset.Interface, namespace string
pod := MakePod(namespace, nodeSelector, pvclaims, isPrivileged, command)
pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
return nil, fmt.Errorf("pod Create API error: %w", err)
}
// Waiting for pod to be running
err = WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
return pod, fmt.Errorf("pod %q is not Running: %w", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
return pod, fmt.Errorf("pod Get API error: %w", err)
}
return pod, nil
}
@ -105,23 +105,23 @@ func CreateSecPod(ctx context.Context, client clientset.Interface, podConfig *Co
func CreateSecPodWithNodeSelection(ctx context.Context, client clientset.Interface, podConfig *Config, timeout time.Duration) (*v1.Pod, error) {
pod, err := MakeSecPod(podConfig)
if err != nil {
return nil, fmt.Errorf("Unable to create pod: %v", err)
return nil, fmt.Errorf("Unable to create pod: %w", err)
}
pod, err = client.CoreV1().Pods(podConfig.NS).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
return nil, fmt.Errorf("pod Create API error: %w", err)
}
// Waiting for pod to be running
err = WaitTimeoutForPodRunningInNamespace(ctx, client, pod.Name, podConfig.NS, timeout)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
return pod, fmt.Errorf("pod %q is not Running: %w", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(podConfig.NS).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
return pod, fmt.Errorf("pod Get API error: %w", err)
}
return pod, nil
}


@ -65,12 +65,12 @@ func DeletePodWithWaitByName(ctx context.Context, c clientset.Interface, podName
if apierrors.IsNotFound(err) {
return nil // assume pod was already deleted
}
return fmt.Errorf("pod Delete API error: %v", err)
return fmt.Errorf("pod Delete API error: %w", err)
}
framework.Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName)
err = WaitForPodNotFoundInNamespace(ctx, c, podName, podNamespace, PodDeleteTimeout)
if err != nil {
return fmt.Errorf("pod %q was not deleted: %v", podName, err)
return fmt.Errorf("pod %q was not deleted: %w", podName, err)
}
return nil
}
@ -98,7 +98,7 @@ func DeletePodWithGracePeriodByName(ctx context.Context, c clientset.Interface,
if apierrors.IsNotFound(err) {
return nil // assume pod was already deleted
}
return fmt.Errorf("pod Delete API error: %v", err)
return fmt.Errorf("pod Delete API error: %w", err)
}
return nil
}


@ -87,13 +87,13 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
SubResource("portforward")
transport, upgrader, err := spdy.RoundTripperFor(restConfig)
if err != nil {
return nil, fmt.Errorf("create round tripper: %v", err)
return nil, fmt.Errorf("create round tripper: %w", err)
}
dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL())
streamConn, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name)
if err != nil {
return nil, fmt.Errorf("dialer failed: %v", err)
return nil, fmt.Errorf("dialer failed: %w", err)
}
requestID := "1"
defer func() {
@ -112,7 +112,7 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
// This happens asynchronously.
errorStream, err := streamConn.CreateStream(headers)
if err != nil {
return nil, fmt.Errorf("error creating error stream: %v", err)
return nil, fmt.Errorf("error creating error stream: %w", err)
}
errorStream.Close()
go func() {
@ -129,7 +129,7 @@ func (d *Dialer) DialContainerPort(ctx context.Context, addr Addr) (conn net.Con
headers.Set(v1.StreamType, v1.StreamTypeData)
dataStream, err := streamConn.CreateStream(headers)
if err != nil {
return nil, fmt.Errorf("error creating data stream: %v", err)
return nil, fmt.Errorf("error creating data stream: %w", err)
}
return &stream{


@ -107,7 +107,7 @@ func RunHostCmdWithRetries(ns, name, cmd string, interval, timeout time.Duration
return out, nil
}
if elapsed := time.Since(start); elapsed > timeout {
return out, fmt.Errorf("RunHostCmd still failed after %v: %v", elapsed, err)
return out, fmt.Errorf("RunHostCmd still failed after %v: %w", elapsed, err)
}
framework.Logf("Waiting %v to retry failed RunHostCmd: %v", interval, err)
time.Sleep(interval)
@ -166,7 +166,7 @@ func MatchContainerOutput(
// Grab its logs. Get host first.
podStatus, err := podClient.Get(ctx, createdPod.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get pod status: %v", err)
return fmt.Errorf("failed to get pod status: %w", err)
}
if podErr != nil {
@ -192,14 +192,14 @@ func MatchContainerOutput(
if err != nil {
framework.Logf("Failed to get logs from node %q pod %q container %q. %v",
podStatus.Spec.NodeName, podStatus.Name, containerName, err)
return fmt.Errorf("failed to get logs from %s for %s: %v", podStatus.Name, containerName, err)
return fmt.Errorf("failed to get logs from %s for %s: %w", podStatus.Name, containerName, err)
}
for _, expected := range expectedOutput {
m := matcher(expected)
matches, err := m.Match(logs)
if err != nil {
return fmt.Errorf("expected %q in container output: %v", expected, err)
return fmt.Errorf("expected %q in container output: %w", expected, err)
} else if !matches {
return fmt.Errorf("expected %q in container output: %s", expected, m.FailureMessage(logs))
}


@ -133,7 +133,7 @@ func (c *PodClient) Update(ctx context.Context, name string, updateFn func(pod *
framework.ExpectNoError(wait.PollWithContext(ctx, time.Millisecond*500, time.Second*30, func(ctx context.Context) (bool, error) {
pod, err := c.PodInterface.Get(ctx, name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get pod %q: %v", name, err)
return false, fmt.Errorf("failed to get pod %q: %w", name, err)
}
updateFn(pod)
_, err = c.PodInterface.Update(ctx, pod, metav1.UpdateOptions{})
@ -145,7 +145,7 @@ func (c *PodClient) Update(ctx context.Context, name string, updateFn func(pod *
framework.Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
return false, nil
}
return false, fmt.Errorf("failed to update pod %q: %v", name, err)
return false, fmt.Errorf("failed to update pod %q: %w", name, err)
}))
}
@ -261,7 +261,7 @@ func (c *PodClient) WaitForErrorEventOrSuccess(ctx context.Context, pod *v1.Pod)
err := wait.PollWithContext(ctx, framework.Poll, framework.PodStartTimeout, func(ctx context.Context) (bool, error) {
evnts, err := c.f.ClientSet.CoreV1().Events(pod.Namespace).Search(scheme.Scheme, pod)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
return false, fmt.Errorf("error in listing events: %w", err)
}
for _, e := range evnts.Items {
switch e.Reason {
@ -288,7 +288,7 @@ func (c *PodClient) MatchContainerOutput(ctx context.Context, name string, conta
}
regex, err := regexp.Compile(expectedRegexp)
if err != nil {
return fmt.Errorf("failed to compile regexp %q: %v", expectedRegexp, err)
return fmt.Errorf("failed to compile regexp %q: %w", expectedRegexp, err)
}
if !regex.MatchString(output) {
return fmt.Errorf("failed to match regexp %q in output %q", expectedRegexp, output)


@ -533,7 +533,7 @@ func VerifyPodHasConditionWithType(ctx context.Context, f *framework.Framework,
func getNodeTTLAnnotationValue(ctx context.Context, c clientset.Interface) (time.Duration, error) {
nodes, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil || len(nodes.Items) == 0 {
return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %v", err)
return time.Duration(0), fmt.Errorf("Couldn't list any nodes to get TTL annotation: %w", err)
}
// Since TTL the kubelet is using is stored in node object, for the timeout
// purpose we take it from the first node (all of them should be the same).


@ -68,7 +68,7 @@ func (p *Provider) GroupSize(group string) (int, error) {
client := autoscaling.New(awsSession)
instanceGroup, err := awscloud.DescribeInstanceGroup(client, group)
if err != nil {
return -1, fmt.Errorf("error describing instance group: %v", err)
return -1, fmt.Errorf("error describing instance group: %w", err)
}
if instanceGroup == nil {
return -1, fmt.Errorf("instance group not found: %s", group)
@ -157,7 +157,7 @@ func (p *Provider) DeletePD(pdName string) error {
if awsError, ok := err.(awserr.Error); ok && awsError.Code() == "InvalidVolume.NotFound" {
framework.Logf("volume deletion implicitly succeeded because volume %q does not exist.", pdName)
} else {
return fmt.Errorf("error deleting EBS volumes: %v", err)
return fmt.Errorf("error deleting EBS volumes: %w", err)
}
}
return nil


@ -374,22 +374,22 @@ func VerifyFirewallRule(res, exp *compute.Firewall, network string, portsSubset
expPorts := PackProtocolsPortsFromFirewall(exp.Allowed)
if portsSubset {
if err := isPortsSubset(expPorts, actualPorts); err != nil {
return fmt.Errorf("incorrect allowed protocol ports: %v", err)
return fmt.Errorf("incorrect allowed protocol ports: %w", err)
}
} else {
if err := SameStringArray(actualPorts, expPorts, false); err != nil {
return fmt.Errorf("incorrect allowed protocols ports: %v", err)
return fmt.Errorf("incorrect allowed protocols ports: %w", err)
}
}
if err := SameStringArray(res.SourceRanges, exp.SourceRanges, false); err != nil {
return fmt.Errorf("incorrect source ranges %v, expected %v: %v", res.SourceRanges, exp.SourceRanges, err)
return fmt.Errorf("incorrect source ranges %v, expected %v: %w", res.SourceRanges, exp.SourceRanges, err)
}
if err := SameStringArray(res.SourceTags, exp.SourceTags, false); err != nil {
return fmt.Errorf("incorrect source tags %v, expected %v: %v", res.SourceTags, exp.SourceTags, err)
return fmt.Errorf("incorrect source tags %v, expected %v: %w", res.SourceTags, exp.SourceTags, err)
}
if err := SameStringArray(res.TargetTags, exp.TargetTags, false); err != nil {
return fmt.Errorf("incorrect target tags %v, expected %v: %v", res.TargetTags, exp.TargetTags, err)
return fmt.Errorf("incorrect target tags %v, expected %v: %w", res.TargetTags, exp.TargetTags, err)
}
return nil
}


@ -68,7 +68,7 @@ func factory() (framework.ProviderInterface, error) {
if region == "" {
region, err = gcecloud.GetGCERegion(zone)
if err != nil {
return nil, fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
return nil, fmt.Errorf("error parsing GCE/GKE region from zone %q: %w", zone, err)
}
}
managedZones := []string{} // Manage all zones in the region
@ -95,7 +95,7 @@ func factory() (framework.ProviderInterface, error) {
})
if err != nil {
return nil, fmt.Errorf("Error building GCE/GKE provider: %v", err)
return nil, fmt.Errorf("Error building GCE/GKE provider: %w", err)
}
// Arbitrarily pick one of the zones we have nodes in, looking at prepopulated zones first.
@ -189,7 +189,7 @@ func (p *Provider) EnsureLoadBalancerResourcesDeleted(ctx context.Context, ip, p
project := framework.TestContext.CloudConfig.ProjectID
region, err := gcecloud.GetGCERegion(framework.TestContext.CloudConfig.Zone)
if err != nil {
return fmt.Errorf("could not get region for zone %q: %v", framework.TestContext.CloudConfig.Zone, err)
return fmt.Errorf("could not get region for zone %q: %w", framework.TestContext.CloudConfig.Zone, err)
}
return wait.PollWithContext(ctx, 10*time.Second, 5*time.Minute, func(ctx context.Context) (bool, error) {
@ -304,7 +304,7 @@ func (p *Provider) cleanupGCEResources(ctx context.Context, c clientset.Interfac
var err error
region, err = gcecloud.GetGCERegion(zone)
if err != nil {
return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
return fmt.Errorf("error parsing GCE/GKE region from zone %q: %w", zone, err)
}
}
if err := p.gceCloud.DeleteFirewall(gcecloud.MakeFirewallName(loadBalancerName)); err != nil &&
@ -404,7 +404,7 @@ func GetGCECloud() (*gcecloud.Cloud, error) {
func GetClusterID(ctx context.Context, c clientset.Interface) (string, error) {
cm, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, gcecloud.UIDConfigMapName, metav1.GetOptions{})
if err != nil || cm == nil {
return "", fmt.Errorf("error getting cluster ID: %v", err)
return "", fmt.Errorf("error getting cluster ID: %w", err)
}
clusterID, clusterIDExists := cm.Data[gcecloud.UIDCluster]
providerID, providerIDExists := cm.Data[gcecloud.UIDProvider]


@ -641,12 +641,12 @@ func (cont *IngressController) verifyBackendMode(svcPorts map[string]v1.ServiceP
gceCloud := cont.Cloud.Provider.(*Provider).gceCloud
beList, err := gceCloud.ListGlobalBackendServices()
if err != nil {
return fmt.Errorf("failed to list backend services: %v", err)
return fmt.Errorf("failed to list backend services: %w", err)
}
hcList, err := gceCloud.ListHealthChecks()
if err != nil {
return fmt.Errorf("failed to list health checks: %v", err)
return fmt.Errorf("failed to list health checks: %w", err)
}
// Generate short UID


@ -141,7 +141,7 @@ func PVPVCCleanup(ctx context.Context, c clientset.Interface, ns string, pv *v1.
if pvc != nil {
err := DeletePersistentVolumeClaim(ctx, c, pvc.Name, ns)
if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvc.Name, err))
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %w", pvc.Name, err))
}
} else {
framework.Logf("pvc is nil")
@ -149,7 +149,7 @@ func PVPVCCleanup(ctx context.Context, c clientset.Interface, ns string, pv *v1.
if pv != nil {
err := DeletePersistentVolume(ctx, c, pv.Name)
if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pv.Name, err))
errs = append(errs, fmt.Errorf("failed to delete PV %q: %w", pv.Name, err))
}
} else {
framework.Logf("pv is nil")
@ -166,7 +166,7 @@ func PVPVCMapCleanup(ctx context.Context, c clientset.Interface, ns string, pvol
for pvcKey := range claims {
err := DeletePersistentVolumeClaim(ctx, c, pvcKey.Name, ns)
if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %v", pvcKey.Name, err))
errs = append(errs, fmt.Errorf("failed to delete PVC %q: %w", pvcKey.Name, err))
} else {
delete(claims, pvcKey)
}
@ -175,7 +175,7 @@ func PVPVCMapCleanup(ctx context.Context, c clientset.Interface, ns string, pvol
for pvKey := range pvols {
err := DeletePersistentVolume(ctx, c, pvKey)
if err != nil {
errs = append(errs, fmt.Errorf("failed to delete PV %q: %v", pvKey, err))
errs = append(errs, fmt.Errorf("failed to delete PV %q: %w", pvKey, err))
} else {
delete(pvols, pvKey)
}
@ -189,7 +189,7 @@ func DeletePersistentVolume(ctx context.Context, c clientset.Interface, pvName s
framework.Logf("Deleting PersistentVolume %q", pvName)
err := c.CoreV1().PersistentVolumes().Delete(ctx, pvName, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return fmt.Errorf("PV Delete API error: %v", err)
return fmt.Errorf("PV Delete API error: %w", err)
}
}
return nil
@ -201,7 +201,7 @@ func DeletePersistentVolumeClaim(ctx context.Context, c clientset.Interface, pvc
framework.Logf("Deleting PersistentVolumeClaim %q", pvcName)
err := c.CoreV1().PersistentVolumeClaims(ns).Delete(ctx, pvcName, metav1.DeleteOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return fmt.Errorf("PVC Delete API error: %v", err)
return fmt.Errorf("PVC Delete API error: %w", err)
}
}
return nil
@ -222,13 +222,13 @@ func DeletePVCandValidatePV(ctx context.Context, c clientset.Interface, timeouts
framework.Logf("Waiting for reclaim process to complete.")
err = WaitForPersistentVolumePhase(ctx, expectPVPhase, c, pv.Name, framework.Poll, timeouts.PVReclaim)
if err != nil {
return fmt.Errorf("pv %q phase did not become %v: %v", pv.Name, expectPVPhase, err)
return fmt.Errorf("pv %q phase did not become %v: %w", pv.Name, expectPVPhase, err)
}
// examine the pv's ClaimRef and UID and compare to expected values
pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
return fmt.Errorf("PV Get API error: %w", err)
}
cr := pv.Spec.ClaimRef
if expectPVPhase == v1.VolumeAvailable {
@ -260,7 +260,7 @@ func DeletePVCandValidatePVGroup(ctx context.Context, c clientset.Interface, tim
for pvName := range pvols {
pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
return fmt.Errorf("PV Get API error: %w", err)
}
cr := pv.Spec.ClaimRef
// if pv is bound then delete the pvc it is bound to
@ -279,7 +279,7 @@ func DeletePVCandValidatePVGroup(ctx context.Context, c clientset.Interface, tim
return err
}
} else if !apierrors.IsNotFound(err) {
return fmt.Errorf("PVC Get API error: %v", err)
return fmt.Errorf("PVC Get API error: %w", err)
}
// delete pvckey from map even if apierrors.IsNotFound above is true and thus the
// claim was not actually deleted here
@ -316,10 +316,10 @@ func createPV(ctx context.Context, c clientset.Interface, timeouts *framework.Ti
})
// if we have an error from creating the PV, use that instead of a timeout error
if lastCreateErr != nil {
return nil, fmt.Errorf("PV Create API error: %v", err)
return nil, fmt.Errorf("PV Create API error: %w", err)
}
if err != nil {
return nil, fmt.Errorf("PV Create API error: %v", err)
return nil, fmt.Errorf("PV Create API error: %w", err)
}
return resultPV, nil
@ -334,7 +334,7 @@ func CreatePV(ctx context.Context, c clientset.Interface, timeouts *framework.Ti
func CreatePVC(ctx context.Context, c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) {
pvc, err := c.CoreV1().PersistentVolumeClaims(ns).Create(ctx, pvc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("PVC Create API error: %v", err)
return nil, fmt.Errorf("PVC Create API error: %w", err)
}
return pvc, nil
}
@ -464,24 +464,24 @@ func WaitOnPVandPVC(ctx context.Context, c clientset.Interface, timeouts *framew
framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
err := WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, pvc.Name, framework.Poll, timeouts.ClaimBound)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", pvc.Name, err)
return fmt.Errorf("PVC %q did not become Bound: %w", pvc.Name, err)
}
// Wait for PersistentVolume.Status.Phase to be Bound, which it should be
// since the PVC is already bound.
err = WaitForPersistentVolumePhase(ctx, v1.VolumeBound, c, pv.Name, framework.Poll, timeouts.PVBound)
if err != nil {
return fmt.Errorf("PV %q did not become Bound: %v", pv.Name, err)
return fmt.Errorf("PV %q did not become Bound: %w", pv.Name, err)
}
// Re-get the pv and pvc objects
pv, err = c.CoreV1().PersistentVolumes().Get(ctx, pv.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
return fmt.Errorf("PV Get API error: %w", err)
}
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, pvc.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PVC Get API error: %v", err)
return fmt.Errorf("PVC Get API error: %w", err)
}
// The pv and pvc are both bound, but to each other?
@ -523,12 +523,12 @@ func WaitAndVerifyBinds(ctx context.Context, c clientset.Interface, timeouts *fr
continue
}
if err != nil {
return fmt.Errorf("PV %q did not become Bound: %v", pvName, err)
return fmt.Errorf("PV %q did not become Bound: %w", pvName, err)
}
pv, err := c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("PV Get API error: %v", err)
return fmt.Errorf("PV Get API error: %w", err)
}
cr := pv.Spec.ClaimRef
if cr != nil && len(cr.Name) > 0 {
@ -541,7 +541,7 @@ func WaitAndVerifyBinds(ctx context.Context, c clientset.Interface, timeouts *fr
err := WaitForPersistentVolumeClaimPhase(ctx, v1.ClaimBound, c, ns, cr.Name, framework.Poll, timeouts.ClaimBound)
if err != nil {
return fmt.Errorf("PVC %q did not become Bound: %v", cr.Name, err)
return fmt.Errorf("PVC %q did not become Bound: %w", cr.Name, err)
}
actualBinds++
}
@ -665,7 +665,7 @@ func createPDWithRetry(ctx context.Context, zone string) (string, error) {
for start := time.Now(); ; time.Sleep(pdRetryPollTime) {
if time.Since(start) >= pdRetryTimeout ||
ctx.Err() != nil {
return "", fmt.Errorf("timed out while trying to create PD in zone %q, last error: %v", zone, err)
return "", fmt.Errorf("timed out while trying to create PD in zone %q, last error: %w", zone, err)
}
newDiskName, err = createPD(zone)
@ -702,7 +702,7 @@ func DeletePDWithRetry(ctx context.Context, diskName string) error {
for start := time.Now(); ; time.Sleep(pdRetryPollTime) {
if time.Since(start) >= pdRetryTimeout ||
ctx.Err() != nil {
return fmt.Errorf("timed out while trying to delete PD %q, last error: %v", diskName, err)
return fmt.Errorf("timed out while trying to delete PD %q, last error: %w", diskName, err)
}
err = deletePD(diskName)
if err != nil {
@ -737,12 +737,12 @@ func WaitForPVClaimBoundPhase(ctx context.Context, client clientset.Interface, p
// Get new copy of the claim
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(ctx, claim.Name, metav1.GetOptions{})
if err != nil {
return persistentvolumes, fmt.Errorf("PVC Get API error: %v", err)
return persistentvolumes, fmt.Errorf("PVC Get API error: %w", err)
}
// Get the bounded PV
persistentvolumes[index], err = client.CoreV1().PersistentVolumes().Get(ctx, claim.Spec.VolumeName, metav1.GetOptions{})
if err != nil {
return persistentvolumes, fmt.Errorf("PV Get API error: %v", err)
return persistentvolumes, fmt.Errorf("PV Get API error: %w", err)
}
}
return persistentvolumes, nil
@ -822,7 +822,7 @@ func DeletePVSource(ctx context.Context, pvSource *v1.PersistentVolumeSource) er
func GetDefaultStorageClassName(ctx context.Context, c clientset.Interface) (string, error) {
list, err := c.StorageV1().StorageClasses().List(ctx, metav1.ListOptions{})
if err != nil {
return "", fmt.Errorf("Error listing storage classes: %v", err)
return "", fmt.Errorf("Error listing storage classes: %w", err)
}
var scName string
for _, sc := range list.Items {

View File

@ -57,7 +57,7 @@ func ScaleResource(
) error {
ginkgo.By(fmt.Sprintf("Scaling %v %s in namespace %s to %d", kind, name, ns, size))
if err := testutils.ScaleResourceWithRetries(scalesGetter, ns, name, size, gvr); err != nil {
return fmt.Errorf("error while scaling RC %s to %d replicas: %v", name, size, err)
return fmt.Errorf("error while scaling RC %s to %d replicas: %w", name, size, err)
}
if !wait {
return nil
@ -131,7 +131,7 @@ func deleteObjectAndWaitForGC(ctx context.Context, c clientset.Interface, rtObje
if err == nil || apierrors.IsNotFound(err) {
return true, nil
}
return false, fmt.Errorf("failed to delete object with non-retriable error: %v", err)
return false, fmt.Errorf("failed to delete object with non-retriable error: %w", err)
}); err != nil {
return err
}
@ -157,7 +157,7 @@ func deleteObjectAndWaitForGC(ctx context.Context, c clientset.Interface, rtObje
err = waitForPodsInactive(ctx, ps, interval, timeout)
if err != nil {
return fmt.Errorf("error while waiting for pods to become inactive %s: %v", name, err)
return fmt.Errorf("error while waiting for pods to become inactive %s: %w", name, err)
}
terminatePodTime := time.Since(startTime) - deleteTime
framework.Logf("Terminating %v %s pods took: %v", description, name, terminatePodTime)
@ -167,7 +167,7 @@ func deleteObjectAndWaitForGC(ctx context.Context, c clientset.Interface, rtObje
// restart VM in that case and delete the pod.
err = waitForPodsGone(ctx, ps, interval, 20*time.Minute)
if err != nil {
return fmt.Errorf("error while waiting for pods gone %s: %v", name, err)
return fmt.Errorf("error while waiting for pods gone %s: %w", name, err)
}
return nil
}
@ -231,7 +231,7 @@ func WaitForControlledPodsRunning(ctx context.Context, c clientset.Interface, ns
}
err = testutils.WaitForEnoughPodsWithLabelRunning(c, ns, selector, int(replicas))
if err != nil {
return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %v", name, err)
return fmt.Errorf("Error while waiting for replication controller %s pods to be running: %w", name, err)
}
return nil
}

View File

@ -83,7 +83,7 @@ func GetSelectorFromRuntimeObject(obj runtime.Object) (labels.Selector, error) {
case *autoscalingv1.Scale:
selector, err := metav1.ParseToLabelSelector(typed.Status.Selector)
if err != nil {
return nil, fmt.Errorf("Parsing selector for: %v encountered an error: %v", obj, err)
return nil, fmt.Errorf("Parsing selector for: %v encountered an error: %w", obj, err)
}
return metav1.LabelSelectorAsSelector(selector)
default:

View File

@ -115,7 +115,7 @@ func (j *TestJig) CreateTCPServiceWithPort(ctx context.Context, tweak func(svc *
}
result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create TCP Service %q: %v", svc.Name, err)
return nil, fmt.Errorf("failed to create TCP Service %q: %w", svc.Name, err)
}
return j.sanityCheckService(result, svc.Spec.Type)
}
@ -137,7 +137,7 @@ func (j *TestJig) CreateUDPService(ctx context.Context, tweak func(svc *v1.Servi
}
result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create UDP Service %q: %v", svc.Name, err)
return nil, fmt.Errorf("failed to create UDP Service %q: %w", svc.Name, err)
}
return j.sanityCheckService(result, svc.Spec.Type)
}
@ -162,7 +162,7 @@ func (j *TestJig) CreateExternalNameService(ctx context.Context, tweak func(svc
}
result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create ExternalName Service %q: %v", svc.Name, err)
return nil, fmt.Errorf("failed to create ExternalName Service %q: %w", svc.Name, err)
}
return j.sanityCheckService(result, svc.Spec.Type)
}
@ -254,7 +254,7 @@ func (j *TestJig) CreateLoadBalancerService(ctx context.Context, timeout time.Du
}
_, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %v", svc.Name, err)
return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %w", svc.Name, err)
}
ginkgo.By("waiting for loadbalancer for service " + j.Namespace + "/" + j.Name)
@ -521,7 +521,7 @@ func (j *TestJig) UpdateService(ctx context.Context, update func(*v1.Service)) (
for i := 0; i < 3; i++ {
service, err := j.Client.CoreV1().Services(j.Namespace).Get(ctx, j.Name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get Service %q: %v", j.Name, err)
return nil, fmt.Errorf("failed to get Service %q: %w", j.Name, err)
}
update(service)
result, err := j.Client.CoreV1().Services(j.Namespace).Update(ctx, service, metav1.UpdateOptions{})
@ -529,7 +529,7 @@ func (j *TestJig) UpdateService(ctx context.Context, update func(*v1.Service)) (
return j.sanityCheckService(result, service.Spec.Type)
}
if !apierrors.IsConflict(err) && !apierrors.IsServerTimeout(err) {
return nil, fmt.Errorf("failed to update Service %q: %v", j.Name, err)
return nil, fmt.Errorf("failed to update Service %q: %w", j.Name, err)
}
}
return nil, fmt.Errorf("too many retries updating Service %q", j.Name)
@ -706,7 +706,7 @@ func (j *TestJig) CreatePDB(ctx context.Context, rc *v1.ReplicationController) (
return nil, fmt.Errorf("failed to create PDB %q %v", pdb.Name, err)
}
if err := j.waitForPdbReady(ctx); err != nil {
return nil, fmt.Errorf("failed waiting for PDB to be ready: %v", err)
return nil, fmt.Errorf("failed waiting for PDB to be ready: %w", err)
}
return newPdb, nil
@ -743,14 +743,14 @@ func (j *TestJig) Run(ctx context.Context, tweak func(rc *v1.ReplicationControll
}
result, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).Create(ctx, rc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create RC %q: %v", rc.Name, err)
return nil, fmt.Errorf("failed to create RC %q: %w", rc.Name, err)
}
pods, err := j.waitForPodsCreated(ctx, int(*(rc.Spec.Replicas)))
if err != nil {
return nil, fmt.Errorf("failed to create pods: %v", err)
return nil, fmt.Errorf("failed to create pods: %w", err)
}
if err := j.waitForPodsReady(ctx, pods); err != nil {
return nil, fmt.Errorf("failed waiting for pods to be running: %v", err)
return nil, fmt.Errorf("failed waiting for pods to be running: %w", err)
}
return result, nil
}
@ -760,21 +760,21 @@ func (j *TestJig) Scale(ctx context.Context, replicas int) error {
rc := j.Name
scale, err := j.Client.CoreV1().ReplicationControllers(j.Namespace).GetScale(ctx, rc, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to get scale for RC %q: %v", rc, err)
return fmt.Errorf("failed to get scale for RC %q: %w", rc, err)
}
scale.ResourceVersion = "" // indicate the scale update should be unconditional
scale.Spec.Replicas = int32(replicas)
_, err = j.Client.CoreV1().ReplicationControllers(j.Namespace).UpdateScale(ctx, rc, scale, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("failed to scale RC %q: %v", rc, err)
return fmt.Errorf("failed to scale RC %q: %w", rc, err)
}
pods, err := j.waitForPodsCreated(ctx, replicas)
if err != nil {
return fmt.Errorf("failed waiting for pods: %v", err)
return fmt.Errorf("failed waiting for pods: %w", err)
}
if err := j.waitForPodsReady(ctx, pods); err != nil {
return fmt.Errorf("failed waiting for pods to be running: %v", err)
return fmt.Errorf("failed waiting for pods to be running: %w", err)
}
return nil
}
@ -1063,7 +1063,7 @@ func (j *TestJig) CreateSCTPServiceWithPort(ctx context.Context, tweak func(svc
}
result, err := j.Client.CoreV1().Services(j.Namespace).Create(ctx, svc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create SCTP Service %q: %v", svc.Name, err)
return nil, fmt.Errorf("failed to create SCTP Service %q: %w", svc.Name, err)
}
return j.sanityCheckService(result, svc.Spec.Type)
}
@ -1081,7 +1081,7 @@ func (j *TestJig) CreateLoadBalancerServiceWaitForClusterIPOnly(tweak func(svc *
}
result, err := j.Client.CoreV1().Services(j.Namespace).Create(context.TODO(), svc, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %v", svc.Name, err)
return nil, fmt.Errorf("failed to create LoadBalancer Service %q: %w", svc.Name, err)
}
return j.sanityCheckService(result, v1.ServiceTypeLoadBalancer)

View File

@ -213,11 +213,11 @@ func SkipUnlessSSHKeyPresent() {
func serverVersionGTE(v *utilversion.Version, c discovery.ServerVersionInterface) (bool, error) {
serverVersion, err := c.ServerVersion()
if err != nil {
return false, fmt.Errorf("Unable to get server version: %v", err)
return false, fmt.Errorf("Unable to get server version: %w", err)
}
sv, err := utilversion.ParseSemantic(serverVersion.GitVersion)
if err != nil {
return false, fmt.Errorf("Unable to parse server version %q: %v", serverVersion.GitVersion, err)
return false, fmt.Errorf("Unable to parse server version %q: %w", serverVersion.GitVersion, err)
}
return sv.AtLeast(v), nil
}

View File

@ -103,12 +103,12 @@ func GetSigner(provider string) (ssh.Signer, error) {
func makePrivateKeySignerFromFile(key string) (ssh.Signer, error) {
buffer, err := os.ReadFile(key)
if err != nil {
return nil, fmt.Errorf("error reading SSH key %s: '%v'", key, err)
return nil, fmt.Errorf("error reading SSH key %s: %w", key, err)
}
signer, err := ssh.ParsePrivateKey(buffer)
if err != nil {
return nil, fmt.Errorf("error parsing SSH key: '%v'", err)
return nil, fmt.Errorf("error parsing SSH key: %w", err)
}
return signer, err
@ -201,7 +201,7 @@ func SSH(ctx context.Context, cmd, host, provider string) (Result, error) {
// Get a signer for the provider.
signer, err := GetSigner(provider)
if err != nil {
return result, fmt.Errorf("error getting signer for provider %s: '%v'", provider, err)
return result, fmt.Errorf("error getting signer for provider %s: %w", provider, err)
}
// RunSSHCommand will default to Getenv("USER") if user == "", but we're
@ -250,12 +250,12 @@ func runSSHCommand(ctx context.Context, cmd, user, host string, signer ssh.Signe
})
}
if err != nil {
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: '%v'", user, host, err)
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: %w", user, host, err)
}
defer client.Close()
session, err := client.NewSession()
if err != nil {
return "", "", 0, fmt.Errorf("error creating session to %s@%s: '%v'", user, host, err)
return "", "", 0, fmt.Errorf("error creating session to %s@%s: %w", user, host, err)
}
defer session.Close()
@ -275,7 +275,7 @@ func runSSHCommand(ctx context.Context, cmd, user, host string, signer ssh.Signe
} else {
// Some other kind of error happened (e.g. an IOError); consider the
// SSH unsuccessful.
err = fmt.Errorf("failed running `%s` on %s@%s: '%v'", cmd, user, host, err)
err = fmt.Errorf("failed running `%s` on %s@%s: %w", cmd, user, host, err)
}
}
return bout.String(), berr.String(), code, err
@ -304,26 +304,26 @@ func runSSHCommandViaBastion(ctx context.Context, cmd, user, bastion, host strin
})
}
if err != nil {
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: %v", user, bastion, err)
return "", "", 0, fmt.Errorf("error getting SSH client to %s@%s: %w", user, bastion, err)
}
defer bastionClient.Close()
conn, err := bastionClient.Dial("tcp", host)
if err != nil {
return "", "", 0, fmt.Errorf("error dialing %s from bastion: %v", host, err)
return "", "", 0, fmt.Errorf("error dialing %s from bastion: %w", host, err)
}
defer conn.Close()
ncc, chans, reqs, err := ssh.NewClientConn(conn, host, config)
if err != nil {
return "", "", 0, fmt.Errorf("error creating forwarding connection %s from bastion: %v", host, err)
return "", "", 0, fmt.Errorf("error creating forwarding connection %s from bastion: %w", host, err)
}
client := ssh.NewClient(ncc, chans, reqs)
defer client.Close()
session, err := client.NewSession()
if err != nil {
return "", "", 0, fmt.Errorf("error creating session to %s@%s from bastion: '%v'", user, host, err)
return "", "", 0, fmt.Errorf("error creating session to %s@%s from bastion: %w", user, host, err)
}
defer session.Close()
@ -343,7 +343,7 @@ func runSSHCommandViaBastion(ctx context.Context, cmd, user, bastion, host strin
} else {
// Some other kind of error happened (e.g. an IOError); consider the
// SSH unsuccessful.
err = fmt.Errorf("failed running `%s` on %s@%s: '%v'", cmd, user, host, err)
err = fmt.Errorf("failed running `%s` on %s@%s: %w", cmd, user, host, err)
}
}
return bout.String(), berr.String(), code, err
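
In the SSH helpers above, the old format strings also quoted the error ('%v'), so this change drops the quotes from the message in addition to enabling unwrapping. A small sketch of the before/after behavior (the "connection refused" cause is a made-up placeholder, not output from the e2e code):

    package main

    import (
        "errors"
        "fmt"
    )

    func main() {
        cause := errors.New("connection refused") // placeholder cause

        quoted := fmt.Errorf("error parsing SSH key: '%v'", cause) // old form
        wrapped := fmt.Errorf("error parsing SSH key: %w", cause)  // new form

        fmt.Println(quoted)  // error parsing SSH key: 'connection refused'
        fmt.Println(wrapped) // error parsing SSH key: connection refused

        fmt.Println(errors.Is(quoted, cause))  // false: %v does not link the cause
        fmt.Println(errors.Is(wrapped, cause)) // true: %w keeps the chain intact
    }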

View File

@ -215,7 +215,7 @@ func CheckMount(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulS
fmt.Sprintf("touch %v", filepath.Join(mountPath, fmt.Sprintf("%v", time.Now().UnixNano()))),
} {
if err := ExecInStatefulPods(ctx, c, ss, cmd); err != nil {
return fmt.Errorf("failed to execute %v, error: %v", cmd, err)
return fmt.Errorf("failed to execute %v, error: %w", cmd, err)
}
}
return nil

View File

@ -73,7 +73,7 @@ func Read(filePath string) ([]byte, error) {
for _, filesource := range filesources {
data, err := filesource.ReadTestFile(filePath)
if err != nil {
return nil, fmt.Errorf("fatal error retrieving test file %s: %s", filePath, err)
return nil, fmt.Errorf("fatal error retrieving test file %s: %w", filePath, err)
}
if data != nil {
return data, nil

View File

@ -40,7 +40,7 @@ func (rt *extractRT) RoundTrip(req *http.Request) (*http.Response, error) {
func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []string) (*websocket.Conn, error) {
tlsConfig, err := restclient.TLSConfigFor(config)
if err != nil {
return nil, fmt.Errorf("Failed to create tls config: %v", err)
return nil, fmt.Errorf("Failed to create tls config: %w", err)
}
if url.Scheme == "https" {
url.Scheme = "wss"
@ -49,11 +49,11 @@ func OpenWebSocketForURL(url *url.URL, config *restclient.Config, protocols []st
}
headers, err := headersForConfig(config, url)
if err != nil {
return nil, fmt.Errorf("Failed to load http headers: %v", err)
return nil, fmt.Errorf("Failed to load http headers: %w", err)
}
cfg, err := websocket.NewConfig(url.String(), "http://localhost")
if err != nil {
return nil, fmt.Errorf("Failed to create websocket config: %v", err)
return nil, fmt.Errorf("Failed to create websocket config: %w", err)
}
cfg.Header = headers
cfg.TlsConfig = tlsConfig

View File

@ -46,7 +46,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
gomega.Eventually(ctx, func() error {
grabber, err = e2emetrics.NewMetricsGrabber(ctx, c, ec, f.ClientConfig(), true, true, true, true, true, true)
if err != nil {
return fmt.Errorf("failed to create metrics grabber: %v", err)
return fmt.Errorf("failed to create metrics grabber: %w", err)
}
return nil
}, 5*time.Minute, 10*time.Second).Should(gomega.BeNil())

View File

@ -120,7 +120,7 @@ func verifyPodExists(response []byte, containerName string) (bool, error) {
var metadata Metadata
err := json.Unmarshal(response, &metadata)
if err != nil {
return false, fmt.Errorf("Failed to unmarshall: %s", err)
return false, fmt.Errorf("Failed to unmarshall: %w", err)
}
for _, result := range metadata.Results {
@ -130,7 +130,7 @@ func verifyPodExists(response []byte, containerName string) (bool, error) {
}
resource, err := parseResource(rawResource)
if err != nil {
return false, fmt.Errorf("No 'resource' label: %s", err)
return false, fmt.Errorf("No 'resource' label: %w", err)
}
if resource.resourceType == "k8s_container" &&
resource.resourceLabels["container_name"] == containerName {

View File

@ -1382,7 +1382,7 @@ metadata:
err := wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
cj, err := c.BatchV1().CronJobs(ns).List(ctx, metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed getting CronJob %s: %v", ns, err)
return false, fmt.Errorf("Failed getting CronJob %s: %w", ns, err)
}
return len(cj.Items) > 0, nil
})
@ -2026,11 +2026,11 @@ func checkContainersImage(containers []v1.Container, expectImage string) bool {
func getAPIVersions(apiEndpoint string) (*metav1.APIVersions, error) {
body, err := curl(apiEndpoint)
if err != nil {
return nil, fmt.Errorf("Failed http.Get of %s: %v", apiEndpoint, err)
return nil, fmt.Errorf("Failed http.Get of %s: %w", apiEndpoint, err)
}
var apiVersions metav1.APIVersions
if err := json.Unmarshal([]byte(body), &apiVersions); err != nil {
return nil, fmt.Errorf("Failed to parse /api output %s: %v", body, err)
return nil, fmt.Errorf("Failed to parse /api output %s: %w", body, err)
}
return &apiVersions, nil
}
@ -2048,7 +2048,7 @@ func startProxyServer(ns string) (int, *exec.Cmd, error) {
buf := make([]byte, 128)
var n int
if n, err = stdout.Read(buf); err != nil {
return -1, cmd, fmt.Errorf("Failed to read from kubectl proxy stdout: %v", err)
return -1, cmd, fmt.Errorf("Failed to read from kubectl proxy stdout: %w", err)
}
output := string(buf[:n])
match := proxyRegexp.FindStringSubmatch(output)
@ -2266,17 +2266,17 @@ func newBlockingReader(s string) (io.Reader, io.Closer, error) {
func createApplyCustomResource(resource, namespace, name string, crd *crd.TestCrd) error {
ginkgo.By("successfully create CR")
if _, err := e2ekubectl.RunKubectlInput(namespace, resource, "create", "--validate=true", "-f", "-"); err != nil {
return fmt.Errorf("failed to create CR %s in namespace %s: %v", resource, namespace, err)
return fmt.Errorf("failed to create CR %s in namespace %s: %w", resource, namespace, err)
}
if _, err := e2ekubectl.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil {
return fmt.Errorf("failed to delete CR %s: %v", name, err)
return fmt.Errorf("failed to delete CR %s: %w", name, err)
}
ginkgo.By("successfully apply CR")
if _, err := e2ekubectl.RunKubectlInput(namespace, resource, "apply", "--validate=true", "-f", "-"); err != nil {
return fmt.Errorf("failed to apply CR %s in namespace %s: %v", resource, namespace, err)
return fmt.Errorf("failed to apply CR %s in namespace %s: %w", resource, namespace, err)
}
if _, err := e2ekubectl.RunKubectl(namespace, "delete", crd.Crd.Spec.Names.Plural, name); err != nil {
return fmt.Errorf("failed to delete CR %s: %v", name, err)
return fmt.Errorf("failed to delete CR %s: %w", name, err)
}
return nil
}

View File

@ -389,7 +389,7 @@ func doTestOverWebSockets(ctx context.Context, bindAddress string, f *framework.
gomega.Eventually(ctx, func() error {
channel, msg, err := wsRead(ws)
if err != nil {
return fmt.Errorf("failed to read completely from websocket %s: %v", url.String(), err)
return fmt.Errorf("failed to read completely from websocket %s: %w", url.String(), err)
}
if channel != 0 {
return fmt.Errorf("got message from server that didn't start with channel 0 (data): %v", msg)
@ -403,7 +403,7 @@ func doTestOverWebSockets(ctx context.Context, bindAddress string, f *framework.
gomega.Eventually(ctx, func() error {
channel, msg, err := wsRead(ws)
if err != nil {
return fmt.Errorf("failed to read completely from websocket %s: %v", url.String(), err)
return fmt.Errorf("failed to read completely from websocket %s: %w", url.String(), err)
}
if channel != 1 {
return fmt.Errorf("got message from server that didn't start with channel 1 (error): %v", msg)
@ -426,7 +426,7 @@ func doTestOverWebSockets(ctx context.Context, bindAddress string, f *framework.
gomega.Eventually(ctx, func() error {
channel, msg, err := wsRead(ws)
if err != nil {
return fmt.Errorf("failed to read completely from websocket %s: %v", url.String(), err)
return fmt.Errorf("failed to read completely from websocket %s: %w", url.String(), err)
}
if channel != 0 {
return fmt.Errorf("got message from server that didn't start with channel 0 (data): %v", msg)

View File

@ -233,7 +233,7 @@ var _ = common.SIGDescribe("KubeProxy", func() {
}
timeoutSeconds, err := strconv.Atoi(line[2])
if err != nil {
return false, fmt.Errorf("failed to convert matched timeout %s to integer: %v", line[2], err)
return false, fmt.Errorf("failed to convert matched timeout %s to integer: %w", line[2], err)
}
if math.Abs(float64(timeoutSeconds-expectedTimeoutSeconds)) < epsilonSeconds {
return true, nil

View File

@ -81,11 +81,11 @@ func getInternalIP(node *v1.Node) (string, error) {
func getSubnetPrefix(ctx context.Context, c clientset.Interface) (*net.IPNet, error) {
node, err := getReadySchedulableWorkerNode(ctx, c)
if err != nil {
return nil, fmt.Errorf("error getting a ready schedulable worker Node, err: %v", err)
return nil, fmt.Errorf("error getting a ready schedulable worker Node, err: %w", err)
}
internalIP, err := getInternalIP(node)
if err != nil {
return nil, fmt.Errorf("error getting Node internal IP, err: %v", err)
return nil, fmt.Errorf("error getting Node internal IP, err: %w", err)
}
ip := netutils.ParseIPSloppy(internalIP)
if ip == nil {

View File

@ -85,12 +85,12 @@ func iperf2ServerDeployment(ctx context.Context, client clientset.Interface, nam
deployment, err := client.AppsV1().Deployments(namespace).Create(ctx, deploymentSpec, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("deployment %q Create API error: %v", deploymentSpec.Name, err)
return nil, fmt.Errorf("deployment %q Create API error: %w", deploymentSpec.Name, err)
}
framework.Logf("Waiting for deployment %q to complete", deploymentSpec.Name)
err = e2edeployment.WaitForDeploymentComplete(client, deployment)
if err != nil {
return nil, fmt.Errorf("deployment %q failed to complete: %v", deploymentSpec.Name, err)
return nil, fmt.Errorf("deployment %q failed to complete: %w", deploymentSpec.Name, err)
}
return deployment, nil
@ -119,7 +119,7 @@ func iperf2ClientDaemonSet(ctx context.Context, client clientset.Interface, name
ds, err := client.AppsV1().DaemonSets(namespace).Create(ctx, spec, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("daemonset %s Create API error: %v", spec.Name, err)
return nil, fmt.Errorf("daemonset %s Create API error: %w", spec.Name, err)
}
return ds, nil
}

View File

@ -120,7 +120,7 @@ func (f *IngressScaleFramework) PrepareScaleTest(ctx context.Context) error {
Cloud: f.CloudConfig,
}
if err := f.GCEController.Init(ctx); err != nil {
return fmt.Errorf("failed to initialize GCE controller: %v", err)
return fmt.Errorf("failed to initialize GCE controller: %w", err)
}
f.ScaleTestSvcs = []*v1.Service{}
@ -137,7 +137,7 @@ func (f *IngressScaleFramework) CleanupScaleTest(ctx context.Context) []error {
for _, ing := range f.ScaleTestIngs {
if ing != nil {
if err := f.Clientset.NetworkingV1().Ingresses(ing.Namespace).Delete(ctx, ing.Name, metav1.DeleteOptions{}); err != nil {
errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %v", ing.Namespace, ing.Name, err))
errs = append(errs, fmt.Errorf("error while deleting ingress %s/%s: %w", ing.Namespace, ing.Name, err))
}
}
}
@ -145,14 +145,14 @@ func (f *IngressScaleFramework) CleanupScaleTest(ctx context.Context) []error {
for _, svc := range f.ScaleTestSvcs {
if svc != nil {
if err := f.Clientset.CoreV1().Services(svc.Namespace).Delete(ctx, svc.Name, metav1.DeleteOptions{}); err != nil {
errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %v", svc.Namespace, svc.Name, err))
errs = append(errs, fmt.Errorf("error while deleting service %s/%s: %w", svc.Namespace, svc.Name, err))
}
}
}
if f.ScaleTestDeploy != nil {
f.Logger.Infof("Cleaning up deployment %s...", f.ScaleTestDeploy.Name)
if err := f.Clientset.AppsV1().Deployments(f.ScaleTestDeploy.Namespace).Delete(ctx, f.ScaleTestDeploy.Name, metav1.DeleteOptions{}); err != nil {
errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %v", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err))
errs = append(errs, fmt.Errorf("error while deleting deployment %s/%s: %w", f.ScaleTestDeploy.Namespace, f.ScaleTestDeploy.Name, err))
}
}
@ -172,7 +172,7 @@ func (f *IngressScaleFramework) RunScaleTest(ctx context.Context) []error {
f.Logger.Infof("Creating deployment %s...", testDeploy.Name)
testDeploy, err := f.Jig.Client.AppsV1().Deployments(f.Namespace).Create(ctx, testDeploy, metav1.CreateOptions{})
if err != nil {
errs = append(errs, fmt.Errorf("failed to create deployment %s: %v", testDeploy.Name, err))
errs = append(errs, fmt.Errorf("failed to create deployment %s: %w", testDeploy.Name, err))
return errs
}
f.ScaleTestDeploy = testDeploy
@ -180,7 +180,7 @@ func (f *IngressScaleFramework) RunScaleTest(ctx context.Context) []error {
if f.EnableTLS {
f.Logger.Infof("Ensuring TLS secret %s...", scaleTestSecretName)
if err := f.Jig.PrepareTLSSecret(ctx, f.Namespace, scaleTestSecretName, scaleTestHostname); err != nil {
errs = append(errs, fmt.Errorf("failed to prepare TLS secret %s: %v", scaleTestSecretName, err))
errs = append(errs, fmt.Errorf("failed to prepare TLS secret %s: %w", scaleTestSecretName, err))
return errs
}
}

View File

@ -4163,7 +4163,7 @@ func translatePodNameToUID(ctx context.Context, c clientset.Interface, ns string
for name, portList := range expectedEndpoints {
pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err)
return nil, fmt.Errorf("failed to get pod %s, that's pretty weird. validation failed: %w", name, err)
}
portsByUID[pod.ObjectMeta.UID] = portList
}
@ -4249,14 +4249,14 @@ func restartApiserver(ctx context.Context, namespace string, cs clientset.Interf
func restartComponent(ctx context.Context, cs clientset.Interface, cName, ns string, matchLabels map[string]string) error {
pods, err := e2epod.GetPods(ctx, cs, ns, matchLabels)
if err != nil {
return fmt.Errorf("failed to get %s's pods, err: %v", cName, err)
return fmt.Errorf("failed to get %s's pods, err: %w", cName, err)
}
if len(pods) == 0 {
return fmt.Errorf("%s pod count is 0", cName)
}
if err := e2epod.DeletePodsWithGracePeriod(ctx, cs, pods, 0); err != nil {
return fmt.Errorf("failed to restart component: %s, err: %v", cName, err)
return fmt.Errorf("failed to restart component: %s, err: %w", cName, err)
}
_, err = e2epod.PodsCreatedByLabel(ctx, cs, ns, cName, int32(len(pods)), labels.SelectorFromSet(matchLabels))
@ -4330,7 +4330,7 @@ func translatePortsByPodNameToPortsByPodUID(c clientset.Interface, ns string, ex
for name, portList := range expectedEndpoints {
pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("failed to get pod %s, that's pretty weird. validation failed: %s", name, err)
return nil, fmt.Errorf("failed to get pod %s, that's pretty weird. validation failed: %w", name, err)
}
portsByUID[pod.ObjectMeta.UID] = portList
}

View File

@ -869,7 +869,7 @@ func patchPod(cs clientset.Interface, old, new *v1.Pod) (*v1.Pod, error) {
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Pod{})
if err != nil {
return nil, fmt.Errorf("failed to create merge patch for Pod %q: %v", old.Name, err)
return nil, fmt.Errorf("failed to create merge patch for Pod %q: %w", old.Name, err)
}
return cs.CoreV1().Pods(new.Namespace).Patch(context.TODO(), new.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
}

View File

@ -930,7 +930,7 @@ func patchNode(ctx context.Context, client clientset.Interface, old *v1.Node, ne
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &v1.Node{})
if err != nil {
return fmt.Errorf("failed to create merge patch for node %q: %v", old.Name, err)
return fmt.Errorf("failed to create merge patch for node %q: %w", old.Name, err)
}
_, err = client.CoreV1().Nodes().Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
return err
@ -948,7 +948,7 @@ func patchPriorityClass(ctx context.Context, cs clientset.Interface, old, new *s
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, &schedulingv1.PriorityClass{})
if err != nil {
return fmt.Errorf("failed to create merge patch for PriorityClass %q: %v", old.Name, err)
return fmt.Errorf("failed to create merge patch for PriorityClass %q: %w", old.Name, err)
}
_, err = cs.SchedulingV1().PriorityClasses().Patch(ctx, old.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
return err

View File

@ -180,7 +180,7 @@ var _ = utils.SIGDescribe("CSI Mock volume storage capacity", func() {
err = wait.PollImmediateUntilWithContext(ctx, time.Second, func(ctx context.Context) (done bool, err error) {
c, index, err := compareCSICalls(ctx, deterministicCalls, expected, m.driver.GetCalls)
if err != nil {
return true, fmt.Errorf("error waiting for expected CSI calls: %s", err)
return true, fmt.Errorf("error waiting for expected CSI calls: %w", err)
}
calls = c
if index == 0 {

View File

@ -416,7 +416,7 @@ func waitForResizeStatus(pvc *v1.PersistentVolumeClaim, c clientset.Interface, e
updatedPVC, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %v", pvc.Name, err)
return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %w", pvc.Name, err)
}
actualResizeStatus = updatedPVC.Status.ResizeStatus
@ -442,7 +442,7 @@ func waitForAllocatedResource(pvc *v1.PersistentVolumeClaim, m *mockDriverSetup,
updatedPVC, err := m.cs.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %v", pvc.Name, err)
return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %w", pvc.Name, err)
}
actualAllocatedSize := updatedPVC.Status.AllocatedResources.Storage()
if actualAllocatedSize != nil && actualAllocatedSize.Equal(expectedQuantity) {

View File

@ -751,7 +751,7 @@ func (m *mockCSIDriver) GetCalls(ctx context.Context) ([]MockCSICall, error) {
// Load logs of driver pod
log, err := e2epod.GetPodLogs(ctx, m.clientSet, m.driverNamespace.Name, driverPodName, driverContainerName)
if err != nil {
return nil, fmt.Errorf("could not load CSI driver logs: %s", err)
return nil, fmt.Errorf("could not load CSI driver logs: %w", err)
}
logLines := strings.Split(log, "\n")

View File

@ -75,7 +75,7 @@ func (p PodDirIO) CreateFile(path string, content io.Reader) error {
// Therefore the content is now encoded inside the command itself.
data, err := io.ReadAll(content)
if err != nil {
return fmt.Errorf("read content: %v", err)
return fmt.Errorf("read content: %w", err)
}
encoded := make([]byte, base64.StdEncoding.EncodedLen(len(data)))
base64.StdEncoding.Encode(encoded, data)

View File

@ -97,7 +97,7 @@ func Listen(ctx context.Context, clientset kubernetes.Interface, restConfig *res
SubResource("portforward")
transport, upgrader, err := spdy.RoundTripperFor(restConfig)
if err != nil {
return nil, fmt.Errorf("create round tripper: %v", err)
return nil, fmt.Errorf("create round tripper: %w", err)
}
dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", req.URL())
@ -212,7 +212,7 @@ type stream struct {
func dial(ctx context.Context, prefix string, dialer httpstream.Dialer, port int) (s *stream, finalErr error) {
streamConn, _, err := dialer.Dial(portforward.PortForwardProtocolV1Name)
if err != nil {
return nil, fmt.Errorf("dialer failed: %v", err)
return nil, fmt.Errorf("dialer failed: %w", err)
}
requestID := "1"
defer func() {
@ -231,7 +231,7 @@ func dial(ctx context.Context, prefix string, dialer httpstream.Dialer, port int
// This happens asynchronously.
errorStream, err := streamConn.CreateStream(headers)
if err != nil {
return nil, fmt.Errorf("error creating error stream: %v", err)
return nil, fmt.Errorf("error creating error stream: %w", err)
}
errorStream.Close()
go func() {
@ -248,7 +248,7 @@ func dial(ctx context.Context, prefix string, dialer httpstream.Dialer, port int
headers.Set(v1.StreamType, v1.StreamTypeData)
dataStream, err := streamConn.CreateStream(headers)
if err != nil {
return nil, fmt.Errorf("error creating data stream: %v", err)
return nil, fmt.Errorf("error creating data stream: %w", err)
}
return &stream{

View File

@ -178,17 +178,17 @@ func createNginxPod(ctx context.Context, client clientset.Interface, namespace s
pod := makeNginxPod(namespace, nodeSelector, pvclaims)
pod, err := client.CoreV1().Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return nil, fmt.Errorf("pod Create API error: %v", err)
return nil, fmt.Errorf("pod Create API error: %w", err)
}
// Waiting for pod to be running
err = e2epod.WaitForPodNameRunningInNamespace(ctx, client, pod.Name, namespace)
if err != nil {
return pod, fmt.Errorf("pod %q is not Running: %v", pod.Name, err)
return pod, fmt.Errorf("pod %q is not Running: %w", pod.Name, err)
}
// get fresh pod info
pod, err = client.CoreV1().Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return pod, fmt.Errorf("pod Get API error: %v", err)
return pod, fmt.Errorf("pod Get API error: %w", err)
}
return pod, nil
}

View File

@ -143,7 +143,7 @@ func UpdatePVSize(ctx context.Context, pv *v1.PersistentVolume, size resource.Qu
var err error
pvToUpdate, err = c.CoreV1().PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pv %s: %v", pvName, err)
return false, fmt.Errorf("error fetching pv %s: %w", pvName, err)
}
pvToUpdate.Spec.Capacity[v1.ResourceStorage] = size
pvToUpdate, err = c.CoreV1().PersistentVolumes().Update(ctx, pvToUpdate, metav1.UpdateOptions{})

View File

@ -165,7 +165,7 @@ func waitForDeploymentToRecreatePod(ctx context.Context, client clientset.Interf
waitErr := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
podList, err := e2edeployment.GetPodsForDeployment(ctx, client, deployment)
if err != nil {
return false, fmt.Errorf("failed to get pods for deployment: %v", err)
return false, fmt.Errorf("failed to get pods for deployment: %w", err)
}
for _, pod := range podList.Items {
switch pod.Status.Phase {

View File

@ -526,7 +526,7 @@ func detachPD(nodeName types.NodeName, pdName string) error {
} else if framework.TestContext.Provider == "aws" {
awsSession, err := session.NewSession()
if err != nil {
return fmt.Errorf("error creating session: %v", err)
return fmt.Errorf("error creating session: %w", err)
}
client := ec2.New(awsSession)
tokens := strings.Split(pdName, "/")
@ -536,7 +536,7 @@ func detachPD(nodeName types.NodeName, pdName string) error {
}
_, err = client.DetachVolume(&request)
if err != nil {
return fmt.Errorf("error detaching EBS volume: %v", err)
return fmt.Errorf("error detaching EBS volume: %w", err)
}
return nil
@ -561,7 +561,7 @@ func attachPD(nodeName types.NodeName, pdName string) error {
} else if framework.TestContext.Provider == "aws" {
awsSession, err := session.NewSession()
if err != nil {
return fmt.Errorf("error creating session: %v", err)
return fmt.Errorf("error creating session: %w", err)
}
client := ec2.New(awsSession)
tokens := strings.Split(pdName, "/")
@ -569,7 +569,7 @@ func attachPD(nodeName types.NodeName, pdName string) error {
ebsUtil := utils.NewEBSUtil(client)
err = ebsUtil.AttachDisk(awsVolumeID, string(nodeName))
if err != nil {
return fmt.Errorf("error attaching volume %s to node %s: %v", awsVolumeID, nodeName, err)
return fmt.Errorf("error attaching volume %s to node %s: %w", awsVolumeID, nodeName, err)
}
return nil
} else {

View File

@ -959,7 +959,7 @@ func createLocalPVCsPVs(ctx context.Context, config *localTestConfig, volumes []
for _, volume := range volumes {
pvc, err := config.client.CoreV1().PersistentVolumeClaims(volume.pvc.Namespace).Get(ctx, volume.pvc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get PVC %s/%s: %v", volume.pvc.Namespace, volume.pvc.Name, err)
return false, fmt.Errorf("failed to get PVC %s/%s: %w", volume.pvc.Namespace, volume.pvc.Name, err)
}
if pvc.Status.Phase != v1.ClaimPending {
return true, nil

View File

@ -70,7 +70,7 @@ func completeMultiTest(ctx context.Context, f *framework.Framework, c clientset.
for pvcKey := range claims {
pvc, err := c.CoreV1().PersistentVolumeClaims(pvcKey.Namespace).Get(ctx, pvcKey.Name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("error getting pvc %q: %v", pvcKey.Name, err)
return fmt.Errorf("error getting pvc %q: %w", pvcKey.Name, err)
}
if len(pvc.Spec.VolumeName) == 0 {
continue // claim is not bound
@ -450,7 +450,7 @@ func createWaitAndDeletePod(ctx context.Context, c clientset.Interface, t *frame
pod := e2epod.MakePod(ns, nil, []*v1.PersistentVolumeClaim{pvc}, true, command)
runPod, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("pod Create API error: %v", err)
return fmt.Errorf("pod Create API error: %w", err)
}
defer func() {
delErr := e2epod.DeletePodWithWait(ctx, c, runPod)
@ -461,7 +461,7 @@ func createWaitAndDeletePod(ctx context.Context, c clientset.Interface, t *frame
err = testPodSuccessOrFail(ctx, c, t, ns, runPod)
if err != nil {
return fmt.Errorf("pod %q did not exit with Success: %v", runPod.Name, err)
return fmt.Errorf("pod %q did not exit with Success: %w", runPod.Name, err)
}
return // note: named return value
}
@ -470,7 +470,7 @@ func createWaitAndDeletePod(ctx context.Context, c clientset.Interface, t *frame
func testPodSuccessOrFail(ctx context.Context, c clientset.Interface, t *framework.TimeoutContext, ns string, pod *v1.Pod) error {
framework.Logf("Pod should terminate with exitcode 0 (success)")
if err := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, c, pod.Name, ns, t.PodStart); err != nil {
return fmt.Errorf("pod %q failed to reach Success: %v", pod.Name, err)
return fmt.Errorf("pod %q failed to reach Success: %w", pod.Name, err)
}
framework.Logf("Pod %v succeeded ", pod.Name)
return nil
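
The two hunks above wrap errors at nested call levels: createWaitAndDeletePod wraps the error returned by testPodSuccessOrFail, which itself wraps the wait error. Because both layers now use %w, the innermost cause stays reachable through the whole chain. A simplified stand-in (the function and error names below are illustrative, not the real e2e signatures):

    package main

    import (
        "errors"
        "fmt"
    )

    // errTimeout stands in for the underlying wait timeout; illustrative only.
    var errTimeout = errors.New("timed out waiting for the condition")

    func podSuccessOrFail() error {
        return fmt.Errorf("pod %q failed to reach Success: %w", "pod-1", errTimeout)
    }

    func createWaitAndDelete() error {
        if err := podSuccessOrFail(); err != nil {
            return fmt.Errorf("pod %q did not exit with Success: %w", "pod-1", err)
        }
        return nil
    }

    func main() {
        err := createWaitAndDelete()
        fmt.Println(err)
        // Both layers used %w, so the original cause is still visible through the chain.
        fmt.Println(errors.Is(err, errTimeout)) // true
    }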

View File

@ -176,7 +176,7 @@ func waitForPVCStorageClass(ctx context.Context, c clientset.Interface, namespac
})
if err != nil {
return watchedPVC, fmt.Errorf("error waiting for claim %s to have StorageClass set to %s: %v", pvcName, scName, err)
return watchedPVC, fmt.Errorf("error waiting for claim %s to have StorageClass set to %s: %w", pvcName, scName, err)
}
return watchedPVC, nil

View File

@ -303,7 +303,7 @@ func ExpandPVCSize(ctx context.Context, origPVC *v1.PersistentVolumeClaim, size
var err error
updatedPVC, err = c.CoreV1().PersistentVolumeClaims(origPVC.Namespace).Get(ctx, pvcName, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pvc %q for resizing: %v", pvcName, err)
return false, fmt.Errorf("error fetching pvc %q for resizing: %w", pvcName, err)
}
updatedPVC.Spec.Resources.Requests[v1.ResourceStorage] = size
@ -331,7 +331,7 @@ func WaitForResizingCondition(ctx context.Context, pvc *v1.PersistentVolumeClaim
updatedPVC, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %v", pvc.Name, err)
return false, fmt.Errorf("error fetching pvc %q for checking for resize status: %w", pvc.Name, err)
}
pvcConditions := updatedPVC.Status.Conditions
@ -381,7 +381,7 @@ func WaitForPendingFSResizeCondition(ctx context.Context, pvc *v1.PersistentVolu
updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %v", pvc.Name, err)
return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %w", pvc.Name, err)
}
inProgressConditions := updatedPVC.Status.Conditions
@ -409,7 +409,7 @@ func WaitForFSResize(ctx context.Context, pvc *v1.PersistentVolumeClaim, c clien
updatedPVC, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(ctx, pvc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %v", pvc.Name, err)
return false, fmt.Errorf("error fetching pvc %q for checking for resize status : %w", pvc.Name, err)
}
pvcSize := updatedPVC.Spec.Resources.Requests[v1.ResourceStorage]

View File

@ -263,7 +263,7 @@ func verifyFile(f *framework.Framework, pod *v1.Pod, fpath string, expectSize in
}
size, err := strconv.Atoi(strings.TrimSuffix(rtnstr, "\n"))
if err != nil {
return fmt.Errorf("unable to convert string %q to int: %v", rtnstr, err)
return fmt.Errorf("unable to convert string %q to int: %w", rtnstr, err)
}
if int64(size) != expectSize {
return fmt.Errorf("size of file %s is %d, expected %d", fpath, size, expectSize)
@ -320,7 +320,7 @@ func testVolumeIO(ctx context.Context, f *framework.Framework, cs clientset.Inte
podsNamespacer := cs.CoreV1().Pods(config.Namespace)
clientPod, err = podsNamespacer.Create(ctx, clientPod, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create client pod %q: %v", clientPod.Name, err)
return fmt.Errorf("failed to create client pod %q: %w", clientPod.Name, err)
}
ginkgo.DeferCleanup(func(ctx context.Context) {
deleteFile(f, clientPod, ddInput)
@ -339,7 +339,7 @@ func testVolumeIO(ctx context.Context, f *framework.Framework, cs clientset.Inte
err = e2epod.WaitTimeoutForPodRunningInNamespace(ctx, cs, clientPod.Name, clientPod.Namespace, f.Timeouts.PodStart)
if err != nil {
return fmt.Errorf("client pod %q not running: %v", clientPod.Name, err)
return fmt.Errorf("client pod %q not running: %w", clientPod.Name, err)
}
// create files of the passed-in file sizes and verify test file size and content

View File

@ -342,7 +342,7 @@ func waitForAllPVCsBound(ctx context.Context, cs clientset.Interface, timeout ti
return true, nil
})
if err != nil {
return nil, fmt.Errorf("error waiting for all PVCs to be bound: %v", err)
return nil, fmt.Errorf("error waiting for all PVCs to be bound: %w", err)
}
return pvNames, nil
}
@ -411,7 +411,7 @@ func getCSINodeLimits(ctx context.Context, cs clientset.Interface, config *stora
return true, nil
})
if err != nil {
return 0, fmt.Errorf("could not get CSINode limit for driver %s: %v", driverInfo.Name, err)
return 0, fmt.Errorf("could not get CSINode limit for driver %s: %w", driverInfo.Name, err)
}
return limit, nil
}

View File

@ -506,7 +506,7 @@ func listPodDirectory(ctx context.Context, h storageutils.HostExec, path string,
cmd := fmt.Sprintf("find %s -mindepth 2 -maxdepth 2", path)
out, err := h.IssueCommandWithResult(ctx, cmd, node)
if err != nil {
return nil, fmt.Errorf("error checking directory %s on node %s: %s", path, node.Name, err)
return nil, fmt.Errorf("error checking directory %s on node %s: %w", path, node.Name, err)
}
return strings.Split(out, "\n"), nil
}

View File

@ -64,16 +64,16 @@ func NewEBSUtil(client *ec2.EC2) *EBSUtil {
func (ebs *EBSUtil) AttachDisk(volumeID string, nodeName string) error {
instance, err := findInstanceByNodeName(nodeName, ebs.client)
if err != nil {
return fmt.Errorf("error finding node %s: %v", nodeName, err)
return fmt.Errorf("error finding node %s: %w", nodeName, err)
}
err = ebs.waitForAvailable(volumeID)
if err != nil {
return fmt.Errorf("error waiting volume %s to be available: %v", volumeID, err)
return fmt.Errorf("error waiting volume %s to be available: %w", volumeID, err)
}
device, err := ebs.findFreeDevice(instance)
if err != nil {
return fmt.Errorf("error finding free device on node %s: %v", nodeName, err)
return fmt.Errorf("error finding free device on node %s: %w", nodeName, err)
}
hostDevice := "/dev/xvd" + string(device)
attachInput := &ec2.AttachVolumeInput{
@ -83,7 +83,7 @@ func (ebs *EBSUtil) AttachDisk(volumeID string, nodeName string) error {
}
_, err = ebs.client.AttachVolume(attachInput)
if err != nil {
return fmt.Errorf("error attaching volume %s to node %s: %v", volumeID, nodeName, err)
return fmt.Errorf("error attaching volume %s to node %s: %w", volumeID, nodeName, err)
}
return ebs.waitForAttach(volumeID)
}
@ -245,7 +245,7 @@ func describeInstances(request *ec2.DescribeInstancesInput, cloud *ec2.EC2) ([]*
for {
response, err := cloud.DescribeInstances(request)
if err != nil {
return nil, fmt.Errorf("error listing AWS instances: %v", err)
return nil, fmt.Errorf("error listing AWS instances: %w", err)
}
for _, reservation := range response.Reservations {

View File

@ -72,7 +72,7 @@ func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool)
awsSession, err := session.NewSession()
if err != nil {
return fmt.Errorf("error creating session: %v", err)
return fmt.Errorf("error creating session: %w", err)
}
if len(zone) > 0 {
@ -90,7 +90,7 @@ func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool)
}
info, err := client.DescribeVolumes(request)
if err != nil {
return fmt.Errorf("error querying ec2 for volume %q: %v", volumeID, err)
return fmt.Errorf("error querying ec2 for volume %q: %w", volumeID, err)
}
if len(info.Volumes) == 0 {
return fmt.Errorf("no volumes found for volume %q", volumeID)
@ -737,7 +737,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
err = wait.Poll(time.Second, framework.ClaimProvisionTimeout, func() (bool, error) {
events, err := c.CoreV1().Events(claim.Namespace).List(ctx, metav1.ListOptions{})
if err != nil {
return false, fmt.Errorf("could not list PVC events in %s: %v", claim.Namespace, err)
return false, fmt.Errorf("could not list PVC events in %s: %w", claim.Namespace, err)
}
for _, event := range events.Items {
if strings.Contains(event.Message, "failed to create encrypted volume: the volume disappeared after creation, most likely due to inaccessible KMS encryption key") {
@ -894,7 +894,7 @@ func waitForProvisionedVolumesDeleted(ctx context.Context, c clientset.Interface
return true, nil // No PVs remain
})
if err != nil {
return remainingPVs, fmt.Errorf("Error waiting for PVs to be deleted: %v", err)
return remainingPVs, fmt.Errorf("Error waiting for PVs to be deleted: %w", err)
}
return nil, nil
}

View File

@ -782,7 +782,7 @@ func invokeVCenterServiceControl(ctx context.Context, command, service, host str
result, err := e2essh.SSH(ctx, sshCmd, host, framework.TestContext.Provider)
if err != nil || result.Code != 0 {
e2essh.LogResult(result)
return fmt.Errorf("couldn't execute command: %s on vCenter host: %v", sshCmd, err)
return fmt.Errorf("couldn't execute command: %s on vCenter host: %w", sshCmd, err)
}
return nil
}

View File

@ -84,7 +84,7 @@ func restartKubelet(ctx context.Context, host string) error {
result, err := e2essh.SSH(ctx, cmd, host, framework.TestContext.Provider)
if err != nil || result.Code != 0 {
e2essh.LogResult(result)
return fmt.Errorf("couldn't restart kubelet: %v", err)
return fmt.Errorf("couldn't restart kubelet: %w", err)
}
return nil
}

View File

@ -49,22 +49,22 @@ func gatherTestSuiteMetrics(ctx context.Context) error {
framework.Logf("Gathering metrics")
config, err := framework.LoadConfig()
if err != nil {
return fmt.Errorf("error loading client config: %v", err)
return fmt.Errorf("error loading client config: %w", err)
}
c, err := clientset.NewForConfig(config)
if err != nil {
return fmt.Errorf("error creating client: %v", err)
return fmt.Errorf("error creating client: %w", err)
}
// Grab metrics for apiserver, scheduler, controller-manager, kubelet (for non-kubemark case) and cluster autoscaler (optionally).
grabber, err := e2emetrics.NewMetricsGrabber(ctx, c, nil, config, !framework.ProviderIs("kubemark"), true, true, true, framework.TestContext.IncludeClusterAutoscalerMetrics, false)
if err != nil {
return fmt.Errorf("failed to create MetricsGrabber: %v", err)
return fmt.Errorf("failed to create MetricsGrabber: %w", err)
}
received, err := grabber.Grab(ctx)
if err != nil {
return fmt.Errorf("failed to grab metrics: %v", err)
return fmt.Errorf("failed to grab metrics: %w", err)
}
metricsForE2E := (*e2emetrics.ComponentCollection)(&received)
@ -72,7 +72,7 @@ func gatherTestSuiteMetrics(ctx context.Context) error {
if framework.TestContext.ReportDir != "" {
filePath := path.Join(framework.TestContext.ReportDir, "MetricsForE2ESuite_"+time.Now().Format(time.RFC3339)+".json")
if err := os.WriteFile(filePath, []byte(metricsJSON), 0644); err != nil {
return fmt.Errorf("error writing to %q: %v", filePath, err)
return fmt.Errorf("error writing to %q: %w", filePath, err)
}
} else {
framework.Logf("\n\nTest Suite Metrics:\n%s\n", metricsJSON)

View File

@ -184,7 +184,7 @@ func waitForDeploymentRevision(ctx context.Context, c clientset.Interface, d *ap
return revision == targetRevision, nil
})
if err != nil {
return fmt.Errorf("error waiting for revision to become %q for deployment %q: %v", targetRevision, d.Name, err)
return fmt.Errorf("error waiting for revision to become %q for deployment %q: %w", targetRevision, d.Name, err)
}
return nil
}

View File

@ -100,7 +100,7 @@ func inClusterClientMustWork(ctx context.Context, f *framework.Framework, pod *v
numTokens, err := e2eauth.ParseInClusterClientLogs(logs)
if err != nil {
framework.Logf("Error parsing inclusterclient logs: %v", err)
return false, fmt.Errorf("inclusterclient reported an error: %v", err)
return false, fmt.Errorf("inclusterclient reported an error: %w", err)
}
if numTokens == 0 {
framework.Logf("No authenticated API calls found")

View File

@ -143,7 +143,7 @@ func waitForKubeProxyStaticPodsRunning(ctx context.Context, c clientset.Interfac
}
if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil {
return fmt.Errorf("error waiting for kube-proxy static pods running: %v", err)
return fmt.Errorf("error waiting for kube-proxy static pods running: %w", err)
}
return nil
}
@ -166,7 +166,7 @@ func waitForKubeProxyStaticPodsDisappear(ctx context.Context, c clientset.Interf
}
if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil {
return fmt.Errorf("error waiting for kube-proxy static pods disappear: %v", err)
return fmt.Errorf("error waiting for kube-proxy static pods disappear: %w", err)
}
return nil
}
@ -190,7 +190,7 @@ func waitForKubeProxyDaemonSetRunning(ctx context.Context, f *framework.Framewor
}
if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil {
return fmt.Errorf("error waiting for kube-proxy DaemonSet running: %v", err)
return fmt.Errorf("error waiting for kube-proxy DaemonSet running: %w", err)
}
return nil
}
@ -213,7 +213,7 @@ func waitForKubeProxyDaemonSetDisappear(ctx context.Context, c clientset.Interfa
}
if err := wait.PollImmediate(5*time.Second, defaultTestTimeout, condition); err != nil {
return fmt.Errorf("error waiting for kube-proxy DaemonSet disappear: %v", err)
return fmt.Errorf("error waiting for kube-proxy DaemonSet disappear: %w", err)
}
return nil
}


@ -231,7 +231,7 @@ func eventOccurred(ctx context.Context, c clientset.Interface, namespace, eventS
events, err := c.CoreV1().Events(namespace).List(ctx, options)
if err != nil {
return false, fmt.Errorf("got error while getting events: %v", err)
return false, fmt.Errorf("got error while getting events: %w", err)
}
for _, event := range events.Items {
if strings.Contains(event.Message, msg) {


@ -122,13 +122,13 @@ profile e2e-node-apparmor-test-audit-write flags=(attach_disconnected) {
func loadTestProfiles() error {
f, err := os.CreateTemp("/tmp", "apparmor")
if err != nil {
return fmt.Errorf("failed to open temp file: %v", err)
return fmt.Errorf("failed to open temp file: %w", err)
}
defer os.Remove(f.Name())
defer f.Close()
if _, err := f.WriteString(testProfiles); err != nil {
return fmt.Errorf("failed to write profiles to file: %v", err)
return fmt.Errorf("failed to write profiles to file: %w", err)
}
cmd := exec.Command("apparmor_parser", "-r", "-W", f.Name())
@ -143,7 +143,7 @@ func loadTestProfiles() error {
if len(out) > 0 {
klog.Infof("apparmor_parser: %s", out)
}
return fmt.Errorf("failed to load profiles: %v", err)
return fmt.Errorf("failed to load profiles: %w", err)
}
klog.V(2).Infof("Loaded profiles: %v", out)
return nil


@ -55,7 +55,7 @@ func getOOMScoreForPid(pid int) (int, error) {
func validateOOMScoreAdjSetting(pid int, expectedOOMScoreAdj int) error {
oomScore, err := getOOMScoreForPid(pid)
if err != nil {
return fmt.Errorf("failed to get oom_score_adj for %d: %v", pid, err)
return fmt.Errorf("failed to get oom_score_adj for %d: %w", pid, err)
}
if expectedOOMScoreAdj != oomScore {
return fmt.Errorf("expected pid %d's oom_score_adj to be %d; found %d", pid, expectedOOMScoreAdj, oomScore)
@ -129,7 +129,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() {
gomega.Eventually(ctx, func() error {
pausePids, err = getPidsForProcess("pause", "")
if err != nil {
return fmt.Errorf("failed to get list of pause pids: %v", err)
return fmt.Errorf("failed to get list of pause pids: %w", err)
}
for _, pid := range pausePids {
if existingPausePIDSet.Has(pid) {
@ -147,7 +147,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() {
gomega.Eventually(ctx, func() error {
shPids, err = getPidsForProcess("agnhost", "")
if err != nil {
return fmt.Errorf("failed to get list of serve hostname process pids: %v", err)
return fmt.Errorf("failed to get list of serve hostname process pids: %w", err)
}
if len(shPids) != 1 {
return fmt.Errorf("expected only one agnhost process; found %d", len(shPids))
@ -203,7 +203,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() {
gomega.Eventually(ctx, func() error {
ngPids, err = getPidsForProcess("nginx", "")
if err != nil {
return fmt.Errorf("failed to get list of nginx process pids: %v", err)
return fmt.Errorf("failed to get list of nginx process pids: %w", err)
}
for _, pid := range ngPids {
if err := validateOOMScoreAdjSetting(pid, -998); err != nil {
@ -245,7 +245,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() {
gomega.Eventually(ctx, func() error {
wsPids, err = getPidsForProcess("agnhost", "")
if err != nil {
return fmt.Errorf("failed to get list of test-webserver process pids: %v", err)
return fmt.Errorf("failed to get list of test-webserver process pids: %w", err)
}
for _, pid := range wsPids {
if err := validateOOMScoreAdjSettingIsInRange(pid, 2, 1000); err != nil {


@ -253,14 +253,14 @@ var _ = ginkgo.SynchronizedAfterSuite(func() {}, func() {
func validateSystem() error {
testBin, err := os.Executable()
if err != nil {
return fmt.Errorf("can't get current binary: %v", err)
return fmt.Errorf("can't get current binary: %w", err)
}
// Pass all flags into the child process, so that it will see the same flag set.
output, err := exec.Command(testBin, append([]string{"--system-validate-mode"}, os.Args[1:]...)...).CombinedOutput()
// The output of system validation should have been formatted, directly print here.
fmt.Print(string(output))
if err != nil {
return fmt.Errorf("system validation failed: %v", err)
return fmt.Errorf("system validation failed: %w", err)
}
return nil
}
@ -291,7 +291,7 @@ func waitForNodeReady(ctx context.Context) {
gomega.Eventually(ctx, func() error {
node, err := getNode(client)
if err != nil {
return fmt.Errorf("failed to get node: %v", err)
return fmt.Errorf("failed to get node: %w", err)
}
if !isNodeReady(node) {
return fmt.Errorf("node is not ready: %+v", node)
@ -307,12 +307,12 @@ func updateTestContext(ctx context.Context) error {
client, err := getAPIServerClient()
if err != nil {
return fmt.Errorf("failed to get apiserver client: %v", err)
return fmt.Errorf("failed to get apiserver client: %w", err)
}
// Update test context with current node object.
node, err := getNode(client)
if err != nil {
return fmt.Errorf("failed to get node: %v", err)
return fmt.Errorf("failed to get node: %w", err)
}
framework.TestContext.NodeName = node.Name // Set node name.
// Update test context with current kubelet configuration.
@ -320,7 +320,7 @@ func updateTestContext(ctx context.Context) error {
// must: 1) run in serial; 2) restore kubelet configuration after test.
kubeletCfg, err := getCurrentKubeletConfig(ctx)
if err != nil {
return fmt.Errorf("failed to get kubelet configuration: %v", err)
return fmt.Errorf("failed to get kubelet configuration: %w", err)
}
framework.TestContext.KubeletConfig = *kubeletCfg // Set kubelet config
return nil
@ -344,11 +344,11 @@ func getNode(c *clientset.Clientset) (*v1.Node, error) {
func getAPIServerClient() (*clientset.Clientset, error) {
config, err := framework.LoadConfig()
if err != nil {
return nil, fmt.Errorf("failed to load config: %v", err)
return nil, fmt.Errorf("failed to load config: %w", err)
}
client, err := clientset.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("failed to create client: %v", err)
return nil, fmt.Errorf("failed to create client: %w", err)
}
return client, nil
}


@ -230,12 +230,12 @@ func getGPUDevicePluginImage(ctx context.Context) (string, error) {
func getSampleDevicePluginImage() (string, error) {
data, err := e2etestfiles.Read(SampleDevicePluginDSYAML)
if err != nil {
return "", fmt.Errorf("failed to read the sample plugin yaml: %v", err)
return "", fmt.Errorf("failed to read the sample plugin yaml: %w", err)
}
ds, err := e2emanifest.DaemonSetFromData(data)
if err != nil {
return "", fmt.Errorf("failed to parse daemon set for sample plugin: %v", err)
return "", fmt.Errorf("failed to parse daemon set for sample plugin: %w", err)
}
if len(ds.Spec.Template.Spec.Containers) < 1 {


@ -144,12 +144,12 @@ func getMemoryManagerState() (*state.MemoryManagerCheckpoint, error) {
out, err := exec.Command("/bin/sh", "-c", fmt.Sprintf("cat %s", memoryManagerStateFile)).Output()
if err != nil {
return nil, fmt.Errorf("failed to run command 'cat %s': out: %s, err: %v", memoryManagerStateFile, out, err)
return nil, fmt.Errorf("failed to run command 'cat %s': out: %s, err: %w", memoryManagerStateFile, out, err)
}
memoryManagerCheckpoint := &state.MemoryManagerCheckpoint{}
if err := json.Unmarshal(out, memoryManagerCheckpoint); err != nil {
return nil, fmt.Errorf("failed to unmarshal memory manager state file: %v", err)
return nil, fmt.Errorf("failed to unmarshal memory manager state file: %w", err)
}
return memoryManagerCheckpoint, nil
}


@ -187,7 +187,7 @@ spec:
func checkMirrorPodRunningWithUID(ctx context.Context, cl clientset.Interface, name, namespace string, oUID types.UID) error {
pod, err := cl.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err)
return fmt.Errorf("expected the mirror pod %q to appear: %w", name, err)
}
if pod.UID != oUID {
return fmt.Errorf("expected the uid of mirror pod %q to be same, got %q", name, pod.UID)


@ -244,7 +244,7 @@ func checkMirrorPodDisappear(ctx context.Context, cl clientset.Interface, name,
func checkMirrorPodRunning(ctx context.Context, cl clientset.Interface, name, namespace string) error {
pod, err := cl.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err)
return fmt.Errorf("expected the mirror pod %q to appear: %w", name, err)
}
if pod.Status.Phase != v1.PodRunning {
return fmt.Errorf("expected the mirror pod %q to be running, got %q", name, pod.Status.Phase)
@ -263,7 +263,7 @@ func checkMirrorPodRunningWithRestartCount(ctx context.Context, interval time.Du
err = wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
pod, err = cl.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("expected the mirror pod %q to appear: %v", name, err)
return false, fmt.Errorf("expected the mirror pod %q to appear: %w", name, err)
}
if pod.Status.Phase != v1.PodRunning {
return false, fmt.Errorf("expected the mirror pod %q to be running, got %q", name, pod.Status.Phase)
@ -292,7 +292,7 @@ func checkMirrorPodRunningWithRestartCount(ctx context.Context, interval time.Du
func checkMirrorPodRecreatedAndRunning(ctx context.Context, cl clientset.Interface, name, namespace string, oUID types.UID) error {
pod, err := cl.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err)
return fmt.Errorf("expected the mirror pod %q to appear: %w", name, err)
}
if pod.UID == oUID {
return fmt.Errorf("expected the uid of mirror pod %q to be changed, got %q", name, pod.UID)
@ -328,7 +328,7 @@ func validateMirrorPod(ctx context.Context, cl clientset.Interface, mirrorPod *v
}
node, err := cl.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("failed to fetch test node: %v", err)
return fmt.Errorf("failed to fetch test node: %w", err)
}
controller := true


@ -31,7 +31,7 @@ func runCmd(cmd string, args []string) error {
func getMatchingLineFromLog(log string, pattern string) (line string, err error) {
regex, err := regexp.Compile(pattern)
if err != nil {
return line, fmt.Errorf("failed to compile regexp %v: %v", pattern, err)
return line, fmt.Errorf("failed to compile regexp %v: %w", pattern, err)
}
logLines := strings.Split(log, "\n")


@ -57,7 +57,7 @@ func runCommand(command string, args ...string) error {
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
return fmt.Errorf("failed to run command %s. error: %v", command, err)
return fmt.Errorf("failed to run command %s. error: %w", command, err)
}
return nil
}


@ -79,7 +79,7 @@ func buildConformanceTest(binDir, systemSpecName string) error {
// Get node conformance directory.
conformancePath, err := getConformanceDirectory()
if err != nil {
return fmt.Errorf("failed to get node conformance directory: %v", err)
return fmt.Errorf("failed to get node conformance directory: %w", err)
}
// Build docker image.
cmd := exec.Command("make", "-C", conformancePath, "BIN_DIR="+binDir,
@ -104,7 +104,7 @@ func buildConformanceTest(binDir, systemSpecName string) error {
func (c *ConformanceRemote) SetupTestPackage(tardir, systemSpecName string) error {
// Build the executables
if err := builder.BuildGo(); err != nil {
return fmt.Errorf("failed to build the dependencies: %v", err)
return fmt.Errorf("failed to build the dependencies: %w", err)
}
// Make sure we can find the newly built binaries
@ -115,7 +115,7 @@ func (c *ConformanceRemote) SetupTestPackage(tardir, systemSpecName string) erro
// Build node conformance tarball.
if err := buildConformanceTest(buildOutputDir, systemSpecName); err != nil {
return fmt.Errorf("failed to build node conformance test: %v", err)
return fmt.Errorf("failed to build node conformance test: %w", err)
}
// Copy files
@ -123,7 +123,7 @@ func (c *ConformanceRemote) SetupTestPackage(tardir, systemSpecName string) erro
for _, file := range requiredFiles {
source := filepath.Join(buildOutputDir, file)
if _, err := os.Stat(source); err != nil {
return fmt.Errorf("failed to locate test file %s: %v", file, err)
return fmt.Errorf("failed to locate test file %s: %w", file, err)
}
output, err := exec.Command("cp", source, filepath.Join(tardir, file)).CombinedOutput()
if err != nil {
@ -188,7 +188,7 @@ func launchKubelet(host, workspace, results, testArgs, bearerToken string) error
var cmd []string
systemd, err := isSystemd(host)
if err != nil {
return fmt.Errorf("failed to check systemd: %v", err)
return fmt.Errorf("failed to check systemd: %w", err)
}
if systemd {
cmd = []string{


@ -45,18 +45,18 @@ func InitNodeE2ERemote() TestSuite {
func (n *NodeE2ERemote) SetupTestPackage(tardir, systemSpecName string) error {
// Build the executables
if err := builder.BuildGo(); err != nil {
return fmt.Errorf("failed to build the dependencies: %v", err)
return fmt.Errorf("failed to build the dependencies: %w", err)
}
// Make sure we can find the newly built binaries
buildOutputDir, err := utils.GetK8sBuildOutputDir()
if err != nil {
return fmt.Errorf("failed to locate kubernetes build output directory: %v", err)
return fmt.Errorf("failed to locate kubernetes build output directory: %w", err)
}
rootDir, err := utils.GetK8sRootDir()
if err != nil {
return fmt.Errorf("failed to locate kubernetes root directory: %v", err)
return fmt.Errorf("failed to locate kubernetes root directory: %w", err)
}
// Copy binaries
@ -64,7 +64,7 @@ func (n *NodeE2ERemote) SetupTestPackage(tardir, systemSpecName string) error {
for _, bin := range requiredBins {
source := filepath.Join(buildOutputDir, bin)
if _, err := os.Stat(source); err != nil {
return fmt.Errorf("failed to locate test binary %s: %v", bin, err)
return fmt.Errorf("failed to locate test binary %s: %w", bin, err)
}
out, err := exec.Command("cp", source, filepath.Join(tardir, bin)).CombinedOutput()
if err != nil {
@ -76,7 +76,7 @@ func (n *NodeE2ERemote) SetupTestPackage(tardir, systemSpecName string) error {
// Copy system spec file
source := filepath.Join(rootDir, system.SystemSpecPath, systemSpecName+".yaml")
if _, err := os.Stat(source); err != nil {
return fmt.Errorf("failed to locate system spec %q: %v", source, err)
return fmt.Errorf("failed to locate system spec %q: %w", source, err)
}
out, err := exec.Command("cp", source, tardir).CombinedOutput()
if err != nil {


@ -78,13 +78,13 @@ func CreateTestArchive(suite TestSuite, systemSpecName, kubeletConfigFile string
err = copyKubeletConfigIfExists(kubeletConfigFile, tardir)
if err != nil {
return "", fmt.Errorf("failed to copy kubelet config: %v", err)
return "", fmt.Errorf("failed to copy kubelet config: %w", err)
}
// Call the suite function to setup the test package.
err = suite.SetupTestPackage(tardir, systemSpecName)
if err != nil {
return "", fmt.Errorf("failed to setup test package %q: %v", tardir, err)
return "", fmt.Errorf("failed to setup test package %q: %w", tardir, err)
}
// Build the tar
@ -196,7 +196,7 @@ func GetTimestampFromWorkspaceDir(dir string) string {
func getTestArtifacts(host, testDir string) error {
logPath := filepath.Join(*resultsDir, host)
if err := os.MkdirAll(logPath, 0755); err != nil {
return fmt.Errorf("failed to create log directory %q: %v", logPath, err)
return fmt.Errorf("failed to create log directory %q: %w", logPath, err)
}
// Copy logs to artifacts/hostname
if _, err := runSSHCommand("scp", "-r", fmt.Sprintf("%s:%s/results/*.log", GetHostnameOrIP(host), testDir), logPath); err != nil {
@ -250,7 +250,7 @@ func collectSystemLog(host string) {
func WriteLog(host, filename, content string) error {
logPath := filepath.Join(*resultsDir, host)
if err := os.MkdirAll(logPath, 0755); err != nil {
return fmt.Errorf("failed to create log directory %q: %v", logPath, err)
return fmt.Errorf("failed to create log directory %q: %w", logPath, err)
}
f, err := os.Create(filepath.Join(logPath, filename))
if err != nil {


@ -121,7 +121,7 @@ func runSSHCommand(cmd string, args ...string) (string, error) {
output, err := exec.Command(cmd, args...).CombinedOutput()
if err != nil {
klog.Errorf("failed to run SSH command: out: %s, err: %v", output, err)
return string(output), fmt.Errorf("command [%s %s] failed with error: %v", cmd, strings.Join(args, " "), err)
return string(output), fmt.Errorf("command [%s %s] failed with error: %w", cmd, strings.Join(args, " "), err)
}
return string(output), nil
}


@ -480,18 +480,18 @@ func getPidsForProcess(name, pidFile string) ([]int, error) {
func getPidFromPidFile(pidFile string) (int, error) {
file, err := os.Open(pidFile)
if err != nil {
return 0, fmt.Errorf("error opening pid file %s: %v", pidFile, err)
return 0, fmt.Errorf("error opening pid file %s: %w", pidFile, err)
}
defer file.Close()
data, err := io.ReadAll(file)
if err != nil {
return 0, fmt.Errorf("error reading pid file %s: %v", pidFile, err)
return 0, fmt.Errorf("error reading pid file %s: %w", pidFile, err)
}
pid, err := strconv.Atoi(string(data))
if err != nil {
return 0, fmt.Errorf("error parsing %s as a number: %v", string(data), err)
return 0, fmt.Errorf("error parsing %s as a number: %w", string(data), err)
}
return pid, nil


@ -326,14 +326,14 @@ func prepareGceImages() (*internalImageConfig, error) {
imageConfigData, err := os.ReadFile(configPath)
if err != nil {
return nil, fmt.Errorf("Could not read image config file provided: %v", err)
return nil, fmt.Errorf("Could not read image config file provided: %w", err)
}
// Unmarshal the given image config file. All images for this test run will be organized into a map.
// shortName->GCEImage, e.g cos-stable->cos-stable-81-12871-103-0.
externalImageConfig := ImageConfig{Images: make(map[string]GCEImage)}
err = yaml.Unmarshal(imageConfigData, &externalImageConfig)
if err != nil {
return nil, fmt.Errorf("Could not parse image config file: %v", err)
return nil, fmt.Errorf("Could not parse image config file: %w", err)
}
for shortName, imageConfig := range externalImageConfig.Images {
@ -472,7 +472,7 @@ func testHost(host string, deleteFiles bool, imageDesc, junitFileName, ginkgoFla
if err != nil {
// Don't log fatal because we need to do any needed cleanup contained in "defer" statements
return &TestResult{
err: fmt.Errorf("unable to create test archive: %v", err),
err: fmt.Errorf("unable to create test archive: %w", err),
}
}
@ -511,7 +511,7 @@ func getGCEImage(imageRegex, imageFamily string, project string) (string, error)
}
creationTime, err := time.Parse(time.RFC3339, instance.CreationTimestamp)
if err != nil {
return fmt.Errorf("failed to parse instance creation timestamp %q: %v", instance.CreationTimestamp, err)
return fmt.Errorf("failed to parse instance creation timestamp %q: %w", instance.CreationTimestamp, err)
}
io := imageObj{
creationTime: creationTime,
@ -522,7 +522,7 @@ func getGCEImage(imageRegex, imageFamily string, project string) (string, error)
return nil
},
); err != nil {
return "", fmt.Errorf("failed to list images in project %q: %v", project, err)
return "", fmt.Errorf("failed to list images in project %q: %w", project, err)
}
// Pick the latest image after sorting.
@ -590,7 +590,7 @@ func testImage(imageConfig *internalGCEImage, junitFileName string) *TestResult
func createInstance(imageConfig *internalGCEImage) (string, error) {
p, err := computeService.Projects.Get(*project).Do()
if err != nil {
return "", fmt.Errorf("failed to get project info %q: %v", *project, err)
return "", fmt.Errorf("failed to get project info %q: %w", *project, err)
}
// Use default service account
serviceAccount := p.DefaultServiceAccount


@ -92,7 +92,7 @@ var _ = SIGDescribe("Container Runtime Conformance Test", func() {
checkContainerStatus := func(ctx context.Context) error {
status, err := container.GetStatus(ctx)
if err != nil {
return fmt.Errorf("failed to get container status: %v", err)
return fmt.Errorf("failed to get container status: %w", err)
}
// We need to check container state first. The default pod status is pending. If we check
// pod phase first, and the expected pod phase is Pending, the container status may not
@ -118,7 +118,7 @@ var _ = SIGDescribe("Container Runtime Conformance Test", func() {
// Check pod phase
phase, err := container.GetPhase(ctx)
if err != nil {
return fmt.Errorf("failed to get pod phase: %v", err)
return fmt.Errorf("failed to get pod phase: %w", err)
}
if phase != testCase.phase {
return fmt.Errorf("expected pod phase: %q, got: %q", testCase.phase, phase)


@ -70,18 +70,18 @@ func (a *APIServer) Start() error {
o.ServiceClusterIPRanges = ipnet.String()
o.AllowPrivileged = true
if err := generateTokenFile(tokenFilePath); err != nil {
return fmt.Errorf("failed to generate token file %s: %v", tokenFilePath, err)
return fmt.Errorf("failed to generate token file %s: %w", tokenFilePath, err)
}
o.Authentication.TokenFile.TokenFile = tokenFilePath
o.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"}
saSigningKeyFile, err := os.CreateTemp("/tmp", "insecure_test_key")
if err != nil {
return fmt.Errorf("create temp file failed: %v", err)
return fmt.Errorf("create temp file failed: %w", err)
}
defer os.RemoveAll(saSigningKeyFile.Name())
if err = os.WriteFile(saSigningKeyFile.Name(), []byte(ecdsaPrivateKey), 0666); err != nil {
return fmt.Errorf("write file %s failed: %v", saSigningKeyFile.Name(), err)
return fmt.Errorf("write file %s failed: %w", saSigningKeyFile.Name(), err)
}
o.ServiceAccountSigningKeyFile = saSigningKeyFile.Name()
o.Authentication.APIAudiences = []string{"https://foo.bar.example.com"}
@ -95,7 +95,7 @@ func (a *APIServer) Start() error {
defer close(errCh)
completedOptions, err := apiserver.Complete(o)
if err != nil {
errCh <- fmt.Errorf("set apiserver default options error: %v", err)
errCh <- fmt.Errorf("set apiserver default options error: %w", err)
return
}
if errs := completedOptions.Validate(); len(errs) != 0 {
@ -105,7 +105,7 @@ func (a *APIServer) Start() error {
err = apiserver.Run(completedOptions, a.stopCh)
if err != nil {
errCh <- fmt.Errorf("run apiserver error: %v", err)
errCh <- fmt.Errorf("run apiserver error: %w", err)
return
}
}()

Some files were not shown because too many files have changed in this diff.
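A minimal, self-contained sketch of the behavior this change relies on (not taken from the diff; the assertionFailure type is hypothetical): errors.Is and errors.As only walk error chains built with %w, so a %v-formatted prefix hides the root cause from any caller that inspects it.

    package main

    import (
        "errors"
        "fmt"
    )

    // assertionFailure stands in for a failure type a caller might look for;
    // the name is hypothetical and used only for illustration.
    type assertionFailure struct{ msg string }

    func (a assertionFailure) Error() string { return a.msg }

    func main() {
        root := assertionFailure{msg: "expected pod to be running"}

        // %w keeps the original error reachable in the chain ...
        wrapped := fmt.Errorf("couldn't restart kube-apiserver: %w", root)
        // ... while %v only keeps its text.
        flattened := fmt.Errorf("couldn't restart kube-apiserver: %v", root)

        var target assertionFailure
        fmt.Println(errors.As(wrapped, &target))   // true: root cause still detectable
        fmt.Println(errors.As(flattened, &target)) // false: cause reduced to a string
    }

Running it prints true for the %w chain and false for the %v one: wrapping with %w keeps the cause reachable, while %v flattens it into plain text, which is why the mechanical %v -> %w rewrite is safe even where unwrapping is never needed.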