Merge pull request #113298 from pohly/e2e-wait-for-pods-with-gomega

e2e: wait for pods with gomega
Kubernetes Prow Robot authored on 2023-02-04 05:26:29 -08:00; committed by GitHub.
126 changed files with 1426 additions and 1143 deletions
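
Two changes dominate the hunks below: fmt.Errorf calls switch their final verb from %v to %w so that the returned error wraps its cause, and bespoke pod-wait helpers give way to gomega-based waiting (WaitForPodToDisappear becomes WaitForPodNotFoundInNamespace). The formatted message is identical either way; what %w adds is that callers can still reach the underlying error through errors.Is and errors.As. A minimal illustrative sketch, not taken from this diff:

package main

import (
	"errors"
	"fmt"
	"io/fs"
	"os"
)

// readConfig wraps its failure with %w, so the original error stays in
// the chain instead of being flattened to text as %v would do.
func readConfig(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return fmt.Errorf("failed to open config %s: %w", path, err)
	}
	defer f.Close()
	return nil
}

func main() {
	err := readConfig("/no/such/file")
	// Prints true with %w; it would be false had readConfig used %v.
	fmt.Println(errors.Is(err, fs.ErrNotExist))
}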

View File

@@ -122,13 +122,13 @@ profile e2e-node-apparmor-test-audit-write flags=(attach_disconnected) {
func loadTestProfiles() error {
f, err := os.CreateTemp("/tmp", "apparmor")
if err != nil {
-return fmt.Errorf("failed to open temp file: %v", err)
+return fmt.Errorf("failed to open temp file: %w", err)
}
defer os.Remove(f.Name())
defer f.Close()
if _, err := f.WriteString(testProfiles); err != nil {
-return fmt.Errorf("failed to write profiles to file: %v", err)
+return fmt.Errorf("failed to write profiles to file: %w", err)
}
cmd := exec.Command("apparmor_parser", "-r", "-W", f.Name())
@@ -143,7 +143,7 @@ func loadTestProfiles() error {
if len(out) > 0 {
klog.Infof("apparmor_parser: %s", out)
}
-return fmt.Errorf("failed to load profiles: %v", err)
+return fmt.Errorf("failed to load profiles: %w", err)
}
klog.V(2).Infof("Loaded profiles: %v", out)
return nil

View File

@@ -55,7 +55,7 @@ func getOOMScoreForPid(pid int) (int, error) {
func validateOOMScoreAdjSetting(pid int, expectedOOMScoreAdj int) error {
oomScore, err := getOOMScoreForPid(pid)
if err != nil {
-return fmt.Errorf("failed to get oom_score_adj for %d: %v", pid, err)
+return fmt.Errorf("failed to get oom_score_adj for %d: %w", pid, err)
}
if expectedOOMScoreAdj != oomScore {
return fmt.Errorf("expected pid %d's oom_score_adj to be %d; found %d", pid, expectedOOMScoreAdj, oomScore)
@@ -129,7 +129,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() {
gomega.Eventually(ctx, func() error {
pausePids, err = getPidsForProcess("pause", "")
if err != nil {
-return fmt.Errorf("failed to get list of pause pids: %v", err)
+return fmt.Errorf("failed to get list of pause pids: %w", err)
}
for _, pid := range pausePids {
if existingPausePIDSet.Has(pid) {
@@ -147,7 +147,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() {
gomega.Eventually(ctx, func() error {
shPids, err = getPidsForProcess("agnhost", "")
if err != nil {
-return fmt.Errorf("failed to get list of serve hostname process pids: %v", err)
+return fmt.Errorf("failed to get list of serve hostname process pids: %w", err)
}
if len(shPids) != 1 {
return fmt.Errorf("expected only one agnhost process; found %d", len(shPids))
@@ -203,7 +203,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() {
gomega.Eventually(ctx, func() error {
ngPids, err = getPidsForProcess("nginx", "")
if err != nil {
-return fmt.Errorf("failed to get list of nginx process pids: %v", err)
+return fmt.Errorf("failed to get list of nginx process pids: %w", err)
}
for _, pid := range ngPids {
if err := validateOOMScoreAdjSetting(pid, -998); err != nil {
@@ -245,7 +245,7 @@ var _ = SIGDescribe("Container Manager Misc [Serial]", func() {
gomega.Eventually(ctx, func() error {
wsPids, err = getPidsForProcess("agnhost", "")
if err != nil {
-return fmt.Errorf("failed to get list of test-webserver process pids: %v", err)
+return fmt.Errorf("failed to get list of test-webserver process pids: %w", err)
}
for _, pid := range wsPids {
if err := validateOOMScoreAdjSettingIsInRange(pid, 2, 1000); err != nil {

View File

@@ -253,14 +253,14 @@ var _ = ginkgo.SynchronizedAfterSuite(func() {}, func() {
func validateSystem() error {
testBin, err := os.Executable()
if err != nil {
-return fmt.Errorf("can't get current binary: %v", err)
+return fmt.Errorf("can't get current binary: %w", err)
}
// Pass all flags into the child process, so that it will see the same flag set.
output, err := exec.Command(testBin, append([]string{"--system-validate-mode"}, os.Args[1:]...)...).CombinedOutput()
// The output of system validation should have been formatted, directly print here.
fmt.Print(string(output))
if err != nil {
-return fmt.Errorf("system validation failed: %v", err)
+return fmt.Errorf("system validation failed: %w", err)
}
return nil
}
@@ -291,7 +291,7 @@ func waitForNodeReady(ctx context.Context) {
gomega.Eventually(ctx, func() error {
node, err := getNode(client)
if err != nil {
-return fmt.Errorf("failed to get node: %v", err)
+return fmt.Errorf("failed to get node: %w", err)
}
if !isNodeReady(node) {
return fmt.Errorf("node is not ready: %+v", node)
@@ -307,12 +307,12 @@ func updateTestContext(ctx context.Context) error {
client, err := getAPIServerClient()
if err != nil {
-return fmt.Errorf("failed to get apiserver client: %v", err)
+return fmt.Errorf("failed to get apiserver client: %w", err)
}
// Update test context with current node object.
node, err := getNode(client)
if err != nil {
-return fmt.Errorf("failed to get node: %v", err)
+return fmt.Errorf("failed to get node: %w", err)
}
framework.TestContext.NodeName = node.Name // Set node name.
// Update test context with current kubelet configuration.
@@ -320,7 +320,7 @@ func updateTestContext(ctx context.Context) error {
// must: 1) run in serial; 2) restore kubelet configuration after test.
kubeletCfg, err := getCurrentKubeletConfig(ctx)
if err != nil {
-return fmt.Errorf("failed to get kubelet configuration: %v", err)
+return fmt.Errorf("failed to get kubelet configuration: %w", err)
}
framework.TestContext.KubeletConfig = *kubeletCfg // Set kubelet config
return nil
@@ -344,11 +344,11 @@ func getNode(c *clientset.Clientset) (*v1.Node, error) {
func getAPIServerClient() (*clientset.Clientset, error) {
config, err := framework.LoadConfig()
if err != nil {
-return nil, fmt.Errorf("failed to load config: %v", err)
+return nil, fmt.Errorf("failed to load config: %w", err)
}
client, err := clientset.NewForConfig(config)
if err != nil {
-return nil, fmt.Errorf("failed to create client: %v", err)
+return nil, fmt.Errorf("failed to create client: %w", err)
}
return client, nil
}

View File

@@ -230,12 +230,12 @@ func getGPUDevicePluginImage(ctx context.Context) (string, error) {
func getSampleDevicePluginImage() (string, error) {
data, err := e2etestfiles.Read(SampleDevicePluginDSYAML)
if err != nil {
-return "", fmt.Errorf("failed to read the sample plugin yaml: %v", err)
+return "", fmt.Errorf("failed to read the sample plugin yaml: %w", err)
}
ds, err := e2emanifest.DaemonSetFromData(data)
if err != nil {
-return "", fmt.Errorf("failed to parse daemon set for sample plugin: %v", err)
+return "", fmt.Errorf("failed to parse daemon set for sample plugin: %w", err)
}
if len(ds.Spec.Template.Spec.Containers) < 1 {

View File

@@ -144,12 +144,12 @@ func getMemoryManagerState() (*state.MemoryManagerCheckpoint, error) {
out, err := exec.Command("/bin/sh", "-c", fmt.Sprintf("cat %s", memoryManagerStateFile)).Output()
if err != nil {
-return nil, fmt.Errorf("failed to run command 'cat %s': out: %s, err: %v", memoryManagerStateFile, out, err)
+return nil, fmt.Errorf("failed to run command 'cat %s': out: %s, err: %w", memoryManagerStateFile, out, err)
}
memoryManagerCheckpoint := &state.MemoryManagerCheckpoint{}
if err := json.Unmarshal(out, memoryManagerCheckpoint); err != nil {
-return nil, fmt.Errorf("failed to unmarshal memory manager state file: %v", err)
+return nil, fmt.Errorf("failed to unmarshal memory manager state file: %w", err)
}
return memoryManagerCheckpoint, nil
}

View File

@@ -187,7 +187,7 @@ spec:
func checkMirrorPodRunningWithUID(ctx context.Context, cl clientset.Interface, name, namespace string, oUID types.UID) error {
pod, err := cl.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
-return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err)
+return fmt.Errorf("expected the mirror pod %q to appear: %w", name, err)
}
if pod.UID != oUID {
return fmt.Errorf("expected the uid of mirror pod %q to be same, got %q", name, pod.UID)

View File

@@ -244,7 +244,7 @@ func checkMirrorPodDisappear(ctx context.Context, cl clientset.Interface, name,
func checkMirrorPodRunning(ctx context.Context, cl clientset.Interface, name, namespace string) error {
pod, err := cl.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
-return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err)
+return fmt.Errorf("expected the mirror pod %q to appear: %w", name, err)
}
if pod.Status.Phase != v1.PodRunning {
return fmt.Errorf("expected the mirror pod %q to be running, got %q", name, pod.Status.Phase)
@@ -263,7 +263,7 @@ func checkMirrorPodRunningWithRestartCount(ctx context.Context, interval time.Du
err = wait.PollImmediateWithContext(ctx, interval, timeout, func(ctx context.Context) (bool, error) {
pod, err = cl.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
-return false, fmt.Errorf("expected the mirror pod %q to appear: %v", name, err)
+return false, fmt.Errorf("expected the mirror pod %q to appear: %w", name, err)
}
if pod.Status.Phase != v1.PodRunning {
return false, fmt.Errorf("expected the mirror pod %q to be running, got %q", name, pod.Status.Phase)
@@ -292,7 +292,7 @@ func checkMirrorPodRunningWithRestartCount(ctx context.Context, interval time.Du
func checkMirrorPodRecreatedAndRunning(ctx context.Context, cl clientset.Interface, name, namespace string, oUID types.UID) error {
pod, err := cl.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
if err != nil {
-return fmt.Errorf("expected the mirror pod %q to appear: %v", name, err)
+return fmt.Errorf("expected the mirror pod %q to appear: %w", name, err)
}
if pod.UID == oUID {
return fmt.Errorf("expected the uid of mirror pod %q to be changed, got %q", name, pod.UID)
@@ -328,7 +328,7 @@ func validateMirrorPod(ctx context.Context, cl clientset.Interface, mirrorPod *v
}
node, err := cl.CoreV1().Nodes().Get(ctx, framework.TestContext.NodeName, metav1.GetOptions{})
if err != nil {
-return fmt.Errorf("failed to fetch test node: %v", err)
+return fmt.Errorf("failed to fetch test node: %w", err)
}
controller := true

View File

@@ -31,7 +31,6 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
-"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
@@ -434,7 +433,7 @@ current-context: local-context
ginkgo.By("Delete the node problem detector")
framework.ExpectNoError(e2epod.NewPodClient(f).Delete(ctx, name, *metav1.NewDeleteOptions(0)))
ginkgo.By("Wait for the node problem detector to disappear")
-gomega.Expect(e2epod.WaitForPodToDisappear(ctx, c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(gomega.Succeed())
+gomega.Expect(e2epod.WaitForPodNotFoundInNamespace(ctx, c, ns, name, pollTimeout)).To(gomega.Succeed())
ginkgo.By("Delete the config map")
framework.ExpectNoError(c.CoreV1().ConfigMaps(ns).Delete(ctx, configName, metav1.DeleteOptions{}))
ginkgo.By("Clean up the events")
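
The hunk above swaps the label-selector-based WaitForPodToDisappear for WaitForPodNotFoundInNamespace, which needs only the pod name and namespace; deletePodsSync further down gets the same treatment. As a hedged sketch (hypothetical helper name, assuming the ctx-aware gomega.Eventually already used throughout this diff), the polling pattern these tests converge on looks like this:

package waitsketch

import (
	"context"
	"fmt"
	"time"

	"github.com/onsi/gomega"
)

// waitUntil polls check until it returns nil or the timeout expires.
// Returning a %w-wrapped error keeps the root cause in gomega's failure
// message when the assertion finally gives up.
func waitUntil(ctx context.Context, g gomega.Gomega, check func() error) {
	g.Eventually(ctx, func() error {
		if err := check(); err != nil {
			return fmt.Errorf("condition not met: %w", err)
		}
		return nil
	}).WithTimeout(2 * time.Minute).WithPolling(10 * time.Second).Should(gomega.Succeed())
}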

View File

@@ -31,7 +31,7 @@ func runCmd(cmd string, args []string) error {
func getMatchingLineFromLog(log string, pattern string) (line string, err error) {
regex, err := regexp.Compile(pattern)
if err != nil {
-return line, fmt.Errorf("failed to compile regexp %v: %v", pattern, err)
+return line, fmt.Errorf("failed to compile regexp %v: %w", pattern, err)
}
logLines := strings.Split(log, "\n")

View File

@@ -57,7 +57,7 @@ func runCommand(command string, args ...string) error {
cmd.Stderr = os.Stderr
err := cmd.Run()
if err != nil {
-return fmt.Errorf("failed to run command %s. error: %v", command, err)
+return fmt.Errorf("failed to run command %s. error: %w", command, err)
}
return nil
}

View File

@@ -79,7 +79,7 @@ func buildConformanceTest(binDir, systemSpecName string) error {
// Get node conformance directory.
conformancePath, err := getConformanceDirectory()
if err != nil {
-return fmt.Errorf("failed to get node conformance directory: %v", err)
+return fmt.Errorf("failed to get node conformance directory: %w", err)
}
// Build docker image.
cmd := exec.Command("make", "-C", conformancePath, "BIN_DIR="+binDir,
@@ -104,7 +104,7 @@ func buildConformanceTest(binDir, systemSpecName string) error {
func (c *ConformanceRemote) SetupTestPackage(tardir, systemSpecName string) error {
// Build the executables
if err := builder.BuildGo(); err != nil {
-return fmt.Errorf("failed to build the dependencies: %v", err)
+return fmt.Errorf("failed to build the dependencies: %w", err)
}
// Make sure we can find the newly built binaries
@@ -115,7 +115,7 @@ func (c *ConformanceRemote) SetupTestPackage(tardir, systemSpecName string) erro
// Build node conformance tarball.
if err := buildConformanceTest(buildOutputDir, systemSpecName); err != nil {
-return fmt.Errorf("failed to build node conformance test: %v", err)
+return fmt.Errorf("failed to build node conformance test: %w", err)
}
// Copy files
@@ -123,7 +123,7 @@ func (c *ConformanceRemote) SetupTestPackage(tardir, systemSpecName string) erro
for _, file := range requiredFiles {
source := filepath.Join(buildOutputDir, file)
if _, err := os.Stat(source); err != nil {
-return fmt.Errorf("failed to locate test file %s: %v", file, err)
+return fmt.Errorf("failed to locate test file %s: %w", file, err)
}
output, err := exec.Command("cp", source, filepath.Join(tardir, file)).CombinedOutput()
if err != nil {
@@ -188,7 +188,7 @@ func launchKubelet(host, workspace, results, testArgs, bearerToken string) error
var cmd []string
systemd, err := isSystemd(host)
if err != nil {
-return fmt.Errorf("failed to check systemd: %v", err)
+return fmt.Errorf("failed to check systemd: %w", err)
}
if systemd {
cmd = []string{

View File

@@ -45,18 +45,18 @@ func InitNodeE2ERemote() TestSuite {
func (n *NodeE2ERemote) SetupTestPackage(tardir, systemSpecName string) error {
// Build the executables
if err := builder.BuildGo(); err != nil {
-return fmt.Errorf("failed to build the dependencies: %v", err)
+return fmt.Errorf("failed to build the dependencies: %w", err)
}
// Make sure we can find the newly built binaries
buildOutputDir, err := utils.GetK8sBuildOutputDir()
if err != nil {
-return fmt.Errorf("failed to locate kubernetes build output directory: %v", err)
+return fmt.Errorf("failed to locate kubernetes build output directory: %w", err)
}
rootDir, err := utils.GetK8sRootDir()
if err != nil {
-return fmt.Errorf("failed to locate kubernetes root directory: %v", err)
+return fmt.Errorf("failed to locate kubernetes root directory: %w", err)
}
// Copy binaries
@@ -64,7 +64,7 @@ func (n *NodeE2ERemote) SetupTestPackage(tardir, systemSpecName string) error {
for _, bin := range requiredBins {
source := filepath.Join(buildOutputDir, bin)
if _, err := os.Stat(source); err != nil {
-return fmt.Errorf("failed to locate test binary %s: %v", bin, err)
+return fmt.Errorf("failed to locate test binary %s: %w", bin, err)
}
out, err := exec.Command("cp", source, filepath.Join(tardir, bin)).CombinedOutput()
if err != nil {
@@ -76,7 +76,7 @@ func (n *NodeE2ERemote) SetupTestPackage(tardir, systemSpecName string) error {
// Copy system spec file
source := filepath.Join(rootDir, system.SystemSpecPath, systemSpecName+".yaml")
if _, err := os.Stat(source); err != nil {
-return fmt.Errorf("failed to locate system spec %q: %v", source, err)
+return fmt.Errorf("failed to locate system spec %q: %w", source, err)
}
out, err := exec.Command("cp", source, tardir).CombinedOutput()
if err != nil {

View File

@@ -78,13 +78,13 @@ func CreateTestArchive(suite TestSuite, systemSpecName, kubeletConfigFile string
err = copyKubeletConfigIfExists(kubeletConfigFile, tardir)
if err != nil {
-return "", fmt.Errorf("failed to copy kubelet config: %v", err)
+return "", fmt.Errorf("failed to copy kubelet config: %w", err)
}
// Call the suite function to setup the test package.
err = suite.SetupTestPackage(tardir, systemSpecName)
if err != nil {
-return "", fmt.Errorf("failed to setup test package %q: %v", tardir, err)
+return "", fmt.Errorf("failed to setup test package %q: %w", tardir, err)
}
// Build the tar
@@ -196,7 +196,7 @@ func GetTimestampFromWorkspaceDir(dir string) string {
func getTestArtifacts(host, testDir string) error {
logPath := filepath.Join(*resultsDir, host)
if err := os.MkdirAll(logPath, 0755); err != nil {
-return fmt.Errorf("failed to create log directory %q: %v", logPath, err)
+return fmt.Errorf("failed to create log directory %q: %w", logPath, err)
}
// Copy logs to artifacts/hostname
if _, err := runSSHCommand("scp", "-r", fmt.Sprintf("%s:%s/results/*.log", GetHostnameOrIP(host), testDir), logPath); err != nil {
@@ -250,7 +250,7 @@ func collectSystemLog(host string) {
func WriteLog(host, filename, content string) error {
logPath := filepath.Join(*resultsDir, host)
if err := os.MkdirAll(logPath, 0755); err != nil {
-return fmt.Errorf("failed to create log directory %q: %v", logPath, err)
+return fmt.Errorf("failed to create log directory %q: %w", logPath, err)
}
f, err := os.Create(filepath.Join(logPath, filename))
if err != nil {

View File

@@ -121,7 +121,7 @@ func runSSHCommand(cmd string, args ...string) (string, error) {
output, err := exec.Command(cmd, args...).CombinedOutput()
if err != nil {
klog.Errorf("failed to run SSH command: out: %s, err: %v", output, err)
-return string(output), fmt.Errorf("command [%s %s] failed with error: %v", cmd, strings.Join(args, " "), err)
+return string(output), fmt.Errorf("command [%s %s] failed with error: %w", cmd, strings.Join(args, " "), err)
}
return string(output), nil
}

View File

@@ -39,7 +39,6 @@ import (
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
@@ -384,8 +383,7 @@ func deletePodsSync(ctx context.Context, f *framework.Framework, pods []*v1.Pod)
framework.Failf("Unexpected error trying to delete pod %s: %v", pod.Name, err)
}
-gomega.Expect(e2epod.WaitForPodToDisappear(ctx, f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, labels.Everything(),
-30*time.Second, 10*time.Minute)).NotTo(gomega.HaveOccurred())
+framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, 10*time.Minute))
}()
}
wg.Wait()
@@ -482,18 +480,18 @@ func getPidsForProcess(name, pidFile string) ([]int, error) {
func getPidFromPidFile(pidFile string) (int, error) {
file, err := os.Open(pidFile)
if err != nil {
-return 0, fmt.Errorf("error opening pid file %s: %v", pidFile, err)
+return 0, fmt.Errorf("error opening pid file %s: %w", pidFile, err)
}
defer file.Close()
data, err := io.ReadAll(file)
if err != nil {
-return 0, fmt.Errorf("error reading pid file %s: %v", pidFile, err)
+return 0, fmt.Errorf("error reading pid file %s: %w", pidFile, err)
}
pid, err := strconv.Atoi(string(data))
if err != nil {
-return 0, fmt.Errorf("error parsing %s as a number: %v", string(data), err)
+return 0, fmt.Errorf("error parsing %s as a number: %w", string(data), err)
}
return pid, nil

View File

@@ -326,14 +326,14 @@ func prepareGceImages() (*internalImageConfig, error) {
imageConfigData, err := os.ReadFile(configPath)
if err != nil {
-return nil, fmt.Errorf("Could not read image config file provided: %v", err)
+return nil, fmt.Errorf("Could not read image config file provided: %w", err)
}
// Unmarshal the given image config file. All images for this test run will be organized into a map.
// shortName->GCEImage, e.g cos-stable->cos-stable-81-12871-103-0.
externalImageConfig := ImageConfig{Images: make(map[string]GCEImage)}
err = yaml.Unmarshal(imageConfigData, &externalImageConfig)
if err != nil {
-return nil, fmt.Errorf("Could not parse image config file: %v", err)
+return nil, fmt.Errorf("Could not parse image config file: %w", err)
}
for shortName, imageConfig := range externalImageConfig.Images {
@@ -472,7 +472,7 @@ func testHost(host string, deleteFiles bool, imageDesc, junitFileName, ginkgoFla
if err != nil {
// Don't log fatal because we need to do any needed cleanup contained in "defer" statements
return &TestResult{
-err: fmt.Errorf("unable to create test archive: %v", err),
+err: fmt.Errorf("unable to create test archive: %w", err),
}
}
@@ -511,7 +511,7 @@ func getGCEImage(imageRegex, imageFamily string, project string) (string, error)
}
creationTime, err := time.Parse(time.RFC3339, instance.CreationTimestamp)
if err != nil {
-return fmt.Errorf("failed to parse instance creation timestamp %q: %v", instance.CreationTimestamp, err)
+return fmt.Errorf("failed to parse instance creation timestamp %q: %w", instance.CreationTimestamp, err)
}
io := imageObj{
creationTime: creationTime,
@@ -522,7 +522,7 @@ func getGCEImage(imageRegex, imageFamily string, project string) (string, error)
return nil
},
); err != nil {
-return "", fmt.Errorf("failed to list images in project %q: %v", project, err)
+return "", fmt.Errorf("failed to list images in project %q: %w", project, err)
}
// Pick the latest image after sorting.
@@ -590,7 +590,7 @@ func testImage(imageConfig *internalGCEImage, junitFileName string) *TestResult
func createInstance(imageConfig *internalGCEImage) (string, error) {
p, err := computeService.Projects.Get(*project).Do()
if err != nil {
-return "", fmt.Errorf("failed to get project info %q: %v", *project, err)
+return "", fmt.Errorf("failed to get project info %q: %w", *project, err)
}
// Use default service account
serviceAccount := p.DefaultServiceAccount

View File

@@ -92,7 +92,7 @@ var _ = SIGDescribe("Container Runtime Conformance Test", func() {
checkContainerStatus := func(ctx context.Context) error {
status, err := container.GetStatus(ctx)
if err != nil {
-return fmt.Errorf("failed to get container status: %v", err)
+return fmt.Errorf("failed to get container status: %w", err)
}
// We need to check container state first. The default pod status is pending, If we check
// pod phase first, and the expected pod phase is Pending, the container status may not
@@ -118,7 +118,7 @@ var _ = SIGDescribe("Container Runtime Conformance Test", func() {
// Check pod phase
phase, err := container.GetPhase(ctx)
if err != nil {
-return fmt.Errorf("failed to get pod phase: %v", err)
+return fmt.Errorf("failed to get pod phase: %w", err)
}
if phase != testCase.phase {
return fmt.Errorf("expected pod phase: %q, got: %q", testCase.phase, phase)

View File

@@ -70,18 +70,18 @@ func (a *APIServer) Start() error {
o.ServiceClusterIPRanges = ipnet.String()
o.AllowPrivileged = true
if err := generateTokenFile(tokenFilePath); err != nil {
-return fmt.Errorf("failed to generate token file %s: %v", tokenFilePath, err)
+return fmt.Errorf("failed to generate token file %s: %w", tokenFilePath, err)
}
o.Authentication.TokenFile.TokenFile = tokenFilePath
o.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"}
saSigningKeyFile, err := os.CreateTemp("/tmp", "insecure_test_key")
if err != nil {
-return fmt.Errorf("create temp file failed: %v", err)
+return fmt.Errorf("create temp file failed: %w", err)
}
defer os.RemoveAll(saSigningKeyFile.Name())
if err = os.WriteFile(saSigningKeyFile.Name(), []byte(ecdsaPrivateKey), 0666); err != nil {
-return fmt.Errorf("write file %s failed: %v", saSigningKeyFile.Name(), err)
+return fmt.Errorf("write file %s failed: %w", saSigningKeyFile.Name(), err)
}
o.ServiceAccountSigningKeyFile = saSigningKeyFile.Name()
o.Authentication.APIAudiences = []string{"https://foo.bar.example.com"}
@@ -95,7 +95,7 @@ func (a *APIServer) Start() error {
defer close(errCh)
completedOptions, err := apiserver.Complete(o)
if err != nil {
-errCh <- fmt.Errorf("set apiserver default options error: %v", err)
+errCh <- fmt.Errorf("set apiserver default options error: %w", err)
return
}
if errs := completedOptions.Validate(); len(errs) != 0 {
@@ -105,7 +105,7 @@ func (a *APIServer) Start() error {
err = apiserver.Run(completedOptions, a.stopCh)
if err != nil {
-errCh <- fmt.Errorf("run apiserver error: %v", err)
+errCh <- fmt.Errorf("run apiserver error: %w", err)
return
}
}()

View File

@@ -178,7 +178,7 @@ func (e *E2EServices) startKubelet(featureGates map[string]bool) (*server, error
kc, err := baseKubeConfiguration(kubeletConfigFile)
if err != nil {
-return nil, fmt.Errorf("failed to load base kubelet configuration: %v", err)
+return nil, fmt.Errorf("failed to load base kubelet configuration: %w", err)
}
// Apply overrides to allow access to the Kubelet API from the test suite.
@@ -327,11 +327,11 @@ func writeKubeletConfigFile(internal *kubeletconfig.KubeletConfiguration, path s
func createPodDirectory() (string, error) {
cwd, err := os.Getwd()
if err != nil {
-return "", fmt.Errorf("failed to get current working directory: %v", err)
+return "", fmt.Errorf("failed to get current working directory: %w", err)
}
path, err := os.MkdirTemp(cwd, "static-pods")
if err != nil {
-return "", fmt.Errorf("failed to create static pod directory: %v", err)
+return "", fmt.Errorf("failed to create static pod directory: %w", err)
}
return path, nil
}
@@ -375,7 +375,7 @@ func createRootDirectory(path string) error {
func kubeconfigCWDPath() (string, error) {
cwd, err := os.Getwd()
if err != nil {
-return "", fmt.Errorf("failed to get current working directory: %v", err)
+return "", fmt.Errorf("failed to get current working directory: %w", err)
}
return filepath.Join(cwd, "kubeconfig"), nil
}
@@ -383,7 +383,7 @@ func kubeconfigCWDPath() (string, error) {
func kubeletConfigCWDPath() (string, error) {
cwd, err := os.Getwd()
if err != nil {
-return "", fmt.Errorf("failed to get current working directory: %v", err)
+return "", fmt.Errorf("failed to get current working directory: %w", err)
}
// DO NOT name this file "kubelet" - you will overwrite the kubelet binary and be very confused :)
return filepath.Join(cwd, "kubelet-config"), nil

View File

@@ -156,7 +156,7 @@ func (s *server) start() error {
// Start the command
err = s.startCommand.Start()
if err != nil {
-errCh <- fmt.Errorf("failed to run %s: %v", s, err)
+errCh <- fmt.Errorf("failed to run %s: %w", s, err)
return
}
if !s.restartOnExit {
@@ -165,7 +165,7 @@ func (s *server) start() error {
// Otherwise, we Wait() in the restart loop.
err = s.startCommand.Wait()
if err != nil {
-errCh <- fmt.Errorf("failed to run start command for server %q: %v", s.name, err)
+errCh <- fmt.Errorf("failed to run start command for server %q: %w", s.name, err)
return
}
} else {
@@ -305,7 +305,7 @@ func (s *server) kill() error {
select {
case err := <-waitChan:
if err != nil {
-return fmt.Errorf("error stopping %q: %v", name, err)
+return fmt.Errorf("error stopping %q: %w", name, err)
}
// Success!
return nil
@@ -322,7 +322,7 @@ func (s *server) stopUnit() error {
if s.systemdUnitName != "" {
err := exec.Command("sudo", "systemctl", "stop", s.systemdUnitName).Run()
if err != nil {
-return fmt.Errorf("Failed to stop systemd unit name: %q: %v", s.systemdUnitName, err)
+return fmt.Errorf("Failed to stop systemd unit name: %q: %w", s.systemdUnitName, err)
}
}
return nil

View File

@@ -63,7 +63,7 @@ func NewE2EServices(monitorParent bool) *E2EServices {
func (e *E2EServices) Start(featureGates map[string]bool) error {
var err error
if e.services, err = e.startInternalServices(); err != nil {
-return fmt.Errorf("failed to start internal services: %v", err)
+return fmt.Errorf("failed to start internal services: %w", err)
}
klog.Infof("Node services started.")
// running the kubelet depends on whether we are running conformance test-suite
@@ -73,7 +73,7 @@ func (e *E2EServices) Start(featureGates map[string]bool) error {
// Start kubelet
e.kubelet, err = e.startKubelet(featureGates)
if err != nil {
-return fmt.Errorf("failed to start kubelet: %v", err)
+return fmt.Errorf("failed to start kubelet: %w", err)
}
klog.Infof("Kubelet started.")
}
@@ -130,7 +130,7 @@ const (
func (e *E2EServices) startInternalServices() (*server, error) {
testBin, err := os.Executable()
if err != nil {
-return nil, fmt.Errorf("can't get current binary: %v", err)
+return nil, fmt.Errorf("can't get current binary: %w", err)
}
// Pass all flags into the child process, so that it will see the same flag set.
startCmd := exec.Command(testBin,

View File

@@ -89,14 +89,14 @@ func getNodeSummary(ctx context.Context) (*stats.Summary, error) {
}
req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/stats/summary", net.JoinHostPort(kubeletConfig.Address, strconv.Itoa(int(kubeletConfig.ReadOnlyPort)))), nil)
if err != nil {
-return nil, fmt.Errorf("failed to build http request: %v", err)
+return nil, fmt.Errorf("failed to build http request: %w", err)
}
req.Header.Add("Accept", "application/json")
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
-return nil, fmt.Errorf("failed to get /stats/summary: %v", err)
+return nil, fmt.Errorf("failed to get /stats/summary: %w", err)
}
defer resp.Body.Close()
@@ -117,11 +117,11 @@ func getNodeSummary(ctx context.Context) (*stats.Summary, error) {
func getV1alpha1NodeDevices(ctx context.Context) (*kubeletpodresourcesv1alpha1.ListPodResourcesResponse, error) {
endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
if err != nil {
-return nil, fmt.Errorf("Error getting local endpoint: %v", err)
+return nil, fmt.Errorf("Error getting local endpoint: %w", err)
}
client, conn, err := podresources.GetV1alpha1Client(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
if err != nil {
-return nil, fmt.Errorf("Error getting grpc client: %v", err)
+return nil, fmt.Errorf("Error getting grpc client: %w", err)
}
defer conn.Close()
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
@@ -136,11 +136,11 @@ func getV1alpha1NodeDevices(ctx context.Context) (*kubeletpodresourcesv1alpha1.L
func getV1NodeDevices(ctx context.Context) (*kubeletpodresourcesv1.ListPodResourcesResponse, error) {
endpoint, err := util.LocalEndpoint(defaultPodResourcesPath, podresources.Socket)
if err != nil {
-return nil, fmt.Errorf("Error getting local endpoint: %v", err)
+return nil, fmt.Errorf("Error getting local endpoint: %w", err)
}
client, conn, err := podresources.GetV1Client(endpoint, defaultPodResourcesTimeout, defaultPodResourcesMaxSize)
if err != nil {
-return nil, fmt.Errorf("Error getting gRPC client: %v", err)
+return nil, fmt.Errorf("Error getting gRPC client: %w", err)
}
defer conn.Close()
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)