e2e framework: eliminate redundant framework/[log|ginkgowrapper]

These sub-packages were created by mistake. Logging and failure handling are
core features and must be implemented in the framework package.
Patrick Ohly 2022-08-25 20:01:40 +02:00
parent a46fea53e6
commit 2d21acb1be
16 changed files with 158 additions and 273 deletions
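Across the files below the change is mechanical: callers drop the e2elog alias (and the ginkgowrapper indirection) and call the framework package directly. A minimal sketch of what a call site looks like after this commit; the package and helper names here are hypothetical, only framework.Logf and framework.Failf come from the diff:

package example // hypothetical caller, for illustration only

import (
	"k8s.io/kubernetes/test/e2e/framework"
)

// checkOrFail illustrates the post-commit call sites: logging and failure
// handling come straight from the framework package instead of the removed
// framework/log and framework/ginkgowrapper sub-packages.
func checkOrFail(err error) {
	if err != nil {
		// was: e2elog.Logf("unexpected error: %v", err)
		framework.Logf("unexpected error: %v", err)
		// was: e2elog.Failf(...) or e2eginkgowrapper.Fail(...)
		framework.Failf("aborting test: %v", err)
	}
}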

View File

@@ -29,7 +29,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
v1authorization "k8s.io/client-go/kubernetes/typed/authorization/v1"
v1rbac "k8s.io/client-go/kubernetes/typed/rbac/v1"
- e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+ "k8s.io/kubernetes/test/e2e/framework"
)
const (
@@ -152,13 +152,13 @@ func IsRBACEnabled(crGetter v1rbac.ClusterRolesGetter) bool {
isRBACEnabledOnce.Do(func() {
crs, err := crGetter.ClusterRoles().List(context.TODO(), metav1.ListOptions{})
if err != nil {
- e2elog.Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err)
+ framework.Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err)
isRBACEnabled = false
} else if crs == nil || len(crs.Items) == 0 {
- e2elog.Logf("No ClusterRoles found; assuming RBAC is disabled.")
+ framework.Logf("No ClusterRoles found; assuming RBAC is disabled.")
isRBACEnabled = false
} else {
- e2elog.Logf("Found ClusterRoles; assuming RBAC is enabled.")
+ framework.Logf("Found ClusterRoles; assuming RBAC is enabled.")
isRBACEnabled = true
}
})

View File

@@ -1,106 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package ginkgowrapper wraps Ginkgo Fail and Skip functions to panic
// with structured data instead of a constant string.
package ginkgowrapper
import (
"bufio"
"bytes"
"regexp"
"runtime"
"runtime/debug"
"strings"
"github.com/onsi/ginkgo/v2"
)
// FailurePanic is the value that will be panicked from Fail.
type FailurePanic struct {
Message string // The failure message passed to Fail
Filename string // The filename that is the source of the failure
Line int // The line number of the filename that is the source of the failure
FullStackTrace string // A full stack trace starting at the source of the failure
}
const ginkgoFailurePanic = `
Your test failed.
Ginkgo panics to prevent subsequent assertions from running.
Normally Ginkgo rescues this panic so you shouldn't see it.
But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
To circumvent this, you should call
defer GinkgoRecover()
at the top of the goroutine that caused this panic.
`
// String makes FailurePanic look like the old Ginkgo panic when printed.
func (FailurePanic) String() string { return ginkgoFailurePanic }
// Fail wraps ginkgo.Fail so that it panics with more useful
// information about the failure. This function will panic with a
// FailurePanic.
func Fail(message string, callerSkip ...int) {
skip := 1
if len(callerSkip) > 0 {
skip += callerSkip[0]
}
_, file, line, _ := runtime.Caller(skip)
fp := FailurePanic{
Message: message,
Filename: file,
Line: line,
FullStackTrace: pruneStack(skip),
}
defer func() {
e := recover()
if e != nil {
panic(fp)
}
}()
ginkgo.Fail(message, skip)
}
// ginkgo adds a lot of test running infrastructure to the stack, so
// we filter those out
var stackSkipPattern = regexp.MustCompile(`onsi/ginkgo/v2`)
func pruneStack(skip int) string {
skip += 2 // one for pruneStack and one for debug.Stack
stack := debug.Stack()
scanner := bufio.NewScanner(bytes.NewBuffer(stack))
var prunedStack []string
// skip the top of the stack
for i := 0; i < 2*skip+1; i++ {
scanner.Scan()
}
for scanner.Scan() {
if stackSkipPattern.Match(scanner.Bytes()) {
scanner.Scan() // these come in pairs
} else {
prunedStack = append(prunedStack, scanner.Text())
scanner.Scan() // these come in pairs
prunedStack = append(prunedStack, scanner.Text())
}
}
return strings.Join(prunedStack, "\n")
}
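For context (not part of the diff): the panic message in the removed package above describes the standard Ginkgo pattern for failing a test from a goroutine. A minimal sketch of that pattern, using a hypothetical background-check helper; only ginkgo.GinkgoRecover and framework.Failf are taken from the sources above:

package example // hypothetical, for illustration only

import (
	"github.com/onsi/ginkgo/v2"
	"k8s.io/kubernetes/test/e2e/framework"
)

// startBackgroundCheck runs check in a goroutine. Because framework.Failf
// ultimately panics via ginkgo.Fail, the goroutine must defer GinkgoRecover
// so that Ginkgo captures the failure instead of the panic escaping.
func startBackgroundCheck(check func() error) {
	go func() {
		defer ginkgo.GinkgoRecover()
		if err := check(); err != nil {
			framework.Failf("background check failed: %v", err)
		}
	}()
}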

View File

@@ -29,7 +29,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
- e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+ "k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
@@ -109,7 +109,7 @@ func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm stri
logFunc("Running kubectl logs on non-ready containers in %v", ns)
for _, pod := range podList.Items {
if res, err := testutils.PodRunningReady(&pod); !res || err != nil {
- kubectlLogPod(c, pod, "", e2elog.Logf)
+ kubectlLogPod(c, pod, "", framework.Logf)
}
}
}
@@ -144,7 +144,7 @@ func (tk *TestKubeconfig) WriteFileViaContainer(podName, containerName string, p
command := fmt.Sprintf("echo '%s' > '%s'; sync", contents, path)
stdout, stderr, err := tk.kubectlExecWithRetry(tk.Namespace, podName, containerName, "--", "/bin/sh", "-c", command)
if err != nil {
- e2elog.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
+ framework.Logf("error running kubectl exec to write file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
}
return err
}
@@ -155,7 +155,7 @@ func (tk *TestKubeconfig) ReadFileViaContainer(podName, containerName string, pa
stdout, stderr, err := tk.kubectlExecWithRetry(tk.Namespace, podName, containerName, "--", "cat", path)
if err != nil {
- e2elog.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
+ framework.Logf("error running kubectl exec to read file: %v\nstdout=%v\nstderr=%v)", err, string(stdout), string(stderr))
}
return string(stdout), err
}
@@ -163,19 +163,19 @@ func (tk *TestKubeconfig) ReadFileViaContainer(podName, containerName string, pa
func (tk *TestKubeconfig) kubectlExecWithRetry(namespace string, podName, containerName string, args ...string) ([]byte, []byte, error) {
for numRetries := 0; numRetries < maxKubectlExecRetries; numRetries++ {
if numRetries > 0 {
- e2elog.Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
+ framework.Logf("Retrying kubectl exec (retry count=%v/%v)", numRetries+1, maxKubectlExecRetries)
}
stdOutBytes, stdErrBytes, err := tk.kubectlExec(namespace, podName, containerName, args...)
if err != nil {
if strings.Contains(strings.ToLower(string(stdErrBytes)), "i/o timeout") {
// Retry on "i/o timeout" errors
- e2elog.Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
+ framework.Logf("Warning: kubectl exec encountered i/o timeout.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
continue
}
if strings.Contains(strings.ToLower(string(stdErrBytes)), "container not found") {
// Retry on "container not found" errors
- e2elog.Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
+ framework.Logf("Warning: kubectl exec encountered container not found.\nerr=%v\nstdout=%v\nstderr=%v)", err, string(stdOutBytes), string(stdErrBytes))
time.Sleep(2 * time.Second)
continue
}
@@ -200,7 +200,7 @@ func (tk *TestKubeconfig) kubectlExec(namespace string, podName, containerName s
cmd := tk.KubectlCmd(cmdArgs...)
cmd.Stdout, cmd.Stderr = &stdout, &stderr
- e2elog.Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
+ framework.Logf("Running '%s %s'", cmd.Path, strings.Join(cmdArgs, " "))
err := cmd.Run()
return stdout.Bytes(), stderr.Bytes(), err
}

View File

@@ -20,13 +20,12 @@ import (
"bytes"
"fmt"
"regexp"
+ "runtime"
"runtime/debug"
"time"
"github.com/onsi/ginkgo/v2"
// TODO: Remove the following imports (ref: https://github.com/kubernetes/kubernetes/issues/81245)
- e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
)
func nowStamp() string {
@@ -48,7 +47,7 @@ func Failf(format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
skip := 2
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
- e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
+ fail(nowStamp()+": "+msg, skip)
panic("unreachable")
}
@@ -60,7 +59,55 @@ func Fail(msg string, callerSkip ...int) {
skip += callerSkip[0]
}
log("FAIL", "%s\n\nFull Stack Trace\n%s", msg, PrunedStack(skip))
- e2eginkgowrapper.Fail(nowStamp()+": "+msg, skip)
+ fail(nowStamp()+": "+msg, skip)
+ }
+ // FailurePanic is the value that will be panicked from Fail.
+ type FailurePanic struct {
+ Message string // The failure message passed to Fail
+ Filename string // The filename that is the source of the failure
+ Line int // The line number of the filename that is the source of the failure
+ FullStackTrace string // A full stack trace starting at the source of the failure
+ }
+ const ginkgoFailurePanic = `
+ Your test failed.
+ Ginkgo panics to prevent subsequent assertions from running.
+ Normally Ginkgo rescues this panic so you shouldn't see it.
+ But, if you make an assertion in a goroutine, Ginkgo can't capture the panic.
+ To circumvent this, you should call
+ defer GinkgoRecover()
+ at the top of the goroutine that caused this panic.
+ `
+ // String makes FailurePanic look like the old Ginkgo panic when printed.
+ func (FailurePanic) String() string { return ginkgoFailurePanic }
+ // fail wraps ginkgo.Fail so that it panics with more useful
+ // information about the failure. This function will panic with a
+ // FailurePanic.
+ func fail(message string, callerSkip ...int) {
+ skip := 1
+ if len(callerSkip) > 0 {
+ skip += callerSkip[0]
+ }
+ _, file, line, _ := runtime.Caller(skip)
+ fp := FailurePanic{
+ Message: message,
+ Filename: file,
+ Line: line,
+ FullStackTrace: string(PrunedStack(skip)),
+ }
+ defer func() {
+ e := recover()
+ if e != nil {
+ panic(fp)
+ }
+ }()
+ ginkgo.Fail(message, skip)
}
var codeFilterRE = regexp.MustCompile(`/github.com/onsi/ginkgo/v2/`)
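One consequence of the block added above (a hedged illustration, not part of the commit): because fail() re-panics with a FailurePanic value, code that recovers from a test failure can read structured fields instead of parsing a string. The helper below is hypothetical; only framework.FailurePanic and its fields come from the diff:

package example // hypothetical, for illustration only

import (
	"fmt"

	"k8s.io/kubernetes/test/e2e/framework"
)

// describeRecoveredFailure formats a value recovered from a test-failure
// panic. When the value is a framework.FailurePanic, the source location
// and message are available as fields.
func describeRecoveredFailure(r interface{}) string {
	if fp, ok := r.(framework.FailurePanic); ok {
		return fmt.Sprintf("%s:%d: %s", fp.Filename, fp.Line, fp.Message)
	}
	return fmt.Sprintf("%v", r)
}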

View File

@@ -1,54 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package log will be removed after switching to use core framework log.
// Do not make further changes here!
package log
import (
"fmt"
"time"
"github.com/onsi/ginkgo/v2"
e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
)
func nowStamp() string {
return time.Now().Format(time.StampMilli)
}
func log(level string, format string, args ...interface{}) {
fmt.Fprintf(ginkgo.GinkgoWriter, nowStamp()+": "+level+": "+format+"\n", args...)
}
// Logf logs the info.
func Logf(format string, args ...interface{}) {
log("INFO", format, args...)
}
// Failf logs the fail info.
func Failf(format string, args ...interface{}) {
FailfWithOffset(1, format, args...)
}
// FailfWithOffset calls "Fail" and logs the error at "offset" levels above its caller
// (for example, for call chain f -> g -> FailfWithOffset(1, ...) error would be logged for "f").
func FailfWithOffset(offset int, format string, args ...interface{}) {
msg := fmt.Sprintf(format, args...)
log("FAIL", msg)
e2eginkgowrapper.Fail(nowStamp()+": "+msg, 1+offset)
}

View File

@@ -22,7 +22,7 @@ import (
"fmt"
"k8s.io/component-base/metrics/testutil"
- e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+ "k8s.io/kubernetes/test/e2e/framework"
)
const (
@@ -94,12 +94,12 @@ func (m *ComponentCollection) PrintHumanReadable() string {
func PrettyPrintJSON(metrics interface{}) string {
output := &bytes.Buffer{}
if err := json.NewEncoder(output).Encode(metrics); err != nil {
- e2elog.Logf("Error building encoder: %v", err)
+ framework.Logf("Error building encoder: %v", err)
return ""
}
formatted := &bytes.Buffer{}
if err := json.Indent(formatted, output.Bytes(), "", " "); err != nil {
- e2elog.Logf("Error indenting: %v", err)
+ framework.Logf("Error indenting: %v", err)
return ""
}
return string(formatted.Bytes())

View File

@@ -29,7 +29,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/component-base/metrics/testutil"
- e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+ "k8s.io/kubernetes/test/e2e/framework"
)
const (
@@ -226,7 +226,7 @@ func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration
for _, m := range latencyMetrics {
if m.Latency > threshold {
badMetrics = append(badMetrics, m)
- e2elog.Logf("%+v", m)
+ framework.Logf("%+v", m)
}
}
return badMetrics, nil

View File

@@ -40,7 +40,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
clientretry "k8s.io/client-go/util/retry"
- e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+ "k8s.io/kubernetes/test/e2e/framework"
netutil "k8s.io/utils/net"
)
@@ -128,7 +128,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
conditionType, node.Name, cond.Status == v1.ConditionTrue, taints)
}
if !silent {
- e2elog.Logf(msg)
+ framework.Logf(msg)
}
return false
}
@@ -137,7 +137,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
return true
}
if !silent {
- e2elog.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
+ framework.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
return false
@@ -146,7 +146,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
return true
}
if !silent {
- e2elog.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
+ framework.Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
return false
@@ -154,7 +154,7 @@ func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionT
}
if !silent {
- e2elog.Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
+ framework.Logf("Couldn't find condition %v on node %v", conditionType, node.Name)
}
return false
}
@@ -196,7 +196,7 @@ func Filter(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
func TotalRegistered(c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
- e2elog.Logf("Failed to list nodes: %v", err)
+ framework.Logf("Failed to list nodes: %v", err)
return 0, err
}
return len(nodes.Items), nil
@@ -206,7 +206,7 @@ func TotalRegistered(c clientset.Interface) (int, error) {
func TotalReady(c clientset.Interface) (int, error) {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
- e2elog.Logf("Failed to list nodes: %v", err)
+ framework.Logf("Failed to list nodes: %v", err)
return 0, err
}
@@ -220,7 +220,7 @@ func TotalReady(c clientset.Interface) (int, error) {
// GetExternalIP returns node external IP concatenated with port 22 for ssh
// e.g. 1.2.3.4:22
func GetExternalIP(node *v1.Node) (string, error) {
- e2elog.Logf("Getting external IP address for %s", node.Name)
+ framework.Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
if a.Type == v1.NodeExternalIP && a.Address != "" {
@@ -628,7 +628,7 @@ func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName str
"app": appName + "-pod",
}
for i, node := range nodes.Items {
- e2elog.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
+ framework.Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
_, err := c.CoreV1().Pods(namespace).Create(context.TODO(), &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf(appName+"-pod-%v", i),
@@ -884,7 +884,7 @@ func verifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Tai
// TODO use wrapper methods in expect.go after removing core e2e dependency on node
gomega.ExpectWithOffset(2, err).NotTo(gomega.HaveOccurred())
if taintExists(nodeUpdated.Spec.Taints, taint) {
- e2elog.Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
+ framework.Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
}
}

View File

@@ -27,7 +27,7 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
- e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+ "k8s.io/kubernetes/test/e2e/framework"
)
const sleepTime = 20 * time.Second
@@ -47,7 +47,7 @@ func WaitForReadyNodes(c clientset.Interface, size int, timeout time.Duration) e
// WaitForTotalHealthy checks whether all registered nodes are ready and all required Pods are running on them.
func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
- e2elog.Logf("Waiting up to %v for all nodes to be ready", timeout)
+ framework.Logf("Waiting up to %v for all nodes to be ready", timeout)
var notReady []v1.Node
var missingPodsPerNode map[string][]string
@@ -115,11 +115,11 @@ func WaitForTotalHealthy(c clientset.Interface, timeout time.Duration) error {
// is ConditionTrue; if it's false, it ensures the node condition is in any state
// other than ConditionTrue (e.g. not true or unknown).
func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
- e2elog.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
+ framework.Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
- e2elog.Logf("Couldn't get node %s", name)
+ framework.Logf("Couldn't get node %s", name)
continue
}
@@ -127,7 +127,7 @@ func WaitConditionToBe(c clientset.Interface, name string, conditionType v1.Node
return true
}
}
- e2elog.Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
+ framework.Logf("Node %s didn't reach desired %s condition status (%t) within %v", name, conditionType, wantTrue, timeout)
return false
}
@@ -149,7 +149,7 @@ func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.No
for start := time.Now(); time.Since(start) < timeout; time.Sleep(sleepTime) {
nodes, err := waitListSchedulableNodes(c)
if err != nil {
- e2elog.Logf("Failed to list nodes: %v", err)
+ framework.Logf("Failed to list nodes: %v", err)
continue
}
numNodes := len(nodes.Items)
@@ -163,10 +163,10 @@ func CheckReady(c clientset.Interface, size int, timeout time.Duration) ([]v1.No
numReady := len(nodes.Items)
if numNodes == size && numReady == size {
- e2elog.Logf("Cluster has reached the desired number of ready nodes %d", size)
+ framework.Logf("Cluster has reached the desired number of ready nodes %d", size)
return nodes.Items, nil
}
- e2elog.Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numReady, numNodes-numReady)
+ framework.Logf("Waiting for ready nodes %d, current ready %d, not ready nodes %d", size, numReady, numNodes-numReady)
}
return nil, fmt.Errorf("timeout waiting %v for number of ready nodes to be %d", timeout, size)
}
@@ -215,7 +215,7 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
allNodes, err := c.CoreV1().Nodes().List(context.TODO(), opts)
if err != nil {
var terminalListNodesErr error
- e2elog.Logf("Unexpected error listing nodes: %v", err)
+ framework.Logf("Unexpected error listing nodes: %v", err)
if attempt >= 3 {
terminalListNodesErr = err
}
@@ -236,9 +236,9 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
if len(nodesNotReadyYet) > 0 {
// In large clusters, log them only every 10th pass.
if len(nodesNotReadyYet) < largeClusterThreshold || attempt%10 == 0 {
- e2elog.Logf("Unschedulable nodes= %v, maximum value for starting tests= %v", len(nodesNotReadyYet), allowedNotReadyNodes)
+ framework.Logf("Unschedulable nodes= %v, maximum value for starting tests= %v", len(nodesNotReadyYet), allowedNotReadyNodes)
for _, node := range nodesNotReadyYet {
- e2elog.Logf(" -> Node %s [[[ Ready=%t, Network(available)=%t, Taints=%v, NonblockingTaints=%v ]]]",
+ framework.Logf(" -> Node %s [[[ Ready=%t, Network(available)=%t, Taints=%v, NonblockingTaints=%v ]]]",
node.Name,
IsConditionSetAsExpectedSilent(&node, v1.NodeReady, true),
IsConditionSetAsExpectedSilent(&node, v1.NodeNetworkUnavailable, false),
@@ -250,7 +250,7 @@ func CheckReadyForTests(c clientset.Interface, nonblockingTaints string, allowed
if len(nodesNotReadyYet) > allowedNotReadyNodes {
ready := len(allNodes.Items) - len(nodesNotReadyYet)
remaining := len(nodesNotReadyYet) - allowedNotReadyNodes
- e2elog.Logf("==== node wait: %v out of %v nodes are ready, max notReady allowed %v. Need %v more before starting.", ready, len(allNodes.Items), allowedNotReadyNodes, remaining)
+ framework.Logf("==== node wait: %v out of %v nodes are ready, max notReady allowed %v. Need %v more before starting.", ready, len(allNodes.Items), allowedNotReadyNodes, remaining)
}
}
}

View File

@@ -27,7 +27,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
- e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+ "k8s.io/kubernetes/test/e2e/framework"
)
const (
@@ -59,7 +59,7 @@ func DeletePodWithWait(c clientset.Interface, pod *v1.Pod) error {
// DeletePodWithWaitByName deletes the named and namespaced pod and waits for the pod to be terminated. Resilient to the pod
// not existing.
func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string) error {
- e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
+ framework.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, metav1.DeleteOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
@@ -67,7 +67,7 @@ func DeletePodWithWaitByName(c clientset.Interface, podName, podNamespace string
}
return fmt.Errorf("pod Delete API error: %v", err)
}
- e2elog.Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName)
+ framework.Logf("Wait up to %v for pod %q to be fully deleted", PodDeleteTimeout, podName)
err = WaitForPodNotFoundInNamespace(c, podName, podNamespace, PodDeleteTimeout)
if err != nil {
return fmt.Errorf("pod %q was not deleted: %v", podName, err)
@@ -92,7 +92,7 @@ func DeletePodsWithGracePeriod(c clientset.Interface, pods []v1.Pod, grace int64
// DeletePodWithGracePeriodByName deletes a pod by name and namespace. Resilient to the pod not existing.
func DeletePodWithGracePeriodByName(c clientset.Interface, podName, podNamespace string, grace int64) error {
- e2elog.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
+ framework.Logf("Deleting pod %q in namespace %q", podName, podNamespace)
err := c.CoreV1().Pods(podNamespace).Delete(context.TODO(), podName, *metav1.NewDeleteOptions(grace))
if err != nil {
if apierrors.IsNotFound(err) {

View File

@@ -35,7 +35,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
- e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+ "k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -64,7 +64,7 @@ func expectNoError(err error, explain ...interface{}) {
// (for example, for call chain f -> g -> expectNoErrorWithOffset(1, ...) error would be logged for "f").
func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
if err != nil {
- e2elog.Logf("Unexpected error occurred: %v", err)
+ framework.Logf("Unexpected error occurred: %v", err)
}
gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
}
@@ -122,10 +122,10 @@ func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
if ctx.Err() != nil {
// We may encounter errors here because of a race between the pod readiness and apiserver
// proxy. So, we log the error and retry if this occurs.
- e2elog.Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
+ framework.Logf("Controller %s: Failed to Get from replica %d [%s]: %v\n pod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
return false, nil
}
- e2elog.Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
+ framework.Logf("Controller %s: Failed to GET from replica %d [%s]: %v\npod status: %#v", r.controllerName, i+1, pod.Name, err, pod.Status)
continue
}
// The response checker expects the pod's name unless !respondName, in
@@ -136,20 +136,20 @@ func (r ProxyResponseChecker) CheckAllResponses() (done bool, err error) {
what = "expected"
want := pod.Name
if got != want {
- e2elog.Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
+ framework.Logf("Controller %s: Replica %d [%s] expected response %q but got %q",
r.controllerName, i+1, pod.Name, want, got)
continue
}
} else {
what = "non-empty"
if len(got) == 0 {
- e2elog.Logf("Controller %s: Replica %d [%s] expected non-empty response",
+ framework.Logf("Controller %s: Replica %d [%s] expected non-empty response",
r.controllerName, i+1, pod.Name)
continue
}
}
successes++
- e2elog.Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
+ framework.Logf("Controller %s: Got %s result from replica %d [%s]: %q, %d of %d required successes so far",
r.controllerName, what, i+1, pod.Name, got, successes, len(r.pods.Items))
}
if successes < len(r.pods.Items) {
@@ -183,7 +183,7 @@ func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32,
}
created = append(created, pod)
}
- e2elog.Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)
+ framework.Logf("Pod name %s: Found %d pods out of %d", name, len(created), replicas)
if int32(len(created)) == replicas {
pods.Items = created
@@ -267,17 +267,17 @@ func LogPodStates(pods []v1.Pod) {
maxGraceW++
// Log pod info. * does space padding, - makes them left-aligned.
- e2elog.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
+ framework.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, "POD", maxNodeW, "NODE", maxPhaseW, "PHASE", maxGraceW, "GRACE", "CONDITIONS")
for _, pod := range pods {
grace := ""
if pod.DeletionGracePeriodSeconds != nil {
grace = fmt.Sprintf("%ds", *pod.DeletionGracePeriodSeconds)
}
- e2elog.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
+ framework.Logf("%-[1]*[2]s %-[3]*[4]s %-[5]*[6]s %-[7]*[8]s %[9]s",
maxPodW, pod.ObjectMeta.Name, maxNodeW, pod.Spec.NodeName, maxPhaseW, pod.Status.Phase, maxGraceW, grace, pod.Status.Conditions)
}
- e2elog.Logf("") // Final empty line helps for readability.
+ framework.Logf("") // Final empty line helps for readability.
}
// logPodTerminationMessages logs termination messages for failing pods. It's a short snippet (much smaller than full logs), but it often shows
@@ -286,12 +286,12 @@ func logPodTerminationMessages(pods []v1.Pod) {
for _, pod := range pods {
for _, status := range pod.Status.InitContainerStatuses {
if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 {
- e2elog.Logf("%s[%s].initContainer[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
+ framework.Logf("%s[%s].initContainer[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
}
}
for _, status := range pod.Status.ContainerStatuses {
if status.LastTerminationState.Terminated != nil && len(status.LastTerminationState.Terminated.Message) > 0 {
- e2elog.Logf("%s[%s].container[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
+ framework.Logf("%s[%s].container[%s]=%s", pod.Name, pod.Namespace, status.Name, status.LastTerminationState.Terminated.Message)
}
}
}
@@ -330,21 +330,21 @@ func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDi
for _, container := range pod.Spec.Containers {
logs, err := getPodLogsInternal(c, namespace, pod.Name, container.Name, false, nil, &tailLen)
if err != nil {
- e2elog.Logf("Unable to fetch %s/%s/%s logs: %v", pod.Namespace, pod.Name, container.Name, err)
+ framework.Logf("Unable to fetch %s/%s/%s logs: %v", pod.Namespace, pod.Name, container.Name, err)
continue
}
logDir := filepath.Join(reportDir, namespace, pod.Name, container.Name)
err = os.MkdirAll(logDir, 0755)
if err != nil {
- e2elog.Logf("Unable to create path '%s'. Err: %v", logDir, err)
+ framework.Logf("Unable to create path '%s'. Err: %v", logDir, err)
continue
}
logPath := filepath.Join(logDir, "logs.txt")
err = os.WriteFile(logPath, []byte(logs), 0644)
if err != nil {
- e2elog.Logf("Could not write the container logs in: %s. Err: %v", logPath, err)
+ framework.Logf("Could not write the container logs in: %s. Err: %v", logPath, err)
}
}
}
@@ -354,7 +354,7 @@ func logPodLogs(c clientset.Interface, namespace string, pods []v1.Pod, reportDi
func DumpAllPodInfoForNamespace(c clientset.Interface, namespace, reportDir string) {
pods, err := c.CoreV1().Pods(namespace).List(context.TODO(), metav1.ListOptions{})
if err != nil {
- e2elog.Logf("unable to fetch pod debug info: %v", err)
+ framework.Logf("unable to fetch pod debug info: %v", err)
}
LogPodStates(pods.Items)
logPodTerminationMessages(pods.Items)
@@ -443,7 +443,7 @@ func newExecPodSpec(ns, generateName string) *v1.Pod {
// CreateExecPodOrFail creates a agnhost pause pod used as a vessel for kubectl exec commands.
// Pod name is uniquely generated.
func CreateExecPodOrFail(client clientset.Interface, ns, generateName string, tweak func(*v1.Pod)) *v1.Pod {
- e2elog.Logf("Creating new exec pod")
+ framework.Logf("Creating new exec pod")
pod := newExecPodSpec(ns, generateName)
if tweak != nil {
tweak(pod)
@@ -495,7 +495,7 @@ func CheckPodsRunningReadyOrSucceeded(c clientset.Interface, ns string, podNames
// in namespace ns are in the condition, using c and waiting at most timeout.
func checkPodsCondition(c clientset.Interface, ns string, podNames []string, timeout time.Duration, condition podCondition, desc string) bool {
np := len(podNames)
- e2elog.Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
+ framework.Logf("Waiting up to %v for %d pods to be %s: %s", timeout, np, desc, podNames)
type waitPodResult struct {
success bool
podName string
@@ -513,11 +513,11 @@ func checkPodsCondition(c clientset.Interface, ns string, podNames []string, tim
for range podNames {
res := <-result
if !res.success {
- e2elog.Logf("Pod %[1]s failed to be %[2]s.", res.podName, desc)
+ framework.Logf("Pod %[1]s failed to be %[2]s.", res.podName, desc)
success = false
}
}
- e2elog.Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames)
+ framework.Logf("Wanted all %d pods to be %s. Result: %t. Pods: %v", np, desc, success, podNames)
return success
}
@@ -600,7 +600,7 @@ func GetPodSecretUpdateTimeout(c clientset.Interface) time.Duration {
// secret(configmap) that's based on cluster size + additional time as a fudge factor.
secretTTL, err := getNodeTTLAnnotationValue(c)
if err != nil {
- e2elog.Logf("Couldn't get node TTL annotation (using default value of 0): %v", err)
+ framework.Logf("Couldn't get node TTL annotation (using default value of 0): %v", err)
}
podLogTimeout := 240*time.Second + secretTTL
return podLogTimeout

View File

@ -33,7 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubectl/pkg/util/podutils" "k8s.io/kubectl/pkg/util/podutils"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
) )
@ -186,7 +186,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
ignoreSelector := labels.SelectorFromSet(map[string]string{}) ignoreSelector := labels.SelectorFromSet(map[string]string{})
start := time.Now() start := time.Now()
e2elog.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready", framework.Logf("Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready",
timeout, minPods, ns) timeout, minPods, ns)
var ignoreNotReady bool var ignoreNotReady bool
badPods := []v1.Pod{} badPods := []v1.Pod{}
@ -241,25 +241,25 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
case res && err == nil: case res && err == nil:
nOk++ nOk++
case pod.Status.Phase == v1.PodSucceeded: case pod.Status.Phase == v1.PodSucceeded:
e2elog.Logf("The status of Pod %s is Succeeded, skipping waiting", pod.ObjectMeta.Name) framework.Logf("The status of Pod %s is Succeeded, skipping waiting", pod.ObjectMeta.Name)
// it doesn't make sense to wait for this pod // it doesn't make sense to wait for this pod
continue continue
case pod.Status.Phase != v1.PodFailed: case pod.Status.Phase != v1.PodFailed:
e2elog.Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase) framework.Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
notReady++ notReady++
badPods = append(badPods, pod) badPods = append(badPods, pod)
default: default:
if metav1.GetControllerOf(&pod) == nil { if metav1.GetControllerOf(&pod) == nil {
e2elog.Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name) framework.Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
badPods = append(badPods, pod) badPods = append(badPods, pod)
} }
//ignore failed pods that are controlled by some controller //ignore failed pods that are controlled by some controller
} }
} }
e2elog.Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)", framework.Logf("%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)",
nOk, len(podList.Items), ns, int(time.Since(start).Seconds())) nOk, len(podList.Items), ns, int(time.Since(start).Seconds()))
e2elog.Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk) framework.Logf("expected %d pod replicas in namespace '%s', %d are Running and Ready.", replicas, ns, replicaOk)
if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 { if replicaOk == replicas && nOk >= minPods && len(badPods) == 0 {
return true, nil return true, nil
@ -271,7 +271,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
if !ignoreNotReady { if !ignoreNotReady {
return errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout, lastAPIError) return errorBadPodsStates(badPods, desiredPods, ns, "RUNNING and READY", timeout, lastAPIError)
} }
e2elog.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods) framework.Logf("Number of not-ready pods (%d) is below the allowed threshold (%d).", notReady, allowedNotReadyPods)
} }
return nil return nil
} }
@ -280,7 +280,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedN
// If the condition callback returns an error that matches FinalErr (checked with IsFinal), // If the condition callback returns an error that matches FinalErr (checked with IsFinal),
// then polling aborts early. // then polling aborts early.
func WaitForPodCondition(c clientset.Interface, ns, podName, conditionDesc string, timeout time.Duration, condition podCondition) error { func WaitForPodCondition(c clientset.Interface, ns, podName, conditionDesc string, timeout time.Duration, condition podCondition) error {
e2elog.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, conditionDesc) framework.Logf("Waiting up to %v for pod %q in namespace %q to be %q", timeout, podName, ns, conditionDesc)
var ( var (
lastPodError error lastPodError error
lastPod *v1.Pod lastPod *v1.Pod
@ -295,15 +295,15 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, conditionDesc strin
lastPod = pod // Don't overwrite if an error occurs after successfully retrieving. lastPod = pod // Don't overwrite if an error occurs after successfully retrieving.
// log now so that current pod info is reported before calling `condition()` // log now so that current pod info is reported before calling `condition()`
e2elog.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v", framework.Logf("Pod %q: Phase=%q, Reason=%q, readiness=%t. Elapsed: %v",
podName, pod.Status.Phase, pod.Status.Reason, podutils.IsPodReady(pod), time.Since(start)) podName, pod.Status.Phase, pod.Status.Reason, podutils.IsPodReady(pod), time.Since(start))
if done, err := condition(pod); done { if done, err := condition(pod); done {
if err == nil { if err == nil {
e2elog.Logf("Pod %q satisfied condition %q", podName, conditionDesc) framework.Logf("Pod %q satisfied condition %q", podName, conditionDesc)
} }
return true, err return true, err
} else if err != nil { } else if err != nil {
e2elog.Logf("Error evaluating pod condition %s: %v", conditionDesc, err) framework.Logf("Error evaluating pod condition %s: %v", conditionDesc, err)
if IsFinal(err) { if IsFinal(err) {
return false, err return false, err
} }
@ -329,7 +329,7 @@ func WaitForPodCondition(c clientset.Interface, ns, podName, conditionDesc strin
// WaitForPodsCondition waits for the listed pods to match the given condition. // WaitForPodsCondition waits for the listed pods to match the given condition.
// To succeed, at least minPods must be listed, and all listed pods must match the condition. // To succeed, at least minPods must be listed, and all listed pods must match the condition.
func WaitForAllPodsCondition(c clientset.Interface, ns string, opts metav1.ListOptions, minPods int, conditionDesc string, timeout time.Duration, condition podCondition) (*v1.PodList, error) { func WaitForAllPodsCondition(c clientset.Interface, ns string, opts metav1.ListOptions, minPods int, conditionDesc string, timeout time.Duration, condition podCondition) (*v1.PodList, error) {
e2elog.Logf("Waiting up to %v for at least %d pods in namespace %s to be %s", timeout, minPods, ns, conditionDesc) framework.Logf("Waiting up to %v for at least %d pods in namespace %s to be %s", timeout, minPods, ns, conditionDesc)
var pods *v1.PodList var pods *v1.PodList
matched := 0 matched := 0
err := wait.PollImmediate(poll, timeout, func() (done bool, err error) { err := wait.PollImmediate(poll, timeout, func() (done bool, err error) {
@ -338,7 +338,7 @@ func WaitForAllPodsCondition(c clientset.Interface, ns string, opts metav1.ListO
return handleWaitingAPIError(err, true, "listing pods") return handleWaitingAPIError(err, true, "listing pods")
} }
if len(pods.Items) < minPods { if len(pods.Items) < minPods {
e2elog.Logf("found %d pods, waiting for at least %d", len(pods.Items), minPods) framework.Logf("found %d pods, waiting for at least %d", len(pods.Items), minPods)
return false, nil return false, nil
} }
@ -356,7 +356,7 @@ func WaitForAllPodsCondition(c clientset.Interface, ns string, opts metav1.ListO
if len(nonMatchingPods) <= 0 { if len(nonMatchingPods) <= 0 {
return true, nil // All pods match. return true, nil // All pods match.
} }
e2elog.Logf("%d pods are not %s: %v", len(nonMatchingPods), conditionDesc, nonMatchingPods) framework.Logf("%d pods are not %s: %v", len(nonMatchingPods), conditionDesc, nonMatchingPods)
return false, nil return false, nil
}) })
return pods, maybeTimeoutError(err, "waiting for at least %d pods to be %s (matched %d)", minPods, conditionDesc, matched) return pods, maybeTimeoutError(err, "waiting for at least %d pods to be %s (matched %d)", minPods, conditionDesc, matched)
@ -481,16 +481,16 @@ func WaitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace
return WaitForPodCondition(c, namespace, podName, "running and ready", timeout, func(pod *v1.Pod) (bool, error) { return WaitForPodCondition(c, namespace, podName, "running and ready", timeout, func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase { switch pod.Status.Phase {
case v1.PodFailed: case v1.PodFailed:
e2elog.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status) framework.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status)
return false, errPodFailed return false, errPodFailed
case v1.PodSucceeded: case v1.PodSucceeded:
e2elog.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status) framework.Logf("The phase of Pod %s is %s which is unexpected, pod status: %#v", pod.Name, pod.Status.Phase, pod.Status)
return false, errPodCompleted return false, errPodCompleted
case v1.PodRunning: case v1.PodRunning:
e2elog.Logf("The phase of Pod %s is %s (Ready = %v)", pod.Name, pod.Status.Phase, podutils.IsPodReady(pod)) framework.Logf("The phase of Pod %s is %s (Ready = %v)", pod.Name, pod.Status.Phase, podutils.IsPodReady(pod))
return podutils.IsPodReady(pod), nil return podutils.IsPodReady(pod), nil
} }
e2elog.Logf("The phase of Pod %s is %s, waiting for it to be Running (with Ready = true)", pod.Name, pod.Status.Phase) framework.Logf("The phase of Pod %s is %s, waiting for it to be Running (with Ready = true)", pod.Name, pod.Status.Phase)
return false, nil return false, nil
}) })
} }
@ -551,7 +551,7 @@ func WaitForPodNotFoundInNamespace(c clientset.Interface, podName, ns string, ti
func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error { func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
var lastPod *v1.Pod var lastPod *v1.Pod
err := wait.PollImmediate(interval, timeout, func() (bool, error) { err := wait.PollImmediate(interval, timeout, func() (bool, error) {
e2elog.Logf("Waiting for pod %s to disappear", podName) framework.Logf("Waiting for pod %s to disappear", podName)
options := metav1.ListOptions{LabelSelector: label.String()} options := metav1.ListOptions{LabelSelector: label.String()}
pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options) pods, err := c.CoreV1().Pods(ns).List(context.TODO(), options)
if err != nil { if err != nil {
@ -560,14 +560,14 @@ func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labe
found := false found := false
for i, pod := range pods.Items { for i, pod := range pods.Items {
if pod.Name == podName { if pod.Name == podName {
e2elog.Logf("Pod %s still exists", podName) framework.Logf("Pod %s still exists", podName)
found = true found = true
lastPod = &(pods.Items[i]) lastPod = &(pods.Items[i])
break break
} }
} }
if !found { if !found {
e2elog.Logf("Pod %s no longer exists", podName) framework.Logf("Pod %s no longer exists", podName)
return true, nil return true, nil
} }
return false, nil return false, nil
@ -644,7 +644,7 @@ func WaitForNRestartablePods(ps *testutils.PodStore, expect int, timeout time.Du
pods = FilterNonRestartablePods(allPods) pods = FilterNonRestartablePods(allPods)
if len(pods) != expect { if len(pods) != expect {
errLast = fmt.Errorf("expected to find %d pods but found only %d", expect, len(pods)) errLast = fmt.Errorf("expected to find %d pods but found only %d", expect, len(pods))
e2elog.Logf("Error getting pods: %v", errLast) framework.Logf("Error getting pods: %v", errLast)
return false, nil return false, nil
} }
return true, nil return true, nil
@@ -734,17 +734,17 @@ func WaitForContainerRunning(c clientset.Interface, namespace, podName, containe
 func handleWaitingAPIError(err error, retryNotFound bool, taskFormat string, taskArgs ...interface{}) (bool, error) {
 	taskDescription := fmt.Sprintf(taskFormat, taskArgs...)
 	if retryNotFound && apierrors.IsNotFound(err) {
-		e2elog.Logf("Ignoring NotFound error while " + taskDescription)
+		framework.Logf("Ignoring NotFound error while " + taskDescription)
 		return false, nil
 	}
 	if retry, delay := shouldRetry(err); retry {
-		e2elog.Logf("Retryable error while %s, retrying after %v: %v", taskDescription, delay, err)
+		framework.Logf("Retryable error while %s, retrying after %v: %v", taskDescription, delay, err)
 		if delay > 0 {
 			time.Sleep(delay)
 		}
 		return false, nil
 	}
-	e2elog.Logf("Encountered non-retryable error while %s: %v", taskDescription, err)
+	framework.Logf("Encountered non-retryable error while %s: %v", taskDescription, err)
 	return false, err
 }
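To show how the signature above is typically consumed, here is a sketch of a package-internal wait condition that delegates API errors to handleWaitingAPIError. The wrapper name waitForPodPhase and the poll parameters are assumptions; only handleWaitingAPIError and its (bool, error) contract come from this hunk.

// Illustrative, package-internal sketch: transient API errors are classified by
// handleWaitingAPIError instead of immediately failing the poll.
func waitForPodPhase(c clientset.Interface, ns, podName string, phase v1.PodPhase, timeout time.Duration) error {
	return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
		if err != nil {
			// Retry NotFound and other retryable errors; give up on the rest.
			return handleWaitingAPIError(err, true, "getting pod %s in namespace %s", podName, ns)
		}
		return pod.Status.Phase == phase, nil
	})
}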


@@ -35,7 +35,7 @@ import (
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	"k8s.io/kubernetes/test/e2e/framework"
 )
 const (
@@ -125,7 +125,7 @@ func NodeSSHHosts(c clientset.Interface) ([]string, error) {
 	hosts := nodeAddresses(nodelist, v1.NodeExternalIP)
 	// If ExternalIPs aren't available for all nodes, try falling back to the InternalIPs.
 	if len(hosts) < len(nodelist.Items) {
-		e2elog.Logf("No external IP address on nodes, falling back to internal IPs")
+		framework.Logf("No external IP address on nodes, falling back to internal IPs")
 		hosts = nodeAddresses(nodelist, v1.NodeInternalIP)
 	}
@@ -146,12 +146,12 @@ func NodeSSHHosts(c clientset.Interface) ([]string, error) {
 		go func(host string) {
 			defer wg.Done()
 			if canConnect(host) {
-				e2elog.Logf("Assuming SSH on host %s", host)
+				framework.Logf("Assuming SSH on host %s", host)
 				sshHostsLock.Lock()
 				sshHosts = append(sshHosts, net.JoinHostPort(host, SSHPort))
 				sshHostsLock.Unlock()
 			} else {
-				e2elog.Logf("Skipping host %s because it does not run anything on port %s", host, SSHPort)
+				framework.Logf("Skipping host %s because it does not run anything on port %s", host, SSHPort)
 			}
 		}(host)
 	}
@@ -168,7 +168,7 @@ func canConnect(host string) bool {
 	hostPort := net.JoinHostPort(host, SSHPort)
 	conn, err := net.DialTimeout("tcp", hostPort, 3*time.Second)
 	if err != nil {
-		e2elog.Logf("cannot dial %s: %v", hostPort, err)
+		framework.Logf("cannot dial %s: %v", hostPort, err)
 		return false
 	}
 	conn.Close()
@@ -352,15 +352,15 @@ func runSSHCommandViaBastion(cmd, user, bastion, host string, signer ssh.Signer)
 // LogResult records result log
 func LogResult(result Result) {
 	remote := fmt.Sprintf("%s@%s", result.User, result.Host)
-	e2elog.Logf("ssh %s: command: %s", remote, result.Cmd)
-	e2elog.Logf("ssh %s: stdout: %q", remote, result.Stdout)
-	e2elog.Logf("ssh %s: stderr: %q", remote, result.Stderr)
-	e2elog.Logf("ssh %s: exit code: %d", remote, result.Code)
+	framework.Logf("ssh %s: command: %s", remote, result.Cmd)
+	framework.Logf("ssh %s: stdout: %q", remote, result.Stdout)
+	framework.Logf("ssh %s: stderr: %q", remote, result.Stderr)
+	framework.Logf("ssh %s: exit code: %d", remote, result.Code)
 }
 // IssueSSHCommandWithResult tries to execute a SSH command and returns the execution result
 func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, error) {
-	e2elog.Logf("Getting external IP address for %s", node.Name)
+	framework.Logf("Getting external IP address for %s", node.Name)
 	host := ""
 	for _, a := range node.Status.Addresses {
 		if a.Type == v1.NodeExternalIP && a.Address != "" {
@@ -383,7 +383,7 @@ func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*Result, er
 		return nil, fmt.Errorf("couldn't find any IP address for node %s", node.Name)
 	}
-	e2elog.Logf("SSH %q on %s(%s)", cmd, node.Name, host)
+	framework.Logf("SSH %q on %s(%s)", cmd, node.Name, host)
 	result, err := SSH(cmd, host, provider)
 	LogResult(result)
@@ -454,7 +454,7 @@ func expectNoError(err error, explain ...interface{}) {
 // (for example, for call chain f -> g -> ExpectNoErrorWithOffset(1, ...) error would be logged for "f").
 func expectNoErrorWithOffset(offset int, err error, explain ...interface{}) {
 	if err != nil {
-		e2elog.Logf("Unexpected error occurred: %v", err)
+		framework.Logf("Unexpected error occurred: %v", err)
 	}
 	gomega.ExpectWithOffset(1+offset, err).NotTo(gomega.HaveOccurred(), explain...)
 }
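For orientation, a hedged usage sketch of the SSH helpers touched in this file. SSH, LogResult, and the Result fields are taken from the hunks above; the e2essh import path/alias, the command, and the wrapper function are assumptions.

// Hypothetical test snippet; e2essh is the conventional alias for this package.
import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
)

func checkNodeUptime(host string) {
	// SSH runs the command on the host; LogResult emits command, stdout,
	// stderr, and exit code via framework.Logf.
	result, err := e2essh.SSH("uptime", host, framework.TestContext.Provider)
	e2essh.LogResult(result)
	framework.ExpectNoError(err)
	if result.Code != 0 {
		framework.Failf("uptime failed on %s: exit code %d", host, result.Code)
	}
}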


@@ -20,13 +20,13 @@ import (
 	"time"
 	"k8s.io/apimachinery/pkg/util/wait"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
+	"k8s.io/kubernetes/test/e2e/framework"
 	e2etodokubectl "k8s.io/kubernetes/test/e2e/framework/todo/kubectl"
 )
 // WaitForSSHTunnels waits for establishing SSH tunnel to busybox pod.
 func WaitForSSHTunnels(namespace string) {
-	e2elog.Logf("Waiting for SSH tunnels to establish")
+	framework.Logf("Waiting for SSH tunnels to establish")
 	e2etodokubectl.RunKubectl(namespace, "run", "ssh-tunnel-test",
 		"--image=busybox",
 		"--restart=Never",


@@ -34,7 +34,6 @@ import (
 	klog "k8s.io/klog/v2"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2econfig "k8s.io/kubernetes/test/e2e/framework/config"
-	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
@@ -162,7 +161,7 @@ func (t testDriverParameter) Set(filename string) error {
 // to define the tests.
 func AddDriverDefinition(filename string) error {
 	driver, err := loadDriverDefinition(filename)
-	e2elog.Logf("Driver loaded from path [%s]: %+v", filename, driver)
+	framework.Logf("Driver loaded from path [%s]: %+v", filename, driver)
 	if err != nil {
 		return err
 	}
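A hedged usage sketch of AddDriverDefinition as touched above. The function signature comes from the hunk; the import path of the external storage package, the wrapper name, and the YAML file location are assumptions for illustration.

// Illustrative only: register an additional driver definition programmatically.
import (
	"k8s.io/kubernetes/test/e2e/storage/external"
)

func registerHostpathDriver() error {
	// AddDriverDefinition loads the definition, logs it with framework.Logf
	// (as in the hunk above), and defines the external storage tests for it.
	return external.AddDriverDefinition("/etc/e2e/csi-hostpath-driver.yaml")
}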


@@ -28,7 +28,6 @@ import (
 	"k8s.io/kubernetes/test/e2e/chaosmonkey"
 	"k8s.io/kubernetes/test/e2e/framework"
-	e2eginkgowrapper "k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/utils/junit"
 	admissionapi "k8s.io/pod-security-admission/api"
@@ -75,7 +74,7 @@ func FinalizeUpgradeTest(start time.Time, tc *junit.TestCase) {
 	}
 	switch r := r.(type) {
-	case e2eginkgowrapper.FailurePanic:
+	case framework.FailurePanic:
 		tc.Failures = []*junit.Failure{
 			{
 				Message: r.Message,
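To illustrate the recover pattern this last hunk relies on, here is a sketch of converting a recovered framework.FailurePanic into a plain failure record. The helper name and the recordFailure callback are assumptions; the FailurePanic type and its fields are what the type switch above matches on.

// Illustrative sketch only.
import (
	"fmt"

	"k8s.io/kubernetes/test/e2e/framework"
)

func runStepAndRecord(step func(), recordFailure func(msg string)) {
	defer func() {
		r := recover()
		if r == nil {
			return
		}
		if fp, ok := r.(framework.FailurePanic); ok {
			// A failed assertion panics with a FailurePanic carrying the
			// failure message, source location, and full stack trace.
			recordFailure(fmt.Sprintf("%s\n%s:%d\n%s", fp.Message, fp.Filename, fp.Line, fp.FullStackTrace))
			return
		}
		panic(r) // not a test failure; re-panic
	}()
	step()
}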