clean up test code: call framework.Logf/Failf directly and drop the e2elog alias for k8s.io/kubernetes/test/e2e/framework/log

carlory 2019-08-27 17:18:43 +08:00
parent c4c64673d7
commit d1290ffdef
41 changed files with 216 additions and 260 deletions
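
Nearly every hunk below applies the same mechanical pattern: the aliased import e2elog "k8s.io/kubernetes/test/e2e/framework/log" is removed and each e2elog.Logf / e2elog.Failf call becomes framework.Logf / framework.Failf, with format strings and arguments unchanged. A minimal sketch of that pattern follows; the package name and the checkRestart helper are hypothetical and not part of the commit, and only the framework.Logf/Failf calls come from the diff itself:

package e2eexample

import (
	"k8s.io/kubernetes/test/e2e/framework"
	// Before this commit the file would also import:
	//   e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	// and call e2elog.Logf / e2elog.Failf below.
)

// checkRestart illustrates the rewrite applied throughout the diff:
// only the receiver package of the logging calls changes.
func checkRestart(podName string, restartCount int32) {
	framework.Logf("Pod: %s, restart count: %d", podName, restartCount)
	if restartCount == 0 {
		framework.Failf("Failed waiting for %v restart", podName)
	}
}

Once no .go file in a package references the alias, the //test/e2e/framework/log:go_default_library entry in that package's BUILD deps can be dropped as well, which is what the go_library/go_test hunks below do.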


@ -31,7 +31,6 @@ import (
commonutils "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/auth"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
@ -83,14 +82,14 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
pod, err := c.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
stat := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName)
e2elog.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount)
framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount)
if stat.RestartCount > 0 {
e2elog.Logf("Saw %v restart, succeeded...", podName)
framework.Logf("Saw %v restart, succeeded...", podName)
wg.Done()
return
}
}
e2elog.Logf("Failed waiting for %v restart! ", podName)
framework.Logf("Failed waiting for %v restart! ", podName)
passed = false
wg.Done()
}
@ -106,7 +105,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
}
wg.Wait()
if !passed {
e2elog.Failf("At least one liveness example failed. See the logs above.")
framework.Failf("At least one liveness example failed. See the logs above.")
}
})
})


@ -24,7 +24,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo"
)
@ -38,23 +37,23 @@ var _ = framework.KubeDescribe("GKE local SSD [Feature:GKELocalSSD]", func() {
})
ginkgo.It("should write and read from node local SSD [Feature:GKELocalSSD]", func() {
e2elog.Logf("Start local SSD test")
framework.Logf("Start local SSD test")
createNodePoolWithLocalSsds("np-ssd")
doTestWriteAndReadToLocalSsd(f)
})
})
func createNodePoolWithLocalSsds(nodePoolName string) {
e2elog.Logf("Create node pool: %s with local SSDs in cluster: %s ",
framework.Logf("Create node pool: %s with local SSDs in cluster: %s ",
nodePoolName, framework.TestContext.CloudConfig.Cluster)
out, err := exec.Command("gcloud", "alpha", "container", "node-pools", "create",
nodePoolName,
fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster),
"--local-ssd-count=1").CombinedOutput()
if err != nil {
e2elog.Failf("Failed to create node pool %s: Err: %v\n%v", nodePoolName, err, string(out))
framework.Failf("Failed to create node pool %s: Err: %v\n%v", nodePoolName, err, string(out))
}
e2elog.Logf("Successfully created node pool %s:\n%v", nodePoolName, string(out))
framework.Logf("Successfully created node pool %s:\n%v", nodePoolName, string(out))
}
func doTestWriteAndReadToLocalSsd(f *framework.Framework) {


@ -42,7 +42,6 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/kubelet:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/perf:go_default_library",


@ -19,7 +19,6 @@ package node
import (
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo"
)
@ -36,7 +35,7 @@ var _ = SIGDescribe("AppArmor", func() {
if !ginkgo.CurrentGinkgoTestDescription().Failed {
return
}
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
})
ginkgo.It("should enforce an AppArmor profile", func() {


@ -21,7 +21,6 @@ import (
"strings"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"github.com/onsi/ginkgo"
@ -42,7 +41,7 @@ var _ = SIGDescribe("crictl", func() {
ginkgo.By("Getting all nodes' SSH-able IP addresses")
hosts, err := e2essh.NodeSSHHosts(f.ClientSet)
if err != nil {
e2elog.Failf("Error getting node hostnames: %v", err)
framework.Failf("Error getting node hostnames: %v", err)
}
testCases := []struct {
@ -60,15 +59,15 @@ var _ = SIGDescribe("crictl", func() {
result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider)
stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr)
if err != nil {
e2elog.Failf("Ran %q on %q, got error %v", testCase.cmd, host, err)
framework.Failf("Ran %q on %q, got error %v", testCase.cmd, host, err)
}
// Log the stdout/stderr output.
// TODO: Verify the output.
if len(stdout) > 0 {
e2elog.Logf("Got stdout from %q:\n %s\n", host, strings.TrimSpace(stdout))
framework.Logf("Got stdout from %q:\n %s\n", host, strings.TrimSpace(stdout))
}
if len(stderr) > 0 {
e2elog.Logf("Got stderr from %q:\n %s\n", host, strings.TrimSpace(stderr))
framework.Logf("Got stderr from %q:\n %s\n", host, strings.TrimSpace(stderr))
}
}
})


@ -27,7 +27,6 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo"
)
@ -73,7 +72,7 @@ var _ = SIGDescribe("Events", func() {
podClient.Delete(pod.Name, nil)
}()
if _, err := podClient.Create(pod); err != nil {
e2elog.Failf("Failed to create pod: %v", err)
framework.Failf("Failed to create pod: %v", err)
}
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
@ -87,9 +86,9 @@ var _ = SIGDescribe("Events", func() {
ginkgo.By("retrieving the pod")
podWithUID, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
e2elog.Failf("Failed to get pod: %v", err)
framework.Failf("Failed to get pod: %v", err)
}
e2elog.Logf("%+v\n", podWithUID)
framework.Logf("%+v\n", podWithUID)
var events *v1.EventList
// Check for scheduler event about the pod.
ginkgo.By("checking for scheduler event about the pod")
@ -106,7 +105,7 @@ var _ = SIGDescribe("Events", func() {
return false, err
}
if len(events.Items) > 0 {
e2elog.Logf("Saw scheduler event for our pod.")
framework.Logf("Saw scheduler event for our pod.")
return true, nil
}
return false, nil
@ -126,7 +125,7 @@ var _ = SIGDescribe("Events", func() {
return false, err
}
if len(events.Items) > 0 {
e2elog.Logf("Saw kubelet event for our pod.")
framework.Logf("Saw kubelet event for our pod.")
return true, nil
}
return false, nil


@ -30,7 +30,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"k8s.io/kubernetes/test/e2e/framework/volume"
@ -53,10 +52,10 @@ const (
// podNamePrefix and namespace.
func getPodMatches(c clientset.Interface, nodeName string, podNamePrefix string, namespace string) sets.String {
matches := sets.NewString()
e2elog.Logf("Checking pods on node %v via /runningpods endpoint", nodeName)
framework.Logf("Checking pods on node %v via /runningpods endpoint", nodeName)
runningPods, err := e2ekubelet.GetKubeletPods(c, nodeName)
if err != nil {
e2elog.Logf("Error checking running pods on %v: %v", nodeName, err)
framework.Logf("Error checking running pods on %v: %v", nodeName, err)
return matches
}
for _, pod := range runningPods.Items {
@ -93,7 +92,7 @@ func waitTillNPodsRunningOnNodes(c clientset.Interface, nodeNames sets.String, p
if seen.Len() == targetNumPods {
return true, nil
}
e2elog.Logf("Waiting for %d pods to be running on the node; %d are currently running;", targetNumPods, seen.Len())
framework.Logf("Waiting for %d pods to be running on the node; %d are currently running;", targetNumPods, seen.Len())
return false, nil
})
}
@ -214,7 +213,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
}
for _, test := range tests {
e2elog.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
framework.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
err = wait.Poll(poll, timeout, func() (bool, error) {
result, err := e2essh.NodeExec(nodeIP, test.cmd, framework.TestContext.Provider)
framework.ExpectNoError(err)
@ -232,9 +231,9 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
}
if expectClean {
e2elog.Logf("Pod's host has been cleaned up")
framework.Logf("Pod's host has been cleaned up")
} else {
e2elog.Logf("Pod's host has not been cleaned up (per expectation)")
framework.Logf("Pod's host has not been cleaned up (per expectation)")
}
}
@ -349,7 +348,7 @@ var _ = SIGDescribe("kubelet", func() {
start := time.Now()
err = waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0, itArg.timeout)
framework.ExpectNoError(err)
e2elog.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
framework.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
time.Since(start))
if resourceMonitor != nil {
resourceMonitor.LogCPUSummary()


@ -27,7 +27,6 @@ import (
kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
"k8s.io/kubernetes/test/e2e/perftype"
@ -58,10 +57,10 @@ func logPodsOnNodes(c clientset.Interface, nodeNames []string) {
for _, n := range nodeNames {
podList, err := e2ekubelet.GetKubeletRunningPods(c, n)
if err != nil {
e2elog.Logf("Unable to retrieve kubelet pods for node %v", n)
framework.Logf("Unable to retrieve kubelet pods for node %v", n)
continue
}
e2elog.Logf("%d pods are running on node %v", len(podList.Items), n)
framework.Logf("%d pods are running on node %v", len(podList.Items), n)
}
}
@ -95,7 +94,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
deadline := time.Now().Add(monitoringTime)
for time.Now().Before(deadline) {
timeLeft := deadline.Sub(time.Now())
e2elog.Logf("Still running...%v left", timeLeft)
framework.Logf("Still running...%v left", timeLeft)
if timeLeft < reportingPeriod {
time.Sleep(timeLeft)
} else {
@ -109,13 +108,13 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
usageSummary, err := rm.GetLatest()
framework.ExpectNoError(err)
// TODO(random-liu): Remove the original log when we migrate to new perfdash
e2elog.Logf("%s", rm.FormatResourceUsage(usageSummary))
framework.Logf("%s", rm.FormatResourceUsage(usageSummary))
// Log perf result
printPerfData(e2eperf.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
verifyMemoryLimits(f.ClientSet, expectedMemory, usageSummary)
cpuSummary := rm.GetCPUSummary()
e2elog.Logf("%s", rm.FormatCPUSummary(cpuSummary))
framework.Logf("%s", rm.FormatCPUSummary(cpuSummary))
// Log perf result
printPerfData(e2eperf.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
verifyCPULimits(expectedCPU, cpuSummary)
@ -149,14 +148,14 @@ func verifyMemoryLimits(c clientset.Interface, expected e2ekubelet.ResourceUsage
errList = append(errList, fmt.Sprintf("node %v:\n %s", nodeName, strings.Join(nodeErrs, ", ")))
heapStats, err := e2ekubelet.GetKubeletHeapStats(c, nodeName)
if err != nil {
e2elog.Logf("Unable to get heap stats from %q", nodeName)
framework.Logf("Unable to get heap stats from %q", nodeName)
} else {
e2elog.Logf("Heap stats on %q\n:%v", nodeName, heapStats)
framework.Logf("Heap stats on %q\n:%v", nodeName, heapStats)
}
}
}
if len(errList) > 0 {
e2elog.Failf("Memory usage exceeding limits:\n %s", strings.Join(errList, "\n"))
framework.Failf("Memory usage exceeding limits:\n %s", strings.Join(errList, "\n"))
}
}
@ -190,7 +189,7 @@ func verifyCPULimits(expected e2ekubelet.ContainersCPUSummary, actual e2ekubelet
}
}
if len(errList) > 0 {
e2elog.Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n"))
framework.Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n"))
}
}
@ -215,7 +214,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
ginkgo.AfterEach(func() {
rm.Stop()
result := om.GetLatestRuntimeOperationErrorRate()
e2elog.Logf("runtime operation error metrics:\n%s", e2ekubelet.FormatRuntimeOperationErrorRate(result))
framework.Logf("runtime operation error metrics:\n%s", e2ekubelet.FormatRuntimeOperationErrorRate(result))
})
SIGDescribe("regular resource usage tracking [Feature:RegularResourceUsageTracking]", func() {
// We assume that the scheduler will make reasonable scheduling choices
@ -287,6 +286,6 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
func printPerfData(p *perftype.PerfData) {
// Notice that we must make sure the perftype.PerfResultEnd is in a new line.
if str := e2emetrics.PrettyPrintJSON(p); str != "" {
e2elog.Logf("%s %s\n%s", perftype.PerfResultTag, str, perftype.PerfResultEnd)
framework.Logf("%s %s\n%s", perftype.PerfResultTag, str, perftype.PerfResultEnd)
}
}


@ -23,7 +23,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -166,7 +165,7 @@ var _ = SIGDescribe("Mount propagation", func() {
for _, mountName := range dirNames {
cmd := fmt.Sprintf("cat /mnt/test/%s/file", mountName)
stdout, stderr, err := f.ExecShellInPodWithFullOutput(podName, cmd)
e2elog.Logf("pod %s mount %s: stdout: %q, stderr: %q error: %v", podName, mountName, stdout, stderr, err)
framework.Logf("pod %s mount %s: stdout: %q, stderr: %q error: %v", podName, mountName, stdout, stderr, err)
msg := fmt.Sprintf("When checking pod %s and directory %s", podName, mountName)
shouldBeVisible := mounts.Has(mountName)
if shouldBeVisible {


@ -29,7 +29,6 @@ import (
"k8s.io/apimachinery/pkg/fields"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
testutils "k8s.io/kubernetes/test/utils"
@ -188,7 +187,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
workingSetStatsMsg += fmt.Sprintf(" %s[%.1f|%.1f|%.1f];", nodes.Items[i].Name,
workingSetStats[host][0], workingSetStats[host][len(workingSetStats[host])/2], workingSetStats[host][len(workingSetStats[host])-1])
}
e2elog.Logf("Node-Problem-Detector CPU and Memory Stats:\n\t%s\n\t%s\n\t%s", cpuStatsMsg, rssStatsMsg, workingSetStatsMsg)
framework.Logf("Node-Problem-Detector CPU and Memory Stats:\n\t%s\n\t%s\n\t%s", cpuStatsMsg, rssStatsMsg, workingSetStatsMsg)
})
})


@ -27,7 +27,6 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -44,16 +43,16 @@ var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]"
pod.Status.Phase = v1.PodFailed
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).UpdateStatus(pod)
if err != nil {
e2elog.Failf("err failing pod: %v", err)
framework.Failf("err failing pod: %v", err)
}
count++
if count%50 == 0 {
e2elog.Logf("count: %v", count)
framework.Logf("count: %v", count)
}
}
e2elog.Logf("created: %v", count)
framework.Logf("created: %v", count)
// The gc controller polls every 30s and fires off a goroutine per
// pod to terminate.
@ -66,17 +65,17 @@ var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]"
pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) {
pods, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
if err != nil {
e2elog.Logf("Failed to list pod %v", err)
framework.Logf("Failed to list pod %v", err)
return false, nil
}
if len(pods.Items) != gcThreshold {
e2elog.Logf("Number of observed pods %v, waiting for %v", len(pods.Items), gcThreshold)
framework.Logf("Number of observed pods %v, waiting for %v", len(pods.Items), gcThreshold)
return false, nil
}
return true, nil
})
if pollErr != nil {
e2elog.Failf("Failed to GC pods within %v, %v pods remaining, error: %v", timeout, len(pods.Items), err)
framework.Failf("Failed to GC pods within %v, %v pods remaining, error: %v", timeout, len(pods.Items), err)
}
})
})


@ -33,10 +33,9 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var _ = SIGDescribe("Pods Extended", func() {
@ -144,7 +143,7 @@ var _ = SIGDescribe("Pods Extended", func() {
err = wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
podList, err := e2ekubelet.GetKubeletPods(f.ClientSet, pod.Spec.NodeName)
if err != nil {
e2elog.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
framework.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
return false, nil
}
for _, kubeletPod := range podList.Items {
@ -152,12 +151,12 @@ var _ = SIGDescribe("Pods Extended", func() {
continue
}
if kubeletPod.ObjectMeta.DeletionTimestamp == nil {
e2elog.Logf("deletion has not yet been observed")
framework.Logf("deletion has not yet been observed")
return false, nil
}
return false, nil
}
e2elog.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
framework.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
return true, nil
})
framework.ExpectNoError(err, "kubelet never observed the termination notice")


@ -31,7 +31,6 @@ import (
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -146,16 +145,16 @@ func testPreStop(c clientset.Interface, ns string) {
if err != nil {
if ctx.Err() != nil {
e2elog.Failf("Error validating prestop: %v", err)
framework.Failf("Error validating prestop: %v", err)
return true, err
}
ginkgo.By(fmt.Sprintf("Error validating prestop: %v", err))
} else {
e2elog.Logf("Saw: %s", string(body))
framework.Logf("Saw: %s", string(body))
state := State{}
err := json.Unmarshal(body, &state)
if err != nil {
e2elog.Logf("Error parsing: %v", err)
framework.Logf("Error parsing: %v", err)
return false, nil
}
if state.Received["prestop"] != 0 {
@ -218,7 +217,7 @@ var _ = SIGDescribe("PreStop", func() {
if pod.Name != kubeletPod.Name {
continue
} else if kubeletPod.Status.Phase == v1.PodRunning {
e2elog.Logf("pod is running")
framework.Logf("pod is running")
return true, err
}
}


@ -21,7 +21,6 @@ import (
"strings"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
"github.com/onsi/ginkgo"
@ -47,7 +46,7 @@ var _ = SIGDescribe("SSH", func() {
ginkgo.By("Getting all nodes' SSH-able IP addresses")
hosts, err := e2essh.NodeSSHHosts(f.ClientSet)
if err != nil {
e2elog.Failf("Error getting node hostnames: %v", err)
framework.Failf("Error getting node hostnames: %v", err)
}
testCases := []struct {
@ -82,23 +81,23 @@ var _ = SIGDescribe("SSH", func() {
result, err := e2essh.SSH(testCase.cmd, host, framework.TestContext.Provider)
stdout, stderr := strings.TrimSpace(result.Stdout), strings.TrimSpace(result.Stderr)
if err != testCase.expectedError {
e2elog.Failf("Ran %s on %s, got error %v, expected %v", testCase.cmd, host, err, testCase.expectedError)
framework.Failf("Ran %s on %s, got error %v, expected %v", testCase.cmd, host, err, testCase.expectedError)
}
if testCase.checkStdout && stdout != testCase.expectedStdout {
e2elog.Failf("Ran %s on %s, got stdout '%s', expected '%s'", testCase.cmd, host, stdout, testCase.expectedStdout)
framework.Failf("Ran %s on %s, got stdout '%s', expected '%s'", testCase.cmd, host, stdout, testCase.expectedStdout)
}
if stderr != testCase.expectedStderr {
e2elog.Failf("Ran %s on %s, got stderr '%s', expected '%s'", testCase.cmd, host, stderr, testCase.expectedStderr)
framework.Failf("Ran %s on %s, got stderr '%s', expected '%s'", testCase.cmd, host, stderr, testCase.expectedStderr)
}
if result.Code != testCase.expectedCode {
e2elog.Failf("Ran %s on %s, got exit code %d, expected %d", testCase.cmd, host, result.Code, testCase.expectedCode)
framework.Failf("Ran %s on %s, got exit code %d, expected %d", testCase.cmd, host, result.Code, testCase.expectedCode)
}
// Show stdout, stderr for logging purposes.
if len(stdout) > 0 {
e2elog.Logf("Got stdout from %s: %s", host, strings.TrimSpace(stdout))
framework.Logf("Got stdout from %s: %s", host, strings.TrimSpace(stdout))
}
if len(stderr) > 0 {
e2elog.Logf("Got stderr from %s: %s", host, strings.TrimSpace(stderr))
framework.Logf("Got stderr from %s: %s", host, strings.TrimSpace(stderr))
}
}
}
@ -106,7 +105,7 @@ var _ = SIGDescribe("SSH", func() {
// Quickly test that SSH itself errors correctly.
ginkgo.By("SSH'ing to a nonexistent host")
if _, err = e2essh.SSH(`echo "hello"`, "i.do.not.exist", framework.TestContext.Provider); err == nil {
e2elog.Failf("Expected error trying to SSH to nonexistent host.")
framework.Failf("Expected error trying to SSH to nonexistent host.")
}
})
})


@ -25,7 +25,6 @@ import (
"k8s.io/kubernetes/pkg/util/slice"
"k8s.io/kubernetes/test/e2e/framework"
jobutil "k8s.io/kubernetes/test/e2e/framework/job"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo"
)
@ -44,7 +43,7 @@ func cleanupJob(f *framework.Framework, job *batchv1.Job) {
ns := f.Namespace.Name
c := f.ClientSet
e2elog.Logf("Remove the Job's dummy finalizer; the Job should be deleted cascadingly")
framework.Logf("Remove the Job's dummy finalizer; the Job should be deleted cascadingly")
removeFinalizerFunc := func(j *batchv1.Job) {
j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil)
}
@ -70,19 +69,19 @@ func testFinishedJob(f *framework.Framework) {
job.ObjectMeta.Finalizers = []string{dummyFinalizer}
defer cleanupJob(f, job)
e2elog.Logf("Create a Job %s/%s with TTL", ns, job.Name)
framework.Logf("Create a Job %s/%s with TTL", ns, job.Name)
job, err := jobutil.CreateJob(c, ns, job)
framework.ExpectNoError(err)
e2elog.Logf("Wait for the Job to finish")
framework.Logf("Wait for the Job to finish")
err = jobutil.WaitForJobFinish(c, ns, job.Name)
framework.ExpectNoError(err)
e2elog.Logf("Wait for TTL after finished controller to delete the Job")
framework.Logf("Wait for TTL after finished controller to delete the Job")
err = jobutil.WaitForJobDeleting(c, ns, job.Name)
framework.ExpectNoError(err)
e2elog.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished")
framework.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished")
job, err = jobutil.GetJob(c, ns, job.Name)
framework.ExpectNoError(err)
finishTime := jobutil.FinishTime(job)


@ -46,7 +46,6 @@ go_library(
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/gpu:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/github.com/blang/semver:go_default_library",
@ -170,7 +169,6 @@ go_test(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/deviceplugin:go_default_library",
"//test/e2e/framework/gpu:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",


@ -35,7 +35,6 @@ import (
watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubernetes/pkg/security/apparmor"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"github.com/davecgh/go-spew/spew"
@ -60,7 +59,7 @@ var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor
ginkgo.It("should enforce a profile blocking writes", func() {
status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"deny-write")
if len(status.ContainerStatuses) == 0 {
e2elog.Failf("Unexpected pod status: %s", spew.Sdump(status))
framework.Failf("Unexpected pod status: %s", spew.Sdump(status))
return
}
state := status.ContainerStatuses[0].State.Terminated
@ -71,7 +70,7 @@ var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor][NodeFeature:AppArmor
ginkgo.It("should enforce a permissive profile", func() {
status := runAppArmorTest(f, true, apparmor.ProfileNamePrefix+apparmorProfilePrefix+"audit-write")
if len(status.ContainerStatuses) == 0 {
e2elog.Failf("Unexpected pod status: %s", spew.Sdump(status))
framework.Failf("Unexpected pod status: %s", spew.Sdump(status))
return
}
state := status.ContainerStatuses[0].State.Terminated


@ -29,7 +29,6 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
"k8s.io/kubernetes/test/e2e/perftype"
@ -47,9 +46,9 @@ func dumpDataToFile(data interface{}, labels map[string]string, prefix string) {
testName := labels["test"]
fileName := path.Join(framework.TestContext.ReportDir, fmt.Sprintf("%s-%s-%s.json", prefix, framework.TestContext.ReportPrefix, testName))
labels["timestamp"] = strconv.FormatInt(time.Now().UTC().Unix(), 10)
e2elog.Logf("Dumping perf data for test %q to %q.", testName, fileName)
framework.Logf("Dumping perf data for test %q to %q.", testName, fileName)
if err := ioutil.WriteFile(fileName, []byte(e2emetrics.PrettyPrintJSON(data)), 0644); err != nil {
e2elog.Logf("Failed to write perf data for test %q to %q: %v", testName, fileName, err)
framework.Logf("Failed to write perf data for test %q to %q: %v", testName, fileName, err)
}
}
@ -83,7 +82,7 @@ func logDensityTimeSeries(rc *ResourceCollector, create, watch map[string]metav1
timeSeries.ResourceData = rc.GetResourceTimeSeries()
if framework.TestContext.ReportDir == "" {
e2elog.Logf("%s %s\n%s", TimeSeriesTag, e2emetrics.PrettyPrintJSON(timeSeries), TimeSeriesEnd)
framework.Logf("%s %s\n%s", TimeSeriesTag, e2emetrics.PrettyPrintJSON(timeSeries), TimeSeriesEnd)
return
}
dumpDataToFile(timeSeries, timeSeries.Labels, "time_series")
@ -160,22 +159,22 @@ func getTestNodeInfo(f *framework.Framework, testName, testDesc string) map[stri
cpu, ok := node.Status.Capacity[v1.ResourceCPU]
if !ok {
e2elog.Failf("Fail to fetch CPU capacity value of test node.")
framework.Failf("Fail to fetch CPU capacity value of test node.")
}
memory, ok := node.Status.Capacity[v1.ResourceMemory]
if !ok {
e2elog.Failf("Fail to fetch Memory capacity value of test node.")
framework.Failf("Fail to fetch Memory capacity value of test node.")
}
cpuValue, ok := cpu.AsInt64()
if !ok {
e2elog.Failf("Fail to fetch CPU capacity value as Int64.")
framework.Failf("Fail to fetch CPU capacity value as Int64.")
}
memoryValue, ok := memory.AsInt64()
if !ok {
e2elog.Failf("Fail to fetch Memory capacity value as Int64.")
framework.Failf("Fail to fetch Memory capacity value as Int64.")
}
image := node.Status.NodeInfo.OSImage
@ -196,6 +195,6 @@ func getTestNodeInfo(f *framework.Framework, testName, testDesc string) map[stri
func printPerfData(p *perftype.PerfData) {
// Notice that we must make sure the perftype.PerfResultEnd is in a new line.
if str := e2emetrics.PrettyPrintJSON(p); str != "" {
e2elog.Logf("%s %s\n%s", perftype.PerfResultTag, str, perftype.PerfResultEnd)
framework.Logf("%s %s\n%s", perftype.PerfResultTag, str, perftype.PerfResultEnd)
}
}


@ -33,11 +33,10 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1alpha2"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
imageutils "k8s.io/kubernetes/test/utils/image"
)
func getOOMScoreForPid(pid int) (int, error) {
@ -162,9 +161,9 @@ var _ = framework.KubeDescribe("Container Manager Misc [Serial]", func() {
},
})
framework.ExpectNoError(err)
e2elog.Logf("Running containers:")
framework.Logf("Running containers:")
for _, c := range containers {
e2elog.Logf("%+v", c)
framework.Logf("%+v", c)
}
}
})


@ -36,7 +36,6 @@ import (
kubemetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -200,7 +199,7 @@ var _ = framework.KubeDescribe("Density [Serial] [Slow]", func() {
// Here we set API QPS limit from default 5 to 60 in order to test real Kubelet performance.
// Note that it will cause higher resource usage.
tempSetCurrentKubeletConfig(f, func(cfg *kubeletconfig.KubeletConfiguration) {
e2elog.Logf("Old QPS limit is: %d", cfg.KubeAPIQPS)
framework.Logf("Old QPS limit is: %d", cfg.KubeAPIQPS)
// Set new API QPS limit
cfg.KubeAPIQPS = int32(itArg.APIQPSLimit)
})
@ -360,7 +359,7 @@ func runDensityBatchTest(f *framework.Framework, rc *ResourceCollector, testArg
}, 10*time.Minute, 10*time.Second).Should(gomega.BeTrue())
if len(watchTimes) < testArg.podsNr {
e2elog.Failf("Timeout reached waiting for all Pods to be observed by the watch.")
framework.Failf("Timeout reached waiting for all Pods to be observed by the watch.")
}
// Analyze results
@ -541,7 +540,7 @@ func logAndVerifyLatency(batchLag time.Duration, e2eLags []e2emetrics.PodLatency
// TODO(coufon): do not trust 'kubelet' metrics since they are not reset!
latencyMetrics, _ := getPodStartLatency(kubeletAddr)
e2elog.Logf("Kubelet Prometheus metrics (not reset):\n%s", e2emetrics.PrettyPrintJSON(latencyMetrics))
framework.Logf("Kubelet Prometheus metrics (not reset):\n%s", e2emetrics.PrettyPrintJSON(latencyMetrics))
podStartupLatency := e2emetrics.ExtractLatencyMetrics(e2eLags)


@ -28,14 +28,12 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/features"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
kubeletpodresourcesv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
"k8s.io/kubernetes/test/e2e/framework"
dputil "k8s.io/kubernetes/test/e2e/framework/deviceplugin"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
kubeletpodresourcesv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/podresources/v1alpha1"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
@ -71,7 +69,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
dp.Spec.Containers[0].Env[i].Value = pluginSockDir
}
}
e2elog.Logf("env %v", dp.Spec.Containers[0].Env)
framework.Logf("env %v", dp.Spec.Containers[0].Env)
dp.Spec.NodeName = framework.TestContext.NodeName
ginkgo.By("Create sample device plugin pod")
devicePluginPod, err := f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(dp)
@ -81,7 +79,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
gomega.Eventually(func() bool {
return dputil.NumberOfSampleResources(getLocalNode(f)) > 0
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
e2elog.Logf("Successfully created device plugin pod")
framework.Logf("Successfully created device plugin pod")
ginkgo.By("Waiting for the resource exported by the sample device plugin to become available on the local node")
// TODO(vikasc): Instead of hard-coding number of devices, provide number of devices in the sample-device-plugin using configmap
@ -103,7 +101,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
podResources, err := getNodeDevices()
var resourcesForOurPod *kubeletpodresourcesv1alpha1.PodResources
e2elog.Logf("pod resources %v", podResources)
framework.Logf("pod resources %v", podResources)
gomega.Expect(err).To(gomega.BeNil())
framework.ExpectEqual(len(podResources.PodResources), 2)
for _, res := range podResources.GetPodResources() {
@ -111,7 +109,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
resourcesForOurPod = res
}
}
e2elog.Logf("resourcesForOurPod %v", resourcesForOurPod)
framework.Logf("resourcesForOurPod %v", resourcesForOurPod)
gomega.Expect(resourcesForOurPod).NotTo(gomega.BeNil())
framework.ExpectEqual(resourcesForOurPod.Name, pod1.Name)
framework.ExpectEqual(resourcesForOurPod.Namespace, pod1.Namespace)
@ -158,7 +156,7 @@ func testDevicePlugin(f *framework.Framework, pluginSockDir string) {
framework.ExpectNoError(err)
waitForContainerRemoval(devicePluginPod.Spec.Containers[0].Name, devicePluginPod.Name, devicePluginPod.Namespace)
_, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Get(dp.Name, getOptions)
e2elog.Logf("Trying to get dp pod after deletion. err must be non-nil. err: %v", err)
framework.Logf("Trying to get dp pod after deletion. err must be non-nil. err: %v", err)
framework.ExpectError(err)
devicePluginPod, err = f.ClientSet.CoreV1().Pods(metav1.NamespaceSystem).Create(dp)
@ -266,7 +264,7 @@ func ensurePodContainerRestart(f *framework.Framework, podName string, contName
var currentCount int32
p, err := f.PodClient().Get(podName, metav1.GetOptions{})
if err != nil || len(p.Status.ContainerStatuses) < 1 {
e2elog.Failf("ensurePodContainerRestart failed for pod %q: %v", podName, err)
framework.Failf("ensurePodContainerRestart failed for pod %q: %v", podName, err)
}
initialCount = p.Status.ContainerStatuses[0].RestartCount
gomega.Eventually(func() bool {
@ -275,7 +273,7 @@ func ensurePodContainerRestart(f *framework.Framework, podName string, contName
return false
}
currentCount = p.Status.ContainerStatuses[0].RestartCount
e2elog.Logf("initial %v, current %v", initialCount, currentCount)
framework.Logf("initial %v, current %v", initialCount, currentCount)
return currentCount > initialCount
}, 5*time.Minute, framework.Poll).Should(gomega.BeTrue())
}
@ -284,10 +282,10 @@ func ensurePodContainerRestart(f *framework.Framework, podName string, contName
func parseLog(f *framework.Framework, podName string, contName string, re string) string {
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, contName)
if err != nil {
e2elog.Failf("GetPodLogs for pod %q failed: %v", podName, err)
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
}
e2elog.Logf("got pod logs: %v", logs)
framework.Logf("got pod logs: %v", logs)
regex := regexp.MustCompile(re)
matches := regex.FindStringSubmatch(logs)
if len(matches) < 2 {


@ -33,7 +33,6 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -56,7 +55,7 @@ var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Do
runPodCheckpointTest(f, podName, func() {
checkpoints := findCheckpoints(podName)
if len(checkpoints) == 0 {
e2elog.Failf("No checkpoint for the pod was found")
framework.Failf("No checkpoint for the pod was found")
}
})
})
@ -85,14 +84,14 @@ var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Do
runPodCheckpointTest(f, podName, func() {
checkpoints := findCheckpoints(podName)
if len(checkpoints) == 0 {
e2elog.Failf("No checkpoint for the pod was found")
framework.Failf("No checkpoint for the pod was found")
}
ginkgo.By("Removing checkpoint of test pod")
for _, filename := range checkpoints {
if len(filename) == 0 {
continue
}
e2elog.Logf("Removing checkpoint %q", filename)
framework.Logf("Removing checkpoint %q", filename)
_, err := exec.Command("sudo", "rm", filename).CombinedOutput()
framework.ExpectNoError(err, "Failed to remove checkpoint file %q: %v", string(filename), err)
}
@ -134,7 +133,7 @@ var _ = SIGDescribe("Dockershim [Serial] [Disruptive] [Feature:Docker][Legacy:Do
ginkgo.By("Corrupt checkpoint file")
checkpoints := findCheckpoints(podName)
if len(checkpoints) == 0 {
e2elog.Failf("No checkpoint for the pod was found")
framework.Failf("No checkpoint for the pod was found")
}
for _, file := range checkpoints {
f, err := os.OpenFile(file, os.O_WRONLY|os.O_APPEND, 0644)
@ -176,10 +175,10 @@ func runPodCheckpointTest(f *framework.Framework, podName string, twist func())
if len(checkpoints) == 0 {
return true, nil
}
e2elog.Logf("Checkpoint of %q still exists: %v", podName, checkpoints)
framework.Logf("Checkpoint of %q still exists: %v", podName, checkpoints)
return false, nil
}); err != nil {
e2elog.Failf("Failed to observe checkpoint being removed within timeout: %v", err)
framework.Failf("Failed to observe checkpoint being removed within timeout: %v", err)
}
}
@ -213,7 +212,7 @@ func findCheckpoints(match string) []string {
checkpoints := []string{}
stdout, err := exec.Command("sudo", "grep", "-rl", match, framework.TestContext.DockershimCheckpointDir).CombinedOutput()
if err != nil {
e2elog.Logf("grep from dockershim checkpoint directory returns error: %v", err)
framework.Logf("grep from dockershim checkpoint directory returns error: %v", err)
}
if stdout == nil {
return checkpoints


@ -36,7 +36,6 @@ import (
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -480,9 +479,9 @@ func runEvictionTest(f *framework.Framework, pressureTimeout time.Duration, expe
gomega.Eventually(func() error {
if expectedNodeCondition != noPressure {
if hasNodeCondition(f, expectedNodeCondition) {
e2elog.Logf("Node has %s", expectedNodeCondition)
framework.Logf("Node has %s", expectedNodeCondition)
} else {
e2elog.Logf("Node does NOT have %s", expectedNodeCondition)
framework.Logf("Node does NOT have %s", expectedNodeCondition)
}
}
logKubeletLatencyMetrics(kubeletmetrics.EvictionStatsAgeKey)
@ -569,7 +568,7 @@ func verifyEvictionOrdering(f *framework.Framework, testSpecs []podEvictSpec) er
}
updatedPods := updatedPodList.Items
for _, p := range updatedPods {
e2elog.Logf("fetching pod %s; phase= %v", p.Name, p.Status.Phase)
framework.Logf("fetching pod %s; phase= %v", p.Name, p.Status.Phase)
}
ginkgo.By("checking eviction ordering and ensuring important pods dont fail")
@ -690,25 +689,25 @@ func hasNodeCondition(f *framework.Framework, expectedNodeCondition v1.NodeCondi
func logInodeMetrics() {
summary, err := getNodeSummary()
if err != nil {
e2elog.Logf("Error getting summary: %v", err)
framework.Logf("Error getting summary: %v", err)
return
}
if summary.Node.Runtime != nil && summary.Node.Runtime.ImageFs != nil && summary.Node.Runtime.ImageFs.Inodes != nil && summary.Node.Runtime.ImageFs.InodesFree != nil {
e2elog.Logf("imageFsInfo.Inodes: %d, imageFsInfo.InodesFree: %d", *summary.Node.Runtime.ImageFs.Inodes, *summary.Node.Runtime.ImageFs.InodesFree)
framework.Logf("imageFsInfo.Inodes: %d, imageFsInfo.InodesFree: %d", *summary.Node.Runtime.ImageFs.Inodes, *summary.Node.Runtime.ImageFs.InodesFree)
}
if summary.Node.Fs != nil && summary.Node.Fs.Inodes != nil && summary.Node.Fs.InodesFree != nil {
e2elog.Logf("rootFsInfo.Inodes: %d, rootFsInfo.InodesFree: %d", *summary.Node.Fs.Inodes, *summary.Node.Fs.InodesFree)
framework.Logf("rootFsInfo.Inodes: %d, rootFsInfo.InodesFree: %d", *summary.Node.Fs.Inodes, *summary.Node.Fs.InodesFree)
}
for _, pod := range summary.Pods {
e2elog.Logf("Pod: %s", pod.PodRef.Name)
framework.Logf("Pod: %s", pod.PodRef.Name)
for _, container := range pod.Containers {
if container.Rootfs != nil && container.Rootfs.InodesUsed != nil {
e2elog.Logf("--- summary Container: %s inodeUsage: %d", container.Name, *container.Rootfs.InodesUsed)
framework.Logf("--- summary Container: %s inodeUsage: %d", container.Name, *container.Rootfs.InodesUsed)
}
}
for _, volume := range pod.VolumeStats {
if volume.FsStats.InodesUsed != nil {
e2elog.Logf("--- summary Volume: %s inodeUsage: %d", volume.Name, *volume.FsStats.InodesUsed)
framework.Logf("--- summary Volume: %s inodeUsage: %d", volume.Name, *volume.FsStats.InodesUsed)
}
}
}
@ -717,25 +716,25 @@ func logInodeMetrics() {
func logDiskMetrics() {
summary, err := getNodeSummary()
if err != nil {
e2elog.Logf("Error getting summary: %v", err)
framework.Logf("Error getting summary: %v", err)
return
}
if summary.Node.Runtime != nil && summary.Node.Runtime.ImageFs != nil && summary.Node.Runtime.ImageFs.CapacityBytes != nil && summary.Node.Runtime.ImageFs.AvailableBytes != nil {
e2elog.Logf("imageFsInfo.CapacityBytes: %d, imageFsInfo.AvailableBytes: %d", *summary.Node.Runtime.ImageFs.CapacityBytes, *summary.Node.Runtime.ImageFs.AvailableBytes)
framework.Logf("imageFsInfo.CapacityBytes: %d, imageFsInfo.AvailableBytes: %d", *summary.Node.Runtime.ImageFs.CapacityBytes, *summary.Node.Runtime.ImageFs.AvailableBytes)
}
if summary.Node.Fs != nil && summary.Node.Fs.CapacityBytes != nil && summary.Node.Fs.AvailableBytes != nil {
e2elog.Logf("rootFsInfo.CapacityBytes: %d, rootFsInfo.AvailableBytes: %d", *summary.Node.Fs.CapacityBytes, *summary.Node.Fs.AvailableBytes)
framework.Logf("rootFsInfo.CapacityBytes: %d, rootFsInfo.AvailableBytes: %d", *summary.Node.Fs.CapacityBytes, *summary.Node.Fs.AvailableBytes)
}
for _, pod := range summary.Pods {
e2elog.Logf("Pod: %s", pod.PodRef.Name)
framework.Logf("Pod: %s", pod.PodRef.Name)
for _, container := range pod.Containers {
if container.Rootfs != nil && container.Rootfs.UsedBytes != nil {
e2elog.Logf("--- summary Container: %s UsedBytes: %d", container.Name, *container.Rootfs.UsedBytes)
framework.Logf("--- summary Container: %s UsedBytes: %d", container.Name, *container.Rootfs.UsedBytes)
}
}
for _, volume := range pod.VolumeStats {
if volume.FsStats.InodesUsed != nil {
e2elog.Logf("--- summary Volume: %s UsedBytes: %d", volume.Name, *volume.FsStats.UsedBytes)
framework.Logf("--- summary Volume: %s UsedBytes: %d", volume.Name, *volume.FsStats.UsedBytes)
}
}
}
@ -744,22 +743,22 @@ func logDiskMetrics() {
func logMemoryMetrics() {
summary, err := getNodeSummary()
if err != nil {
e2elog.Logf("Error getting summary: %v", err)
framework.Logf("Error getting summary: %v", err)
return
}
if summary.Node.Memory != nil && summary.Node.Memory.WorkingSetBytes != nil && summary.Node.Memory.AvailableBytes != nil {
e2elog.Logf("Node.Memory.WorkingSetBytes: %d, Node.Memory.AvailableBytes: %d", *summary.Node.Memory.WorkingSetBytes, *summary.Node.Memory.AvailableBytes)
framework.Logf("Node.Memory.WorkingSetBytes: %d, Node.Memory.AvailableBytes: %d", *summary.Node.Memory.WorkingSetBytes, *summary.Node.Memory.AvailableBytes)
}
for _, sysContainer := range summary.Node.SystemContainers {
if sysContainer.Name == kubeletstatsv1alpha1.SystemContainerPods && sysContainer.Memory != nil && sysContainer.Memory.WorkingSetBytes != nil && sysContainer.Memory.AvailableBytes != nil {
e2elog.Logf("Allocatable.Memory.WorkingSetBytes: %d, Allocatable.Memory.AvailableBytes: %d", *sysContainer.Memory.WorkingSetBytes, *sysContainer.Memory.AvailableBytes)
framework.Logf("Allocatable.Memory.WorkingSetBytes: %d, Allocatable.Memory.AvailableBytes: %d", *sysContainer.Memory.WorkingSetBytes, *sysContainer.Memory.AvailableBytes)
}
}
for _, pod := range summary.Pods {
e2elog.Logf("Pod: %s", pod.PodRef.Name)
framework.Logf("Pod: %s", pod.PodRef.Name)
for _, container := range pod.Containers {
if container.Memory != nil && container.Memory.WorkingSetBytes != nil {
e2elog.Logf("--- summary Container: %s WorkingSetBytes: %d", container.Name, *container.Memory.WorkingSetBytes)
framework.Logf("--- summary Container: %s WorkingSetBytes: %d", container.Name, *container.Memory.WorkingSetBytes)
}
}
}
@ -768,11 +767,11 @@ func logMemoryMetrics() {
func logPidMetrics() {
summary, err := getNodeSummary()
if err != nil {
e2elog.Logf("Error getting summary: %v", err)
framework.Logf("Error getting summary: %v", err)
return
}
if summary.Node.Rlimit != nil && summary.Node.Rlimit.MaxPID != nil && summary.Node.Rlimit.NumOfRunningProcesses != nil {
e2elog.Logf("Node.Rlimit.MaxPID: %d, Node.Rlimit.RunningProcesses: %d", *summary.Node.Rlimit.MaxPID, *summary.Node.Rlimit.NumOfRunningProcesses)
framework.Logf("Node.Rlimit.MaxPID: %d, Node.Rlimit.RunningProcesses: %d", *summary.Node.Rlimit.MaxPID, *summary.Node.Rlimit.NumOfRunningProcesses)
}
}


@ -26,7 +26,6 @@ import (
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/gpu"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/metrics"
"github.com/onsi/ginkgo"
@ -143,7 +142,7 @@ func checkIfNvidiaGPUsExistOnNode() bool {
// Cannot use `lspci` because it is not installed on all distros by default.
err := exec.Command("/bin/sh", "-c", "find /sys/devices/pci* -type f | grep vendor | xargs cat | grep 0x10de").Run()
if err != nil {
e2elog.Logf("check for nvidia GPUs failed. Got Error: %v", err)
framework.Logf("check for nvidia GPUs failed. Got Error: %v", err)
return false
}
return true
@ -164,14 +163,14 @@ func logDevicePluginMetrics() {
if quantile, err = strconv.ParseFloat(string(val), 64); err != nil {
continue
}
e2elog.Logf("Metric: %v ResourceName: %v Quantile: %v Latency: %v", msKey, resource, quantile, latency)
framework.Logf("Metric: %v ResourceName: %v Quantile: %v Latency: %v", msKey, resource, quantile, latency)
}
}
case kubeletmetrics.KubeletSubsystem + "_" + kubeletmetrics.DevicePluginRegistrationCountKey:
for _, sample := range samples {
resource := string(sample.Metric["resource_name"])
count := sample.Value
e2elog.Logf("Metric: %v ResourceName: %v Count: %v", msKey, resource, count)
framework.Logf("Metric: %v ResourceName: %v Count: %v", msKey, resource, count)
}
}
}


@ -30,7 +30,6 @@ import (
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -51,7 +50,7 @@ func makePodToVerifyHugePages(baseName string, hugePagesLimit resource.Quantity)
// this command takes the expected value and compares it against the actual value for the pod cgroup hugetlb.2MB.limit_in_bytes
command := fmt.Sprintf("expected=%v; actual=$(cat /tmp/hugetlb/%v/hugetlb.2MB.limit_in_bytes); if [ \"$expected\" -ne \"$actual\" ]; then exit 1; fi; ", hugePagesLimit.Value(), cgroupFsName)
e2elog.Logf("Pod to run command: %v", command)
framework.Logf("Pod to run command: %v", command)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),
@ -98,7 +97,7 @@ func configureHugePages() error {
if err != nil {
return err
}
e2elog.Logf("HugePages_Total is set to %v", numHugePages)
framework.Logf("HugePages_Total is set to %v", numHugePages)
if numHugePages == 5 {
return nil
}
@ -124,7 +123,7 @@ func pollResourceAsString(f *framework.Framework, resourceName string) string {
node, err := f.ClientSet.CoreV1().Nodes().Get(framework.TestContext.NodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
amount := amountOfResourceAsString(node, resourceName)
e2elog.Logf("amount of %v: %v", resourceName, amount)
framework.Logf("amount of %v: %v", resourceName, amount)
return amount
}


@ -20,7 +20,6 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"github.com/davecgh/go-spew/spew"
@ -59,7 +58,7 @@ var _ = framework.KubeDescribe("ImageID [NodeFeature: ImageID]", func() {
status := runningPod.Status
if len(status.ContainerStatuses) == 0 {
e2elog.Failf("Unexpected pod status; %s", spew.Sdump(status))
framework.Failf("Unexpected pod status; %s", spew.Sdump(status))
return
}


@ -24,7 +24,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e_node/perf/workloads"
@ -98,7 +97,7 @@ var _ = SIGDescribe("Node Performance Testing [Serial] [Slow] [Flaky]", func() {
framework.ExpectNoError(err)
perf, err := wl.ExtractPerformanceFromLogs(podLogs)
framework.ExpectNoError(err)
e2elog.Logf("Time to complete workload %s: %v", wl.Name(), perf)
framework.Logf("Time to complete workload %s: %v", wl.Name(), perf)
}
ginkgo.Context("Run node performance testing with pre-defined workloads", func() {


@ -34,7 +34,6 @@ import (
coreclientset "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
@ -372,7 +371,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector [NodeFeature:NodeProblemDete
ginkgo.By("Get node problem detector log")
log, err := e2epod.GetPodLogs(c, ns, name, name)
gomega.Expect(err).ShouldNot(gomega.HaveOccurred())
e2elog.Logf("Node Problem Detector logs:\n %s", log)
framework.Logf("Node Problem Detector logs:\n %s", log)
}
ginkgo.By("Delete the node problem detector")
f.PodClient().Delete(name, metav1.NewDeleteOptions(0))


@ -28,7 +28,6 @@ import (
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -49,7 +48,7 @@ func makePodToVerifyPids(baseName string, pidsLimit resource.Quantity) *v1.Pod {
// this command takes the expected value and compares it against the actual value for the pod cgroup pids.max
command := fmt.Sprintf("expected=%v; actual=$(cat /tmp/pids/%v/pids.max); if [ \"$expected\" -ne \"$actual\" ]; then exit 1; fi; ", pidsLimit.Value(), cgroupFsName)
e2elog.Logf("Pod to run command: %v", command)
framework.Logf("Pod to run command: %v", command)
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: "pod" + string(uuid.NewUUID()),


@ -44,7 +44,6 @@ import (
"k8s.io/kubernetes/pkg/util/procfs"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"k8s.io/kubernetes/test/e2e_node/perftype"
@ -97,7 +96,7 @@ func (r *ResourceCollector) Start() {
kubeletstatsv1alpha1.SystemContainerRuntime: runtimeContainer,
}
} else {
e2elog.Failf("Failed to get runtime container name in test-e2e-node resource collector.")
framework.Failf("Failed to get runtime container name in test-e2e-node resource collector.")
}
wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
@ -146,9 +145,9 @@ func (r *ResourceCollector) GetCPUSummary() e2ekubelet.ContainersCPUSummary {
func (r *ResourceCollector) LogLatest() {
summary, err := r.GetLatest()
if err != nil {
e2elog.Logf("%v", err)
framework.Logf("%v", err)
}
e2elog.Logf("%s", formatResourceUsageStats(summary))
framework.Logf("%s", formatResourceUsageStats(summary))
}
// collectStats collects resource usage from Cadvisor.
@ -156,12 +155,12 @@ func (r *ResourceCollector) collectStats(oldStatsMap map[string]*cadvisorapiv2.C
for _, name := range systemContainers {
ret, err := r.client.Stats(name, r.request)
if err != nil {
e2elog.Logf("Error getting container stats, err: %v", err)
framework.Logf("Error getting container stats, err: %v", err)
return
}
cStats, ok := ret[name]
if !ok {
e2elog.Logf("Missing info/stats for container %q", name)
framework.Logf("Missing info/stats for container %q", name)
return
}


@ -23,7 +23,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeletresourcemetricsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/metrics"
"k8s.io/kubernetes/test/e2e/framework/volume"
@ -102,7 +101,7 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
return
}
if framework.TestContext.DumpLogsOnFailure {
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
}
ginkgo.By("Recording processes in system cgroups")
recordSystemCgroupProcesses()


@ -27,7 +27,6 @@ import (
kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -59,7 +58,7 @@ var _ = SIGDescribe("Resource-usage [Serial] [Slow]", func() {
ginkgo.AfterEach(func() {
result := om.GetLatestRuntimeOperationErrorRate()
e2elog.Logf("runtime operation error metrics:\n%s", e2ekubelet.FormatRuntimeOperationErrorRate(result))
framework.Logf("runtime operation error metrics:\n%s", e2ekubelet.FormatRuntimeOperationErrorRate(result))
})
// This test measures and verifies the steady resource usage of node is within limit
@ -171,7 +170,7 @@ func runResourceUsageTest(f *framework.Framework, rc *ResourceCollector, testArg
deadline := time.Now().Add(monitoringTime)
for time.Now().Before(deadline) {
timeLeft := deadline.Sub(time.Now())
e2elog.Logf("Still running...%v left", timeLeft)
framework.Logf("Still running...%v left", timeLeft)
if timeLeft < reportingPeriod {
time.Sleep(timeLeft)
} else {
@ -192,14 +191,14 @@ func logAndVerifyResource(f *framework.Framework, rc *ResourceCollector, cpuLimi
// Obtain memory PerfData
usagePerContainer, err := rc.GetLatest()
framework.ExpectNoError(err)
e2elog.Logf("%s", formatResourceUsageStats(usagePerContainer))
framework.Logf("%s", formatResourceUsageStats(usagePerContainer))
usagePerNode := make(e2ekubelet.ResourceUsagePerNode)
usagePerNode[nodeName] = usagePerContainer
// Obtain CPU PerfData
cpuSummary := rc.GetCPUSummary()
e2elog.Logf("%s", formatCPUSummary(cpuSummary))
framework.Logf("%s", formatCPUSummary(cpuSummary))
cpuSummaryPerNode := make(e2ekubelet.NodesCPUSummary)
cpuSummaryPerNode[nodeName] = cpuSummary
@ -240,14 +239,14 @@ func verifyMemoryLimits(c clientset.Interface, expected e2ekubelet.ResourceUsage
errList = append(errList, fmt.Sprintf("node %v:\n %s", nodeName, strings.Join(nodeErrs, ", ")))
heapStats, err := e2ekubelet.GetKubeletHeapStats(c, nodeName)
if err != nil {
e2elog.Logf("Unable to get heap stats from %q", nodeName)
framework.Logf("Unable to get heap stats from %q", nodeName)
} else {
e2elog.Logf("Heap stats on %q\n:%v", nodeName, heapStats)
framework.Logf("Heap stats on %q\n:%v", nodeName, heapStats)
}
}
}
if len(errList) > 0 {
e2elog.Failf("Memory usage exceeding limits:\n %s", strings.Join(errList, "\n"))
framework.Failf("Memory usage exceeding limits:\n %s", strings.Join(errList, "\n"))
}
}
@ -281,7 +280,7 @@ func verifyCPULimits(expected e2ekubelet.ContainersCPUSummary, actual e2ekubelet
}
}
if len(errList) > 0 {
e2elog.Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n"))
framework.Failf("CPU usage exceeding limits:\n %s", strings.Join(errList, "\n"))
}
}
@ -289,7 +288,7 @@ func logPods(c clientset.Interface) {
nodeName := framework.TestContext.NodeName
podList, err := e2ekubelet.GetKubeletRunningPods(c, nodeName)
if err != nil {
e2elog.Logf("Unable to retrieve kubelet pods for node %v", nodeName)
framework.Logf("Unable to retrieve kubelet pods for node %v", nodeName)
}
e2elog.Logf("%d pods are running on node %v", len(podList.Items), nodeName)
framework.Logf("%d pods are running on node %v", len(podList.Items), nodeName)
}

View File

@@ -19,20 +19,18 @@ limitations under the License.
package e2e_node
import (
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"fmt"
"os/exec"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
"k8s.io/api/core/v1"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
)
// waitForPods polls for up to timeout and returns the running pods, stopping early once at least pod_count are running.
@@ -41,7 +39,7 @@ func waitForPods(f *framework.Framework, pod_count int, timeout time.Duration) (
for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
podList, err := f.PodClient().List(metav1.ListOptions{})
if err != nil {
e2elog.Logf("Failed to list pods on node: %v", err)
framework.Logf("Failed to list pods on node: %v", err)
continue
}
@@ -52,7 +50,7 @@ func waitForPods(f *framework.Framework, pod_count int, timeout time.Duration) (
}
runningPods = append(runningPods, &pod)
}
e2elog.Logf("Running pod count %d", len(runningPods))
framework.Logf("Running pod count %d", len(runningPods))
if len(runningPods) >= pod_count {
break
}
@@ -91,7 +89,7 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur
// startTimeout fit on the node and the node is now saturated.
runningPods := waitForPods(f, podCount, startTimeout)
if len(runningPods) < minPods {
e2elog.Failf("Failed to start %d pods, cannot test that restarting container runtime doesn't leak IPs", minPods)
framework.Failf("Failed to start %d pods, cannot test that restarting container runtime doesn't leak IPs", minPods)
}
for i := 0; i < restartCount; i += 1 {
@@ -114,7 +112,7 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur
return nil
}, 1*time.Minute, 2*time.Second).Should(gomega.BeNil())
if stdout, err := exec.Command("sudo", "kill", fmt.Sprintf("%d", pid)).CombinedOutput(); err != nil {
e2elog.Failf("Failed to kill container runtime (pid=%d): %v, stdout: %q", pid, err, string(stdout))
framework.Failf("Failed to kill container runtime (pid=%d): %v, stdout: %q", pid, err, string(stdout))
}
// Assume that container runtime will be restarted by systemd/supervisord etc.
time.Sleep(20 * time.Second)
@@ -123,12 +121,12 @@ var _ = framework.KubeDescribe("Restart [Serial] [Slow] [Disruptive] [NodeFeatur
ginkgo.By("Checking currently Running/Ready pods")
postRestartRunningPods := waitForPods(f, len(runningPods), recoverTimeout)
if len(postRestartRunningPods) == 0 {
e2elog.Failf("Failed to start *any* pods after container runtime restart, this might indicate an IP leak")
framework.Failf("Failed to start *any* pods after container runtime restart, this might indicate an IP leak")
}
ginkgo.By("Confirm no containers have terminated")
for _, pod := range postRestartRunningPods {
if c := testutils.TerminatedContainers(pod); len(c) != 0 {
e2elog.Failf("Pod %q has failed containers %+v after container runtime restart, this might indicate an IP leak", pod.Name, c)
framework.Failf("Pod %q has failed containers %+v after container runtime restart, this might indicate an IP leak", pod.Name, c)
}
}
ginkgo.By(fmt.Sprintf("Container runtime restart test passed with %d pods", len(postRestartRunningPods)))

View File

@@ -27,7 +27,6 @@ import (
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e_node/services"
"github.com/onsi/ginkgo"
@@ -142,9 +141,9 @@ var _ = framework.KubeDescribe("Container Runtime Conformance Test", func() {
break
}
if i < flakeRetry {
e2elog.Logf("No.%d attempt failed: %v, retrying...", i, err)
framework.Logf("No.%d attempt failed: %v, retrying...", i, err)
} else {
e2elog.Failf("All %d attempts failed: %v", flakeRetry, err)
framework.Failf("All %d attempts failed: %v", flakeRetry, err)
}
}
})

View File

@@ -29,11 +29,10 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
imageutils "k8s.io/kubernetes/test/utils/image"
)
var _ = framework.KubeDescribe("Security Context", func() {
@@ -69,7 +68,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
pid1 := f.ExecCommandInContainer("isolated-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top")
pid2 := f.ExecCommandInContainer("isolated-pid-ns-test-pod", "test-container-2", "/bin/pidof", "sleep")
if pid1 != "1" || pid2 != "1" {
e2elog.Failf("PIDs of different containers are not all 1: test-container-1=%v, test-container-2=%v", pid1, pid2)
framework.Failf("PIDs of different containers are not all 1: test-container-1=%v, test-container-2=%v", pid1, pid2)
}
})
@@ -110,7 +109,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
pid1 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-1", "/bin/pidof", "top")
pid2 := f.ExecCommandInContainer("shared-pid-ns-test-pod", "test-container-2", "/bin/pidof", "top")
if pid1 != pid2 {
e2elog.Failf("PIDs are not the same in different containers: test-container-1=%v, test-container-2=%v", pid1, pid2)
framework.Failf("PIDs are not the same in different containers: test-container-1=%v, test-container-2=%v", pid1, pid2)
}
})
})
@@ -163,18 +162,18 @@ var _ = framework.KubeDescribe("Security Context", func() {
createAndWaitHostPidPod(busyboxPodName, true)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
if err != nil {
e2elog.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
}
pids := strings.TrimSpace(logs)
e2elog.Logf("Got nginx's pid %q from pod %q", pids, busyboxPodName)
framework.Logf("Got nginx's pid %q from pod %q", pids, busyboxPodName)
if pids == "" {
e2elog.Failf("nginx's pid should be seen by hostpid containers")
framework.Failf("nginx's pid should be seen by hostpid containers")
}
pidSets := sets.NewString(strings.Split(pids, " ")...)
if !pidSets.Has(nginxPid) {
e2elog.Failf("nginx's pid should be seen by hostpid containers")
framework.Failf("nginx's pid should be seen by hostpid containers")
}
})
@@ -183,14 +182,14 @@ var _ = framework.KubeDescribe("Security Context", func() {
createAndWaitHostPidPod(busyboxPodName, false)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
if err != nil {
e2elog.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
}
pids := strings.TrimSpace(logs)
e2elog.Logf("Got nginx's pid %q from pod %q", pids, busyboxPodName)
framework.Logf("Got nginx's pid %q from pod %q", pids, busyboxPodName)
pidSets := sets.NewString(strings.Split(pids, " ")...)
if pidSets.Has(nginxPid) {
e2elog.Failf("nginx's pid should not be seen by non-hostpid containers")
framework.Failf("nginx's pid should not be seen by non-hostpid containers")
}
})
})
@@ -228,10 +227,10 @@ var _ = framework.KubeDescribe("Security Context", func() {
ginkgo.BeforeEach(func() {
output, err := exec.Command("sh", "-c", "ipcmk -M 1048576 | awk '{print $NF}'").Output()
if err != nil {
e2elog.Failf("Failed to create the shared memory on the host: %v", err)
framework.Failf("Failed to create the shared memory on the host: %v", err)
}
hostSharedMemoryID = strings.TrimSpace(string(output))
e2elog.Logf("Got host shared memory ID %q", hostSharedMemoryID)
framework.Logf("Got host shared memory ID %q", hostSharedMemoryID)
})
ginkgo.It("should show the shared memory ID in the host IPC containers [NodeFeature:HostAccess]", func() {
@@ -239,13 +238,13 @@ var _ = framework.KubeDescribe("Security Context", func() {
createAndWaitHostIPCPod(ipcutilsPodName, true)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
if err != nil {
e2elog.Failf("GetPodLogs for pod %q failed: %v", ipcutilsPodName, err)
framework.Failf("GetPodLogs for pod %q failed: %v", ipcutilsPodName, err)
}
podSharedMemoryIDs := strings.TrimSpace(logs)
e2elog.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, ipcutilsPodName)
framework.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, ipcutilsPodName)
if !strings.Contains(podSharedMemoryIDs, hostSharedMemoryID) {
e2elog.Failf("hostIPC container should show shared memory IDs on host")
framework.Failf("hostIPC container should show shared memory IDs on host")
}
})
@@ -254,13 +253,13 @@ var _ = framework.KubeDescribe("Security Context", func() {
createAndWaitHostIPCPod(ipcutilsPodName, false)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, ipcutilsPodName, ipcutilsPodName)
if err != nil {
e2elog.Failf("GetPodLogs for pod %q failed: %v", ipcutilsPodName, err)
framework.Failf("GetPodLogs for pod %q failed: %v", ipcutilsPodName, err)
}
podSharedMemoryIDs := strings.TrimSpace(logs)
e2elog.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, ipcutilsPodName)
framework.Logf("Got shared memory IDs %q from pod %q", podSharedMemoryIDs, ipcutilsPodName)
if strings.Contains(podSharedMemoryIDs, hostSharedMemoryID) {
e2elog.Failf("non-hostIPC container should not show shared memory IDs on host")
framework.Failf("non-hostIPC container should not show shared memory IDs on host")
}
})
@@ -268,7 +267,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
if hostSharedMemoryID != "" {
_, err := exec.Command("sh", "-c", fmt.Sprintf("ipcrm -m %q", hostSharedMemoryID)).Output()
if err != nil {
e2elog.Failf("Failed to remove shared memory %q on the host: %v", hostSharedMemoryID, err)
framework.Failf("Failed to remove shared memory %q on the host: %v", hostSharedMemoryID, err)
}
}
})
@@ -310,11 +309,11 @@ var _ = framework.KubeDescribe("Security Context", func() {
ginkgo.BeforeEach(func() {
l, err = net.Listen("tcp", ":0")
if err != nil {
e2elog.Failf("Failed to open a new tcp port: %v", err)
framework.Failf("Failed to open a new tcp port: %v", err)
}
addr := strings.Split(l.Addr().String(), ":")
listeningPort = addr[len(addr)-1]
e2elog.Logf("Opened a new tcp port %q", listeningPort)
framework.Logf("Opened a new tcp port %q", listeningPort)
})
ginkgo.It("should listen on same port in the host network containers [NodeFeature:HostAccess]", func() {
@@ -322,12 +321,12 @@ var _ = framework.KubeDescribe("Security Context", func() {
createAndWaitHostNetworkPod(busyboxPodName, true)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
if err != nil {
e2elog.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
}
e2elog.Logf("Got logs for pod %q: %q", busyboxPodName, logs)
framework.Logf("Got logs for pod %q: %q", busyboxPodName, logs)
if !strings.Contains(logs, listeningPort) {
e2elog.Failf("host-networked container should listening on same port as host")
framework.Failf("host-networked container should listening on same port as host")
}
})
@@ -336,12 +335,12 @@ var _ = framework.KubeDescribe("Security Context", func() {
createAndWaitHostNetworkPod(busyboxPodName, false)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, busyboxPodName, busyboxPodName)
if err != nil {
e2elog.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
framework.Failf("GetPodLogs for pod %q failed: %v", busyboxPodName, err)
}
e2elog.Logf("Got logs for pod %q: %q", busyboxPodName, logs)
framework.Logf("Got logs for pod %q: %q", busyboxPodName, logs)
if strings.Contains(logs, listeningPort) {
e2elog.Failf("non-hostnetworked container shouldn't show the same port as host")
framework.Failf("non-hostnetworked container shouldn't show the same port as host")
}
})
@@ -388,12 +387,12 @@ var _ = framework.KubeDescribe("Security Context", func() {
podName := createAndWaitUserPod(true)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
if err != nil {
e2elog.Failf("GetPodLogs for pod %q failed: %v", podName, err)
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
}
e2elog.Logf("Got logs for pod %q: %q", podName, logs)
framework.Logf("Got logs for pod %q: %q", podName, logs)
if strings.Contains(logs, "Operation not permitted") {
e2elog.Failf("privileged container should be able to create dummy device")
framework.Failf("privileged container should be able to create dummy device")
}
})
})
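For context on the PID namespace assertions in this file: the pods under test are two-container pods that differ only in whether the process namespace is shared. A minimal sketch of the shared variant, assuming v1 (k8s.io/api/core/v1) and metav1 (k8s.io/apimachinery/pkg/apis/meta/v1) are imported and using placeholder image and command values:

// Sketch only: with ShareProcessNamespace enabled, both containers see the same process
// list, so /bin/pidof returns identical output from either container, which is what the
// shared-pid-ns-test-pod assertion above relies on.
func sharedPIDNamespacePod() *v1.Pod {
	share := true
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "shared-pid-ns-test-pod"},
		Spec: v1.PodSpec{
			ShareProcessNamespace: &share,
			Containers: []v1.Container{
				{Name: "test-container-1", Image: "busybox", Command: []string{"top"}},
				{Name: "test-container-2", Image: "busybox", Command: []string{"top"}},
			},
		},
	}
}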

View File

@@ -26,7 +26,6 @@ import (
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
testutils "k8s.io/kubernetes/test/utils"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -182,9 +181,9 @@ var _ = framework.KubeDescribe("StartupProbe [Serial] [Disruptive] [NodeFeature:
startedTime, err := common.GetContainerStartedTime(p, "busybox")
framework.ExpectNoError(err)
e2elog.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
framework.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
if readyTime.Sub(startedTime) < 40*time.Second {
e2elog.Failf("Pod became ready before startupProbe succeeded")
framework.Failf("Pod became ready before startupProbe succeeded")
}
})
})

View File

@@ -28,7 +28,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/volume"
systemdutil "github.com/coreos/go-systemd/util"
@@ -46,7 +45,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
return
}
if framework.TestContext.DumpLogsOnFailure {
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
}
ginkgo.By("Recording processes in system cgroups")
recordSystemCgroupProcesses()
@@ -152,7 +151,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
}
// The Kubelet only manages the 'misc' system container if the host is not running systemd.
if !systemdutil.IsRunningSystemd() {
e2elog.Logf("Host not running systemd; expecting 'misc' system container.")
framework.Logf("Host not running systemd; expecting 'misc' system container.")
miscContExpectations := sysContExpectations().(*gstruct.FieldsMatcher)
// Misc processes are system-dependent, so relax the memory constraints.
miscContExpectations.Fields["Memory"] = ptrMatchAllFields(gstruct.Fields{
@@ -391,7 +390,7 @@ func summaryObjectID(element interface{}) string {
case kubeletstatsv1alpha1.UserDefinedMetric:
return el.Name
default:
e2elog.Failf("Unknown type: %T", el)
framework.Failf("Unknown type: %T", el)
return "???"
}
}
@@ -419,7 +418,7 @@ func recent(d time.Duration) types.GomegaMatcher {
func recordSystemCgroupProcesses() {
cfg, err := getCurrentKubeletConfig()
if err != nil {
e2elog.Logf("Failed to read kubelet config: %v", err)
framework.Logf("Failed to read kubelet config: %v", err)
return
}
cgroups := map[string]string{
@@ -428,24 +427,24 @@ func recordSystemCgroupProcesses() {
}
for name, cgroup := range cgroups {
if cgroup == "" {
e2elog.Logf("Skipping unconfigured cgroup %s", name)
framework.Logf("Skipping unconfigured cgroup %s", name)
continue
}
pids, err := ioutil.ReadFile(fmt.Sprintf("/sys/fs/cgroup/cpu/%s/cgroup.procs", cgroup))
if err != nil {
e2elog.Logf("Failed to read processes in cgroup %s: %v", name, err)
framework.Logf("Failed to read processes in cgroup %s: %v", name, err)
continue
}
e2elog.Logf("Processes in %s cgroup (%s):", name, cgroup)
framework.Logf("Processes in %s cgroup (%s):", name, cgroup)
for _, pid := range strings.Fields(string(pids)) {
path := fmt.Sprintf("/proc/%s/cmdline", pid)
cmd, err := ioutil.ReadFile(path)
if err != nil {
e2elog.Logf(" ginkgo.Failed to read %s: %v", path, err)
framework.Logf(" ginkgo.Failed to read %s: %v", path, err)
} else {
e2elog.Logf(" %s", cmd)
framework.Logf(" %s", cmd)
}
}
}

View File

@@ -28,7 +28,6 @@ import (
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@@ -79,7 +78,7 @@ var _ = framework.KubeDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptiv
return nil
}
msg := fmt.Sprintf("NodeCondition: %s not encountered yet", v1.NodeDiskPressure)
e2elog.Logf(msg)
framework.Logf(msg)
return fmt.Errorf(msg)
}, time.Minute*2, time.Second*4).Should(gomega.BeNil())
@@ -87,9 +86,9 @@ var _ = framework.KubeDescribe("SystemNodeCriticalPod [Slow] [Serial] [Disruptiv
gomega.Consistently(func() error {
err := checkMirrorPodRunning(f.ClientSet, mirrorPodName, ns)
if err == nil {
e2elog.Logf("mirror pod %q is running", mirrorPodName)
framework.Logf("mirror pod %q is running", mirrorPodName)
} else {
e2elog.Logf(err.Error())
framework.Logf(err.Error())
}
return err
}, time.Minute*8, time.Second*4).ShouldNot(gomega.HaveOccurred())

View File

@@ -28,7 +28,6 @@ import (
"time"
"golang.org/x/net/context"
"k8s.io/klog"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
@@ -38,6 +37,7 @@ import (
"k8s.io/client-go/kubernetes/scheme"
"k8s.io/component-base/featuregate"
internalapi "k8s.io/cri-api/pkg/apis"
"k8s.io/klog"
kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
"k8s.io/kubernetes/pkg/features"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
@@ -50,7 +50,6 @@ import (
"k8s.io/kubernetes/pkg/kubelet/remote"
"k8s.io/kubernetes/pkg/kubelet/util"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
frameworkmetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
imageutils "k8s.io/kubernetes/test/utils/image"
@@ -335,13 +334,13 @@ func newKubeletConfigMap(name string, internalKC *kubeletconfig.KubeletConfigura
}
func logPodEvents(f *framework.Framework) {
e2elog.Logf("Summary of pod events during the test:")
framework.Logf("Summary of pod events during the test:")
err := framework.ListNamespaceEvents(f.ClientSet, f.Namespace.Name)
framework.ExpectNoError(err)
}
func logNodeEvents(f *framework.Framework) {
e2elog.Logf("Summary of node events during the test:")
framework.Logf("Summary of node events during the test:")
err := framework.ListNamespaceEvents(f.ClientSet, "")
framework.ExpectNoError(err)
}
@@ -362,9 +361,9 @@ func logKubeletLatencyMetrics(metricNames ...string) {
}
metric, err := frameworkmetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
if err != nil {
e2elog.Logf("Error getting kubelet metrics: %v", err)
framework.Logf("Error getting kubelet metrics: %v", err)
} else {
e2elog.Logf("Kubelet Metrics: %+v", e2emetrics.GetKubeletLatencyMetrics(metric, metricSet))
framework.Logf("Kubelet Metrics: %+v", e2emetrics.GetKubeletLatencyMetrics(metric, metricSet))
}
}
@@ -426,7 +425,7 @@ func restartKubelet() {
matches := regex.FindStringSubmatch(string(stdout))
framework.ExpectNotEqual(len(matches), 0)
kube := matches[0]
e2elog.Logf("Get running kubelet with systemctl: %v, %v", string(stdout), kube)
framework.Logf("Get running kubelet with systemctl: %v, %v", string(stdout), kube)
stdout, err = exec.Command("sudo", "systemctl", "restart", kube).CombinedOutput()
framework.ExpectNoError(err, "Failed to restart kubelet with systemctl: %v, %v", err, stdout)
}
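A rough usage sketch for the two helpers above; the settle time and the metric name are illustrative assumptions, not taken from this commit:

// Sketch only: restart the kubelet, give it a moment to come back, then dump latency metrics.
restartKubelet()
time.Sleep(20 * time.Second) // assumed settle time
logKubeletLatencyMetrics("pod_start_duration_seconds") // placeholder metric name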

View File

@@ -13,7 +13,6 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)

View File

@@ -28,7 +28,6 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)
// CleanCrdFn declares the clean up function needed to remove the CRD
@@ -55,17 +54,17 @@ func CreateMultiVersionTestCRD(f *framework.Framework, group string, opts ...Opt
// Creating a custom resource definition for use by assorted tests.
config, err := framework.LoadConfig()
if err != nil {
e2elog.Failf("failed to load config: %v", err)
framework.Failf("failed to load config: %v", err)
return nil, err
}
apiExtensionClient, err := crdclientset.NewForConfig(config)
if err != nil {
e2elog.Failf("failed to initialize apiExtensionClient: %v", err)
framework.Failf("failed to initialize apiExtensionClient: %v", err)
return nil, err
}
dynamicClient, err := dynamic.NewForConfig(config)
if err != nil {
e2elog.Failf("failed to initialize dynamic client: %v", err)
framework.Failf("failed to initialize dynamic client: %v", err)
return nil, err
}
@@ -102,7 +101,7 @@ func CreateMultiVersionTestCRD(f *framework.Framework, group string, opts ...Opt
// create the CRD and wait for the resource to be recognized and available.
crd, err = fixtures.CreateNewV1CustomResourceDefinitionWatchUnsafe(crd, apiExtensionClient)
if err != nil {
e2elog.Failf("failed to create CustomResourceDefinition: %v", err)
framework.Failf("failed to create CustomResourceDefinition: %v", err)
return nil, err
}
@@ -120,7 +119,7 @@ func CreateMultiVersionTestCRD(f *framework.Framework, group string, opts ...Opt
testcrd.CleanUp = func() error {
err := fixtures.DeleteV1CustomResourceDefinition(crd, apiExtensionClient)
if err != nil {
e2elog.Failf("failed to delete CustomResourceDefinition(%s): %v", name, err)
framework.Failf("failed to delete CustomResourceDefinition(%s): %v", name, err)
}
return err
}
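A typical call site for this helper looks roughly like the sketch below; the package alias crd and the group name are assumptions, while the CreateMultiVersionTestCRD signature and the CleanUp field come from the code above:

// Sketch only: create the multi-version test CRD and arrange cleanup. Failures inside the
// helper are already reported through framework.Failf, so the caller mainly propagates errors.
testcrd, err := crd.CreateMultiVersionTestCRD(f, "stable.example.com")
if err != nil {
	framework.Failf("failed to create test CRD: %v", err)
	return
}
defer func() {
	if cleanupErr := testcrd.CleanUp(); cleanupErr != nil {
		framework.Logf("failed to clean up test CRD: %v", cleanupErr)
	}
}()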