use log functions of core framework

YuikoTakada 2019-09-26 02:45:07 +00:00
parent 45f7f70479
commit 9ed2f632cc
18 changed files with 142 additions and 160 deletions
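
The change is mechanical across all 18 files: drop the aliased import of the framework/log package and call the same helpers on the core framework package, whose Logf and Failf the new lines already use. A minimal before/after sketch (file and messages are illustrative, not taken from any one hunk):

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	// Before this commit, files also imported:
	//   e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	// and the calls below were e2elog.Logf / e2elog.Failf.
)

func logAndFail(name string, err error) {
	// After: the core framework provides the same logging helpers directly.
	framework.Logf("doing something with %s", name)
	if err != nil {
		framework.Failf("unable to process %s: %v", name, err)
	}
}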


@ -79,7 +79,6 @@ go_library(
"//staging/src/k8s.io/client-go/tools/watch:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/kubelet:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/node:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/replicaset:go_default_library",


@ -32,7 +32,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/replicaset"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
testutils "k8s.io/kubernetes/test/utils"
@ -171,19 +170,19 @@ func newResourceConsumer(name, nsName string, kind schema.GroupVersionKind, repl
// ConsumeCPU consumes given number of CPU
func (rc *ResourceConsumer) ConsumeCPU(millicores int) {
e2elog.Logf("RC %s: consume %v millicores in total", rc.name, millicores)
framework.Logf("RC %s: consume %v millicores in total", rc.name, millicores)
rc.cpu <- millicores
}
// ConsumeMem consumes given number of Mem
func (rc *ResourceConsumer) ConsumeMem(megabytes int) {
e2elog.Logf("RC %s: consume %v MB in total", rc.name, megabytes)
framework.Logf("RC %s: consume %v MB in total", rc.name, megabytes)
rc.mem <- megabytes
}
// ConsumeCustomMetric consumes given amount of custom metric
func (rc *ResourceConsumer) ConsumeCustomMetric(amount int) {
e2elog.Logf("RC %s: consume custom metric %v in total", rc.name, amount)
framework.Logf("RC %s: consume custom metric %v in total", rc.name, amount)
rc.customMetric <- amount
}
@ -196,13 +195,13 @@ func (rc *ResourceConsumer) makeConsumeCPURequests() {
for {
select {
case millicores = <-rc.cpu:
e2elog.Logf("RC %s: setting consumption to %v millicores in total", rc.name, millicores)
framework.Logf("RC %s: setting consumption to %v millicores in total", rc.name, millicores)
case <-time.After(sleepTime):
e2elog.Logf("RC %s: sending request to consume %d millicores", rc.name, millicores)
framework.Logf("RC %s: sending request to consume %d millicores", rc.name, millicores)
rc.sendConsumeCPURequest(millicores)
sleepTime = rc.sleepTime
case <-rc.stopCPU:
e2elog.Logf("RC %s: stopping CPU consumer", rc.name)
framework.Logf("RC %s: stopping CPU consumer", rc.name)
return
}
}
@ -217,13 +216,13 @@ func (rc *ResourceConsumer) makeConsumeMemRequests() {
for {
select {
case megabytes = <-rc.mem:
e2elog.Logf("RC %s: setting consumption to %v MB in total", rc.name, megabytes)
framework.Logf("RC %s: setting consumption to %v MB in total", rc.name, megabytes)
case <-time.After(sleepTime):
e2elog.Logf("RC %s: sending request to consume %d MB", rc.name, megabytes)
framework.Logf("RC %s: sending request to consume %d MB", rc.name, megabytes)
rc.sendConsumeMemRequest(megabytes)
sleepTime = rc.sleepTime
case <-rc.stopMem:
e2elog.Logf("RC %s: stopping mem consumer", rc.name)
framework.Logf("RC %s: stopping mem consumer", rc.name)
return
}
}
@ -238,13 +237,13 @@ func (rc *ResourceConsumer) makeConsumeCustomMetric() {
for {
select {
case delta = <-rc.customMetric:
e2elog.Logf("RC %s: setting bump of metric %s to %d in total", rc.name, customMetricName, delta)
framework.Logf("RC %s: setting bump of metric %s to %d in total", rc.name, customMetricName, delta)
case <-time.After(sleepTime):
e2elog.Logf("RC %s: sending request to consume %d of custom metric %s", rc.name, delta, customMetricName)
framework.Logf("RC %s: sending request to consume %d of custom metric %s", rc.name, delta, customMetricName)
rc.sendConsumeCustomMetric(delta)
sleepTime = rc.sleepTime
case <-rc.stopCustomMetric:
e2elog.Logf("RC %s: stopping metric consumer", rc.name)
framework.Logf("RC %s: stopping metric consumer", rc.name)
return
}
}
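
The three makeConsume* loops above share one select pattern: a new target arrives on a channel, or the timer fires and the current target is re-sent, until a stop signal lands. A condensed, self-contained sketch of that pattern (names paraphrased; the real loops also reset sleepTime after each send):

package example

import (
	"fmt"
	"time"
)

// consumerLoop paraphrases makeConsumeCPURequests and its siblings:
// update the target on demand, periodically re-send it, stop on signal.
func consumerLoop(targets <-chan int, stop <-chan struct{}, sleepTime time.Duration, send func(int)) {
	target := 0
	for {
		select {
		case target = <-targets:
			fmt.Printf("setting consumption to %v in total\n", target)
		case <-time.After(sleepTime):
			fmt.Printf("sending request to consume %d\n", target)
			send(target)
		case <-stop:
			fmt.Println("stopping consumer")
			return
		}
	}
}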
@ -264,10 +263,10 @@ func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
Param("millicores", strconv.Itoa(millicores)).
Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
Param("requestSizeMillicores", strconv.Itoa(rc.requestSizeInMillicores))
e2elog.Logf("ConsumeCPU URL: %v", *req.URL())
framework.Logf("ConsumeCPU URL: %v", *req.URL())
_, err = req.DoRaw()
if err != nil {
e2elog.Logf("ConsumeCPU failure: %v", err)
framework.Logf("ConsumeCPU failure: %v", err)
return false, nil
}
return true, nil
@ -291,10 +290,10 @@ func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
Param("megabytes", strconv.Itoa(megabytes)).
Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
Param("requestSizeMegabytes", strconv.Itoa(rc.requestSizeInMegabytes))
e2elog.Logf("ConsumeMem URL: %v", *req.URL())
framework.Logf("ConsumeMem URL: %v", *req.URL())
_, err = req.DoRaw()
if err != nil {
e2elog.Logf("ConsumeMem failure: %v", err)
framework.Logf("ConsumeMem failure: %v", err)
return false, nil
}
return true, nil
@ -319,10 +318,10 @@ func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
Param("delta", strconv.Itoa(delta)).
Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
Param("requestSizeMetrics", strconv.Itoa(rc.requestSizeCustomMetric))
e2elog.Logf("ConsumeCustomMetric URL: %v", *req.URL())
framework.Logf("ConsumeCustomMetric URL: %v", *req.URL())
_, err = req.DoRaw()
if err != nil {
e2elog.Logf("ConsumeCustomMetric failure: %v", err)
framework.Logf("ConsumeCustomMetric failure: %v", err)
return false, nil
}
return true, nil
@ -336,25 +335,25 @@ func (rc *ResourceConsumer) GetReplicas() int {
replicationController, err := rc.clientSet.CoreV1().ReplicationControllers(rc.nsName).Get(rc.name, metav1.GetOptions{})
framework.ExpectNoError(err)
if replicationController == nil {
e2elog.Failf(rcIsNil)
framework.Failf(rcIsNil)
}
return int(replicationController.Status.ReadyReplicas)
case KindDeployment:
deployment, err := rc.clientSet.AppsV1().Deployments(rc.nsName).Get(rc.name, metav1.GetOptions{})
framework.ExpectNoError(err)
if deployment == nil {
e2elog.Failf(deploymentIsNil)
framework.Failf(deploymentIsNil)
}
return int(deployment.Status.ReadyReplicas)
case KindReplicaSet:
rs, err := rc.clientSet.AppsV1().ReplicaSets(rc.nsName).Get(rc.name, metav1.GetOptions{})
framework.ExpectNoError(err)
if rs == nil {
e2elog.Failf(rsIsNil)
framework.Failf(rsIsNil)
}
return int(rs.Status.ReadyReplicas)
default:
e2elog.Failf(invalidKind)
framework.Failf(invalidKind)
}
return 0
}
@ -367,7 +366,7 @@ func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int, duration time.D
interval := 20 * time.Second
err := wait.PollImmediate(interval, duration, func() (bool, error) {
replicas := rc.GetReplicas()
e2elog.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
})
framework.ExpectNoErrorWithOffset(1, err, "timeout waiting %v for %d replicas", duration, desiredReplicas)
@ -381,12 +380,12 @@ func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, max
interval := 10 * time.Second
err := wait.PollImmediate(interval, duration, func() (bool, error) {
replicas := rc.GetReplicas()
e2elog.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas)
framework.Logf("expecting there to be in [%d, %d] replicas (are: %d)", minDesiredReplicas, maxDesiredReplicas, replicas)
as, err := rc.GetHpa(hpaName)
if err != nil {
e2elog.Logf("Error getting HPA: %s", err)
framework.Logf("Error getting HPA: %s", err)
} else {
e2elog.Logf("HPA status: %+v", as.Status)
framework.Logf("HPA status: %+v", as.Status)
}
if replicas < minDesiredReplicas {
return false, fmt.Errorf("number of replicas below target")
@ -398,7 +397,7 @@ func (rc *ResourceConsumer) EnsureDesiredReplicasInRange(minDesiredReplicas, max
})
// The call above always returns an error; if it is a timeout, that is fine (the condition was satisfied the whole time).
if err == wait.ErrWaitTimeout {
e2elog.Logf("Number of replicas was stable over %v", duration)
framework.Logf("Number of replicas was stable over %v", duration)
return
}
framework.ExpectNoErrorWithOffset(1, err)
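
Note the inverted polling contract in EnsureDesiredReplicasInRange: the condition function never returns true, so wait.ErrWaitTimeout is the success case and any other error means the replica count left the range. A minimal sketch of that inversion, assuming the same apimachinery wait package:

package example

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// ensureStable succeeds only if check keeps passing for the full duration:
// timing out is success, any other error is a real violation.
func ensureStable(interval, duration time.Duration, check func() error) error {
	err := wait.PollImmediate(interval, duration, func() (bool, error) {
		if err := check(); err != nil {
			return false, err // condition violated: abort the poll early
		}
		return false, nil // never true: poll until the timeout
	})
	if err == wait.ErrWaitTimeout {
		return nil // condition held for the entire window
	}
	return fmt.Errorf("condition violated before %v elapsed: %v", duration, err)
}
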
@ -491,7 +490,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
framework.ExpectNoError(replicaset.RunReplicaSet(rsConfig))
break
default:
e2elog.Failf(invalidKind)
framework.Failf(invalidKind)
}
ginkgo.By(fmt.Sprintf("Running controller"))


@ -23,7 +23,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
@ -43,7 +42,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
@ -91,7 +90,7 @@ var _ = ginkgo.Describe("[sig-node] ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{


@ -26,7 +26,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -140,7 +139,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
@ -227,7 +226,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
@ -345,12 +344,12 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
var err error
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil {
e2elog.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
}
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil {
e2elog.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
}
pod := &v1.Pod{
@ -465,7 +464,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
e2elog.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
}
ginkgo.By("waiting to observe update in volume")
@ -493,7 +492,7 @@ var _ = ginkgo.Describe("[sig-storage] ConfigMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
@ -601,7 +600,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
one := int64(1)
@ -678,7 +677,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
one := int64(1)
@ -813,7 +812,7 @@ func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMount
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
//creating a pod with a configMap object, but with a different key which is not present in the configMap object.
pod := &v1.Pod{


@ -29,7 +29,6 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
testutils "k8s.io/kubernetes/test/utils"
@ -76,10 +75,10 @@ var _ = framework.KubeDescribe("Probing container", func() {
startedTime, err := GetContainerStartedTime(p, containerName)
framework.ExpectNoError(err)
e2elog.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
framework.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
initialDelay := probeTestInitialDelaySeconds * time.Second
if readyTime.Sub(startedTime) < initialDelay {
e2elog.Failf("Pod became ready before it's %v initial delay", initialDelay)
framework.Failf("Pod became ready before it's %v initial delay", initialDelay)
}
restartCount := getRestartCount(p)
@ -422,14 +421,14 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
// 'Terminated' which can cause indefinite blocking.)
framework.ExpectNoError(e2epod.WaitForPodNotPending(f.ClientSet, ns, pod.Name),
fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
e2elog.Logf("Started pod %s in namespace %s", pod.Name, ns)
framework.Logf("Started pod %s in namespace %s", pod.Name, ns)
// Check the pod's current state and verify that restartCount is present.
ginkgo.By("checking the pod's current state and verifying that restartCount is present")
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
initialRestartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
e2elog.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
// Wait for the restart state to be as desired.
deadline := time.Now().Add(timeout)
@ -440,10 +439,10 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
restartCount := podutil.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
if restartCount != lastRestartCount {
e2elog.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
ns, pod.Name, restartCount, time.Since(start))
if restartCount < lastRestartCount {
e2elog.Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d",
framework.Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d",
ns, pod.Name, lastRestartCount, restartCount)
}
}
@ -459,7 +458,7 @@ func RunLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int,
// If we expected n restarts (n > 0), fail if we observed < n restarts.
if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 &&
int(observedRestarts) < expectNumRestarts) {
e2elog.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d",
framework.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d",
ns, pod.Name, expectNumRestarts, observedRestarts)
}
}
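
The final assertion in RunLivenessTest treats the expectation asymmetrically: zero expected restarts must observe exactly zero, while a positive expectation is only a lower bound. As a standalone predicate (name is mine):

package example

// restartCountOK mirrors RunLivenessTest's final check: with zero expected
// restarts any observed restart fails; otherwise the expectation is a
// lower bound on the observed count.
func restartCountOK(expected, observed int) bool {
	if expected == 0 {
		return observed == 0
	}
	return observed >= expected
}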


@ -25,7 +25,6 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -482,14 +481,14 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
cmd := "touch /volume_mount/mypath/foo/test.log"
_, _, err = f.ExecShellInPodWithFullOutput(pod.Name, cmd)
if err != nil {
e2elog.Failf("expected to be able to write to subpath")
framework.Failf("expected to be able to write to subpath")
}
ginkgo.By("test for file in mounted path")
cmd = "test -f /subpath_mount/test.log"
_, _, err = f.ExecShellInPodWithFullOutput(pod.Name, cmd)
if err != nil {
e2elog.Failf("expected to be able to verify file")
framework.Failf("expected to be able to verify file")
}
ginkgo.By("updating the annotation value")
@ -629,13 +628,13 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
cmd := "test -f /volume_mount/foo/test.log"
_, _, err = f.ExecShellInPodWithFullOutput(pod.Name, cmd)
if err != nil {
e2elog.Failf("expected to be able to verify old file exists")
framework.Failf("expected to be able to verify old file exists")
}
cmd = "test ! -f /volume_mount/newsubpath/test.log"
_, _, err = f.ExecShellInPodWithFullOutput(pod.Name, cmd)
if err != nil {
e2elog.Failf("expected to be able to verify new file does not exist")
framework.Failf("expected to be able to verify new file does not exist")
}
})
})
@ -660,7 +659,7 @@ func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount
ginkgo.By("Failing liveness probe")
stdout, stderr, err := f.ExecShellInPodWithFullOutput(pod.Name, fmt.Sprintf("rm %v", volumeMount))
e2elog.Logf("Pod exec output: %v / %v", stdout, stderr)
framework.Logf("Pod exec output: %v / %v", stdout, stderr)
framework.ExpectNoError(err, "while failing liveness probe")
// Check that container has restarted
@ -673,10 +672,10 @@ func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount
}
for _, status := range pod.Status.ContainerStatuses {
if status.Name == pod.Spec.Containers[0].Name {
e2elog.Logf("Container %v, restarts: %v", status.Name, status.RestartCount)
framework.Logf("Container %v, restarts: %v", status.Name, status.RestartCount)
restarts = status.RestartCount
if restarts > 0 {
e2elog.Logf("Container has restart count: %v", restarts)
framework.Logf("Container has restart count: %v", restarts)
return true, nil
}
}
@ -688,7 +687,7 @@ func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount
// Fix liveness probe
ginkgo.By("Rewriting the file")
stdout, _, err = f.ExecShellInPodWithFullOutput(pod.Name, fmt.Sprintf("echo test-after > %v", volumeMount))
e2elog.Logf("Pod exec output: %v", stdout)
framework.Logf("Pod exec output: %v", stdout)
framework.ExpectNoError(err, "while rewriting the probe file")
// Wait for container restarts to stabilize
@ -705,13 +704,13 @@ func waitForPodContainerRestart(f *framework.Framework, pod *v1.Pod, volumeMount
if status.RestartCount == restarts {
stableCount++
if stableCount > stableThreshold {
e2elog.Logf("Container restart has stabilized")
framework.Logf("Container restart has stabilized")
return true, nil
}
} else {
restarts = status.RestartCount
stableCount = 0
e2elog.Logf("Container has restart count: %v", restarts)
framework.Logf("Container has restart count: %v", restarts)
}
break
}


@ -31,7 +31,6 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
@ -88,7 +87,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
},
},
}
e2elog.Logf("PodSpec: initContainers in spec.initContainers")
framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod)
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
framework.ExpectNoError(err, "error watching a pod")
@ -159,7 +158,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
},
},
}
e2elog.Logf("PodSpec: initContainers in spec.initContainers")
framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod)
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
framework.ExpectNoError(err, "error watching a pod")
@ -231,7 +230,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
},
},
}
e2elog.Logf("PodSpec: initContainers in spec.initContainers")
framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod)
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))
framework.ExpectNoError(err, "error watching a pod")
@ -281,7 +280,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
if status.RestartCount < 3 {
return false, nil
}
e2elog.Logf("init container has failed twice: %#v", t)
framework.Logf("init container has failed twice: %#v", t)
// TODO: more conditions
return true, nil
default:
@ -348,7 +347,7 @@ var _ = framework.KubeDescribe("InitContainer [NodeConformance]", func() {
},
},
}
e2elog.Logf("PodSpec: initContainers in spec.initContainers")
framework.Logf("PodSpec: initContainers in spec.initContainers")
startedPod := podClient.Create(pod)
w, err := podClient.Watch(metav1.SingleObject(startedPod.ObjectMeta))


@ -25,7 +25,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -137,11 +136,11 @@ func assertManagedStatus(
}
if expectedIsManaged {
e2elog.Failf(
framework.Failf(
"/etc/hosts file should be kubelet managed (name: %s, retries: %d). /etc/hosts contains %q",
name, retryCount, etcHostsContent)
} else {
e2elog.Failf(
framework.Failf(
"/etc/hosts file should no be kubelet managed (name: %s, retries: %d). /etc/hosts contains %q",
name, retryCount, etcHostsContent)
}


@ -28,7 +28,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
testutils "k8s.io/kubernetes/test/utils"
@ -117,23 +116,23 @@ var _ = framework.KubeDescribe("NodeLease", func() {
if currentHeartbeatTime == lastHeartbeatTime {
if currentObserved.Sub(lastObserved) > 2*leaseDuration {
// heartbeat hasn't changed while watching for at least 2*leaseDuration, success!
e2elog.Logf("node status heartbeat is unchanged for %s, was waiting for at least %s, success!", currentObserved.Sub(lastObserved), 2*leaseDuration)
framework.Logf("node status heartbeat is unchanged for %s, was waiting for at least %s, success!", currentObserved.Sub(lastObserved), 2*leaseDuration)
return true, nil
}
e2elog.Logf("node status heartbeat is unchanged for %s, waiting for %s", currentObserved.Sub(lastObserved), 2*leaseDuration)
framework.Logf("node status heartbeat is unchanged for %s, waiting for %s", currentObserved.Sub(lastObserved), 2*leaseDuration)
return false, nil
}
if currentHeartbeatTime.Sub(lastHeartbeatTime) >= leaseDuration {
// heartbeat time changed, but the diff was greater than leaseDuration, success!
e2elog.Logf("node status heartbeat changed in %s, was waiting for at least %s, success!", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
framework.Logf("node status heartbeat changed in %s, was waiting for at least %s, success!", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
return true, nil
}
if !apiequality.Semantic.DeepEqual(lastStatus, currentStatus) {
// heartbeat time changed, but there were relevant changes in the status, keep waiting
e2elog.Logf("node status heartbeat changed in %s (with other status changes), waiting for %s", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
e2elog.Logf("%s", diff.ObjectReflectDiff(lastStatus, currentStatus))
framework.Logf("node status heartbeat changed in %s (with other status changes), waiting for %s", currentHeartbeatTime.Sub(lastHeartbeatTime), leaseDuration)
framework.Logf("%s", diff.ObjectReflectDiff(lastStatus, currentStatus))
lastHeartbeatTime = currentHeartbeatTime
lastObserved = currentObserved
lastStatus = currentStatus
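
The NodeLease check above succeeds on either of two conditions: the heartbeat stayed unchanged for more than 2*leaseDuration while being watched, or it advanced by at least a full leaseDuration. Condensed into a predicate (ignoring the branch that resets the baseline on other status changes):

package example

import "time"

// heartbeatQuietOrSlow condenses the success conditions of the NodeLease
// test: an unchanged heartbeat observed for more than 2*lease, or a
// heartbeat that advanced by at least one full lease duration.
func heartbeatQuietOrSlow(lastBeat, currentBeat time.Time, observedQuiet, lease time.Duration) bool {
	if currentBeat.Equal(lastBeat) {
		return observedQuiet > 2*lease
	}
	return currentBeat.Sub(lastBeat) >= lease
}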


@ -41,7 +41,6 @@ import (
"k8s.io/kubernetes/pkg/kubelet"
"k8s.io/kubernetes/test/e2e/framework"
e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
@ -70,14 +69,14 @@ func testHostIP(podClient *framework.PodClient, pod *v1.Pod) {
p, err := podClient.Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Failed to get pod %q", pod.Name)
if p.Status.HostIP != "" {
e2elog.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
break
}
if time.Since(t) >= hostIPTimeout {
e2elog.Failf("Gave up waiting for hostIP of pod %s after %v seconds",
framework.Failf("Gave up waiting for hostIP of pod %s after %v seconds",
p.Name, time.Since(t).Seconds())
}
e2elog.Logf("Retrying to get the hostIP of pod %s", p.Name)
framework.Logf("Retrying to get the hostIP of pod %s", p.Name)
time.Sleep(5 * time.Second)
}
}
@ -92,19 +91,19 @@ func startPodAndGetBackOffs(podClient *framework.PodClient, pod *v1.Pod, sleepAm
ginkgo.By("getting restart delay-0")
_, err := getRestartDelay(podClient, podName, containerName)
if err != nil {
e2elog.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
ginkgo.By("getting restart delay-1")
delay1, err := getRestartDelay(podClient, podName, containerName)
if err != nil {
e2elog.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
ginkgo.By("getting restart delay-2")
delay2, err := getRestartDelay(podClient, podName, containerName)
if err != nil {
e2elog.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
return delay1, delay2
}
@ -119,13 +118,13 @@ func getRestartDelay(podClient *framework.PodClient, podName string, containerNa
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
status, ok := podutil.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
if !ok {
e2elog.Logf("getRestartDelay: status missing")
framework.Logf("getRestartDelay: status missing")
continue
}
// The only case where this happens is the first time the Pod runs, when there is no "Last State" yet.
if status.LastTerminationState.Terminated == nil {
e2elog.Logf("Container's last state is not \"Terminated\".")
framework.Logf("Container's last state is not \"Terminated\".")
continue
}
@ -152,7 +151,7 @@ func getRestartDelay(podClient *framework.PodClient, podName string, containerNa
} else {
startedAt = status.LastTerminationState.Terminated.StartedAt.Time
}
e2elog.Logf("getRestartDelay: restartCount = %d, finishedAt=%s restartedAt=%s (%s)", status.RestartCount, previousFinishedAt, startedAt, startedAt.Sub(previousFinishedAt))
framework.Logf("getRestartDelay: restartCount = %d, finishedAt=%s restartedAt=%s (%s)", status.RestartCount, previousFinishedAt, startedAt, startedAt.Sub(previousFinishedAt))
return startedAt.Sub(previousFinishedAt), nil
}
}
@ -234,10 +233,10 @@ var _ = framework.KubeDescribe("Pods", func() {
if err == nil {
select {
case listCompleted <- true:
e2elog.Logf("observed the pod list")
framework.Logf("observed the pod list")
return podList, err
default:
e2elog.Logf("channel blocked")
framework.Logf("channel blocked")
}
}
return podList, err
@ -266,13 +265,13 @@ var _ = framework.KubeDescribe("Pods", func() {
select {
case event, _ := <-w.ResultChan():
if event.Type != watch.Added {
e2elog.Failf("Failed to observe pod creation: %v", event)
framework.Failf("Failed to observe pod creation: %v", event)
}
case <-time.After(framework.PodStartTimeout):
e2elog.Failf("Timeout while waiting for pod creation")
framework.Failf("Timeout while waiting for pod creation")
}
case <-time.After(10 * time.Second):
e2elog.Failf("Timeout while waiting to observe pod list")
framework.Failf("Timeout while waiting to observe pod list")
}
// We need to wait for the pod to be running, otherwise the deletion
@ -290,7 +289,7 @@ var _ = framework.KubeDescribe("Pods", func() {
err = wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
podList, err := e2ekubelet.GetKubeletPods(f.ClientSet, pod.Spec.NodeName)
if err != nil {
e2elog.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
framework.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
return false, nil
}
for _, kubeletPod := range podList.Items {
@ -298,12 +297,12 @@ var _ = framework.KubeDescribe("Pods", func() {
continue
}
if kubeletPod.ObjectMeta.DeletionTimestamp == nil {
e2elog.Logf("deletion has not yet been observed")
framework.Logf("deletion has not yet been observed")
return false, nil
}
return true, nil
}
e2elog.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
framework.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
return true, nil
})
framework.ExpectNoError(err, "kubelet never observed the termination notice")
@ -320,15 +319,15 @@ var _ = framework.KubeDescribe("Pods", func() {
lastPod = event.Object.(*v1.Pod)
deleted = true
case watch.Error:
e2elog.Logf("received a watch error: %v", event.Object)
e2elog.Failf("watch closed with error")
framework.Logf("received a watch error: %v", event.Object)
framework.Failf("watch closed with error")
}
case <-timer:
e2elog.Failf("timed out waiting for pod deletion")
framework.Failf("timed out waiting for pod deletion")
}
}
if !deleted {
e2elog.Failf("Failed to observe pod deletion")
framework.Failf("Failed to observe pod deletion")
}
gomega.Expect(lastPod.DeletionTimestamp).ToNot(gomega.BeNil())
@ -392,7 +391,7 @@ var _ = framework.KubeDescribe("Pods", func() {
pods, err = podClient.List(options)
framework.ExpectNoError(err, "failed to query for pods")
framework.ExpectEqual(len(pods.Items), 1)
e2elog.Logf("Pod update OK")
framework.Logf("Pod update OK")
})
/*
@ -576,7 +575,7 @@ var _ = framework.KubeDescribe("Pods", func() {
url := req.URL()
ws, err := framework.OpenWebSocketForURL(url, config, []string{"channel.k8s.io"})
if err != nil {
e2elog.Failf("Failed to open websocket to %s: %v", url.String(), err)
framework.Failf("Failed to open websocket to %s: %v", url.String(), err)
}
defer ws.Close()
@ -588,7 +587,7 @@ var _ = framework.KubeDescribe("Pods", func() {
if err == io.EOF {
break
}
e2elog.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
}
if len(msg) == 0 {
continue
@ -598,7 +597,7 @@ var _ = framework.KubeDescribe("Pods", func() {
// skip an empty message on stream other than stdout
continue
} else {
e2elog.Failf("Got message from server that didn't start with channel 1 (STDOUT): %v", msg)
framework.Failf("Got message from server that didn't start with channel 1 (STDOUT): %v", msg)
}
}
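
The websocket log check above relies on the channel.k8s.io framing, where the first byte of each frame is the channel number and channel 1 is STDOUT. A tiny sketch of that demultiplexing (helper name is mine):

package example

import "fmt"

// stdoutPayload returns the payload of a channel.k8s.io frame when it is
// on channel 1 (STDOUT), mirroring the check in the websocket test above.
func stdoutPayload(msg []byte) ([]byte, bool) {
	if len(msg) == 0 || msg[0] != 1 {
		return nil, false
	}
	return msg[1:], true
}

func main() {
	frame := append([]byte{1}, []byte("container is alive\n")...)
	if payload, ok := stdoutPayload(frame); ok {
		fmt.Print(string(payload))
	}
}
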
@ -655,7 +654,7 @@ var _ = framework.KubeDescribe("Pods", func() {
ws, err := framework.OpenWebSocketForURL(url, config, []string{"binary.k8s.io"})
if err != nil {
e2elog.Failf("Failed to open websocket to %s: %v", url.String(), err)
framework.Failf("Failed to open websocket to %s: %v", url.String(), err)
}
defer ws.Close()
buf := &bytes.Buffer{}
@ -665,7 +664,7 @@ var _ = framework.KubeDescribe("Pods", func() {
if err == io.EOF {
break
}
e2elog.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
}
if len(strings.TrimSpace(string(msg))) == 0 {
continue
@ -673,7 +672,7 @@ var _ = framework.KubeDescribe("Pods", func() {
buf.Write(msg)
}
if buf.String() != "container is alive\n" {
e2elog.Failf("Unexpected websocket logs:\n%s", buf.String())
framework.Failf("Unexpected websocket logs:\n%s", buf.String())
}
})
@ -710,11 +709,11 @@ var _ = framework.KubeDescribe("Pods", func() {
ginkgo.By("get restart delay after image update")
delayAfterUpdate, err := getRestartDelay(podClient, podName, containerName)
if err != nil {
e2elog.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
if delayAfterUpdate > 2*delay2 || delayAfterUpdate > 2*delay1 {
e2elog.Failf("updating image did not reset the back-off value in pod=%s/%s d3=%s d2=%s d1=%s", podName, containerName, delayAfterUpdate, delay1, delay2)
framework.Failf("updating image did not reset the back-off value in pod=%s/%s d3=%s d2=%s d1=%s", podName, containerName, delayAfterUpdate, delay1, delay2)
}
})
@ -750,7 +749,7 @@ var _ = framework.KubeDescribe("Pods", func() {
for i := 0; i < 3; i++ {
delay1, err = getRestartDelay(podClient, podName, containerName)
if err != nil {
e2elog.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
if delay1 < kubelet.MaxContainerBackOff {
@ -759,17 +758,17 @@ var _ = framework.KubeDescribe("Pods", func() {
}
if (delay1 < kubelet.MaxContainerBackOff) || (delay1 > maxBackOffTolerance) {
e2elog.Failf("expected %s back-off got=%s in delay1", kubelet.MaxContainerBackOff, delay1)
framework.Failf("expected %s back-off got=%s in delay1", kubelet.MaxContainerBackOff, delay1)
}
ginkgo.By("getting restart delay after a capped delay")
delay2, err := getRestartDelay(podClient, podName, containerName)
if err != nil {
e2elog.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
if delay2 < kubelet.MaxContainerBackOff || delay2 > maxBackOffTolerance { // syncloop cumulative drift
e2elog.Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2)
framework.Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2)
}
})
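
Both capped-delay checks above accept a restart delay only inside a band: at least kubelet.MaxContainerBackOff, but no more than a tolerance that allows for syncloop drift. As a predicate (names are mine):

package example

import "time"

// withinBackOffBand mirrors the capped back-off assertions: the observed
// delay must reach the back-off cap but stay under the drift tolerance.
func withinBackOffBand(delay, maxBackOff, tolerance time.Duration) bool {
	return delay >= maxBackOff && delay <= tolerance
}
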
@ -804,7 +803,7 @@ var _ = framework.KubeDescribe("Pods", func() {
podReady := podClient.PodIsReady(podName)
res := expectReady == podReady
if !res {
e2elog.Logf("Expect the Ready condition of pod %q to be %v, but got %v", podName, expectReady, podReady)
framework.Logf("Expect the Ready condition of pod %q to be %v, but got %v", podName, expectReady, podReady)
}
return res, nil
})

View File

@ -23,7 +23,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
@ -64,11 +63,11 @@ var _ = ginkgo.Describe("[sig-storage] Projected combined", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := projectedAllVolumeBasePod(podName, secretName, configMapName, nil, nil)


@ -24,7 +24,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -140,7 +139,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() {
ginkgo.By(fmt.Sprintf("Creating projection with configMap that has name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
@ -255,12 +254,12 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
var err error
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(deleteConfigMap); err != nil {
e2elog.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
}
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(updateConfigMap); err != nil {
e2elog.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
}
pod := &v1.Pod{
@ -393,7 +392,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(createConfigMap); err != nil {
e2elog.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
}
ginkgo.By("waiting to observe update in volume")
@ -421,7 +420,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected configMap", func() {
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
@ -528,7 +527,7 @@ func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
@ -610,7 +609,7 @@ func doProjectedConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup in
var err error
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
e2elog.Failf("unable to create test configMap %s: %v", configMap.Name, err)
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{

View File

@ -24,7 +24,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -96,7 +95,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() {
)
if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil {
e2elog.Failf("unable to create new namespace %s: %v", namespace2.Name, err)
framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err)
}
secret2 := secretForTest(namespace2.Name, secret2Name)
@ -104,7 +103,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() {
"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
}
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(secret2); err != nil {
e2elog.Failf("unable to create test secret %s: %v", secret2.Name, err)
framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
}
doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil)
})
@ -130,7 +129,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
@ -257,12 +256,12 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
var err error
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil {
e2elog.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
}
ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(updateSecret); err != nil {
e2elog.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
}
pod := &v1.Pod{
@ -395,7 +394,7 @@ var _ = ginkgo.Describe("[sig-storage] Projected secret", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil {
e2elog.Failf("unable to create test secret %s: %v", createSecret.Name, err)
framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
}
ginkgo.By("waiting to observe update in volume")
@ -437,7 +436,7 @@ func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int
ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
@ -515,7 +514,7 @@ func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) {
ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{


@ -25,7 +25,6 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
@ -157,7 +156,7 @@ while true; do sleep 1; done
framework.ExpectEqual(GetContainerState(status.State), ContainerStateTerminated)
ginkgo.By("the termination message should be set")
e2elog.Logf("Expected: %v to match Container's Termination Message: %v --", expectedMsg, status.State.Terminated.Message)
framework.Logf("Expected: %v to match Container's Termination Message: %v --", expectedMsg, status.State.Terminated.Message)
gomega.Expect(status.State.Terminated.Message).Should(expectedMsg)
ginkgo.By("delete the container")
@ -348,9 +347,9 @@ while true; do sleep 1; done
break
}
if i < flakeRetry {
e2elog.Logf("No.%d attempt failed: %v, retrying...", i, err)
framework.Logf("No.%d attempt failed: %v, retrying...", i, err)
} else {
e2elog.Failf("All %d attempts failed: %v", flakeRetry, err)
framework.Failf("All %d attempts failed: %v", flakeRetry, err)
}
}
}

View File

@ -23,7 +23,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
@ -44,7 +43,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
@ -92,7 +91,7 @@ var _ = ginkgo.Describe("[sig-api-machinery] Secrets", func() {
ginkgo.By(fmt.Sprintf("creating secret %v/%v", f.Namespace.Name, secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{


@ -24,7 +24,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
@ -101,7 +100,7 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() {
)
if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil {
e2elog.Failf("unable to create new namespace %s: %v", namespace2.Name, err)
framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err)
}
secret2 := secretForTest(namespace2.Name, secret2Name)
@ -109,7 +108,7 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() {
"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
}
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(secret2); err != nil {
e2elog.Failf("unable to create test secret %s: %v", secret2.Name, err)
framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
}
doSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil)
})
@ -135,7 +134,7 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
@ -246,12 +245,12 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
var err error
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(deleteSecret); err != nil {
e2elog.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
}
ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(updateSecret); err != nil {
e2elog.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
}
pod := &v1.Pod{
@ -360,7 +359,7 @@ var _ = ginkgo.Describe("[sig-storage] Secrets", func() {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(createSecret); err != nil {
e2elog.Failf("unable to create test secret %s: %v", createSecret.Name, err)
framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
}
ginkgo.By("waiting to observe update in volume")
@ -416,7 +415,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
@ -485,7 +484,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
@ -603,7 +602,7 @@ func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPat
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret); err != nil {
e2elog.Failf("unable to create test secret %s: %v", secret.Name, err)
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
//creating a pod with a secret object, with a key which is not present in the secret object.
pod := &v1.Pod{


@ -25,7 +25,6 @@ import (
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image"
"k8s.io/utils/pointer"
@ -264,12 +263,12 @@ var _ = framework.KubeDescribe("Security Context", func() {
podName := createAndWaitUserPod(false)
logs, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName)
if err != nil {
e2elog.Failf("GetPodLogs for pod %q failed: %v", podName, err)
framework.Failf("GetPodLogs for pod %q failed: %v", podName, err)
}
e2elog.Logf("Got logs for pod %q: %q", podName, logs)
framework.Logf("Got logs for pod %q: %q", podName, logs)
if !strings.Contains(logs, "Operation not permitted") {
e2elog.Failf("unprivileged container shouldn't be able to create dummy device")
framework.Failf("unprivileged container shouldn't be able to create dummy device")
}
})
})
@ -316,7 +315,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
ginkgo.It("should allow privilege escalation when not explicitly set and uid != 0 [LinuxOnly] [NodeConformance]", func() {
podName := "alpine-nnp-nil-" + string(uuid.NewUUID())
if err := createAndMatchOutput(podName, "Effective uid: 0", nil, 1000); err != nil {
e2elog.Failf("Match output for pod %q failed: %v", podName, err)
framework.Failf("Match output for pod %q failed: %v", podName, err)
}
})
@ -332,7 +331,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
podName := "alpine-nnp-false-" + string(uuid.NewUUID())
apeFalse := false
if err := createAndMatchOutput(podName, "Effective uid: 1000", &apeFalse, 1000); err != nil {
e2elog.Failf("Match output for pod %q failed: %v", podName, err)
framework.Failf("Match output for pod %q failed: %v", podName, err)
}
})
@ -349,7 +348,7 @@ var _ = framework.KubeDescribe("Security Context", func() {
podName := "alpine-nnp-true-" + string(uuid.NewUUID())
apeTrue := true
if err := createAndMatchOutput(podName, "Effective uid: 0", &apeTrue, 1000); err != nil {
e2elog.Failf("Match output for pod %q failed: %v", podName, err)
framework.Failf("Match output for pod %q failed: %v", podName, err)
}
})
})


@ -29,7 +29,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
@ -104,11 +103,11 @@ func SubstituteImageName(content string) string {
contentWithImageName := new(bytes.Buffer)
tmpl, err := template.New("imagemanifest").Parse(content)
if err != nil {
e2elog.Failf("Failed Parse the template: %v", err)
framework.Failf("Failed Parse the template: %v", err)
}
err = tmpl.Execute(contentWithImageName, testImages)
if err != nil {
e2elog.Failf("Failed executing template: %v", err)
framework.Failf("Failed executing template: %v", err)
}
return contentWithImageName.String()
}
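
SubstituteImageName, shown above, pushes the manifest content through text/template and fails the test on parse or execute errors. The same flow with plain error returns instead of framework.Failf, as a self-contained sketch:

package example

import (
	"bytes"
	"text/template"
)

// substitute mirrors SubstituteImageName: parse the manifest content as a
// template and execute it against an image table, surfacing errors.
func substitute(content string, images map[string]string) (string, error) {
	tmpl, err := template.New("imagemanifest").Parse(content)
	if err != nil {
		return "", err
	}
	var buf bytes.Buffer
	if err := tmpl.Execute(&buf, images); err != nil {
		return "", err
	}
	return buf.String(), nil
}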