Merge pull request #83013 from clarklee92/use-log-functions-of-e2e/upgrades

Use log functions of core framework on test/e2e/upgrades
Kubernetes Prow Robot authored 2019-09-24 00:25:41 -07:00, committed by GitHub
commit eccb813183 (GPG Key ID 4AEE18F83AFDEB23; no known key found for this signature in database)
12 changed files with 49 additions and 61 deletions
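The pattern repeated across every file below is the same: drop the `e2elog` alias for `test/e2e/framework/log` and call the equivalent Printf-style helpers that the core `framework` package already exposes. A minimal sketch of the before/after shape (the helper names come straight from the diff; the surrounding function is illustrative only):

```go
package upgrades

import (
	// Before this change the log helpers were reached through a separate alias:
	//   e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	// After it, the same functions are called on the core framework package.
	"k8s.io/kubernetes/test/e2e/framework"
)

// logServiceUp is an illustrative stand-in for the call sites in this diff:
// only the package qualifier changes, the format strings and arguments do not.
func logServiceUp(name string, err error) {
	if err != nil {
		framework.Failf("unable to create test resource %s: %v", name, err)
	}
	framework.Logf("Service endpoint is up")
}
```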

View File

@@ -37,7 +37,6 @@ go_library(
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/service:go_default_library",
"//test/e2e/framework/statefulset:go_default_library",
"//test/e2e/framework/testfiles:go_default_library",

View File

@@ -21,7 +21,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
@@ -80,7 +79,7 @@ func (t *AppArmorUpgradeTest) Test(f *framework.Framework, done <-chan struct{},
func (t *AppArmorUpgradeTest) Teardown(f *framework.Framework) {
// rely on the namespace deletion to clean up everything
ginkgo.By("Logging container failures")
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
}
func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) {
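`framework.LogFailedContainers` takes its logger as a function value, which is why `framework.Logf` can be dropped in where `e2elog.Logf` was passed before. A small sketch of that shape, assuming the logger parameter has the usual `fmt.Printf`-style signature (the helper below is hypothetical, not part of the framework):

```go
package upgrades

import "k8s.io/kubernetes/test/e2e/framework"

// logUnhealthyPods is a hypothetical helper mirroring how LogFailedContainers
// consumes its logger: any func(format string, args ...interface{}) fits, so
// framework.Logf satisfies it exactly as e2elog.Logf did.
func logUnhealthyPods(podNames []string, logFunc func(format string, args ...interface{})) {
	for _, name := range podNames {
		logFunc("container in pod %s failed", name)
	}
}

func logUnhealthyExample() {
	logUnhealthyPods([]string{"example-pod"}, framework.Logf)
}
```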

View File

@@ -29,7 +29,6 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/deployment:go_default_library",
"//test/e2e/framework/job:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/replicaset:go_default_library",
"//test/e2e/framework/statefulset:go_default_library",
"//test/e2e/upgrades:go_default_library",

View File

@@ -26,7 +26,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/upgrades"
)
@@ -81,7 +80,7 @@ func (t *DaemonSetUpgradeTest) Setup(f *framework.Framework) {
ginkgo.By("Creating a DaemonSet")
var err error
if t.daemonSet, err = f.ClientSet.AppsV1().DaemonSets(ns.Name).Create(t.daemonSet); err != nil {
e2elog.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err)
framework.Failf("unable to create test DaemonSet %s: %v", t.daemonSet.Name, err)
}
ginkgo.By("Waiting for DaemonSet pods to become ready")
@@ -114,7 +113,7 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework)
res, err := checkRunningOnAllNodes(f, t.daemonSet.Namespace, t.daemonSet.Labels)
framework.ExpectNoError(err)
if !res {
e2elog.Failf("expected DaemonSet pod to be running on all nodes, it was not")
framework.Failf("expected DaemonSet pod to be running on all nodes, it was not")
}
// DaemonSet resource itself should be good
@@ -122,7 +121,7 @@ func (t *DaemonSetUpgradeTest) validateRunningDaemonSet(f *framework.Framework)
res, err = checkDaemonStatus(f, t.daemonSet.Namespace, t.daemonSet.Name)
framework.ExpectNoError(err)
if !res {
e2elog.Failf("expected DaemonSet to be in a good state, it was not")
framework.Failf("expected DaemonSet to be in a good state, it was not")
}
}
@@ -135,7 +134,7 @@ func checkRunningOnAllNodes(f *framework.Framework, namespace string, selector m
nodeNames := make([]string, 0)
for _, node := range nodeList.Items {
if len(node.Spec.Taints) != 0 {
e2elog.Logf("Ignore taints %v on Node %v for DaemonSet Pod.", node.Spec.Taints, node.Name)
framework.Logf("Ignore taints %v on Node %v for DaemonSet Pod.", node.Spec.Taints, node.Name)
}
// DaemonSet Pods are expected to run on all the nodes in e2e.
nodeNames = append(nodeNames, node.Name)
@@ -156,11 +155,11 @@ func checkDaemonPodOnNodes(f *framework.Framework, namespace string, labelSet ma
nodesToPodCount := make(map[string]int)
for _, pod := range pods {
if controller.IsPodActive(&pod) {
e2elog.Logf("Pod name: %v\t Node Name: %v", pod.Name, pod.Spec.NodeName)
framework.Logf("Pod name: %v\t Node Name: %v", pod.Name, pod.Spec.NodeName)
nodesToPodCount[pod.Spec.NodeName]++
}
}
e2elog.Logf("nodesToPodCount: %v", nodesToPodCount)
framework.Logf("nodesToPodCount: %v", nodesToPodCount)
// Ensure that exactly 1 pod is running on all nodes in nodeNames.
for _, nodeName := range nodeNames {
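For reference, the per-node accounting that `checkDaemonPodOnNodes` performs above boils down to counting active pods per node and requiring exactly one on every expected node; a condensed sketch (the function name and exact shape are illustrative):

```go
package upgrades

import "k8s.io/kubernetes/test/e2e/framework"

// onePodPerNode is a condensed sketch of the check above: every node in
// nodeNames must host exactly one active DaemonSet pod, and no other node
// may host any.
func onePodPerNode(nodesToPodCount map[string]int, nodeNames []string) bool {
	framework.Logf("nodesToPodCount: %v", nodesToPodCount)
	for _, nodeName := range nodeNames {
		if nodesToPodCount[nodeName] != 1 {
			framework.Logf("expected 1 DaemonSet pod on node %q, found %d", nodeName, nodesToPodCount[nodeName])
			return false
		}
	}
	return len(nodesToPodCount) == len(nodeNames)
}
```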

View File

@@ -32,7 +32,6 @@ import (
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
)
@@ -90,13 +89,13 @@ func (t *CassandraUpgradeTest) Setup(f *framework.Framework) {
return false, nil
}
if _, err := t.listUsers(); err != nil {
e2elog.Logf("Service endpoint is up but isn't responding")
framework.Logf("Service endpoint is up but isn't responding")
return false, nil
}
return true, nil
})
framework.ExpectNoError(err)
e2elog.Logf("Service endpoint is up")
framework.Logf("Service endpoint is up")
ginkgo.By("Adding 2 dummy users")
err = t.addUser("Alice")
@@ -177,7 +176,7 @@ func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
go wait.Until(func() {
writeAttempts++
if err := t.addUser(fmt.Sprintf("user-%d", writeAttempts)); err != nil {
e2elog.Logf("Unable to add user: %v", err)
framework.Logf("Unable to add user: %v", err)
mu.Lock()
errors[err.Error()]++
mu.Unlock()
@@ -189,7 +188,7 @@ func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
wait.Until(func() {
users, err := t.listUsers()
if err != nil {
e2elog.Logf("Could not retrieve users: %v", err)
framework.Logf("Could not retrieve users: %v", err)
failures++
mu.Lock()
errors[err.Error()]++
@@ -199,14 +198,14 @@ func (t *CassandraUpgradeTest) Test(f *framework.Framework, done <-chan struct{}
success++
lastUserCount = len(users)
}, 10*time.Millisecond, done)
e2elog.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
gomega.Expect(lastUserCount >= t.successfulWrites).To(gomega.BeTrue())
ratio := float64(success) / float64(success+failures)
e2elog.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
ratio = float64(t.successfulWrites) / float64(writeAttempts)
e2elog.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
e2elog.Logf("Errors: %v", errors)
framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
framework.Logf("Errors: %v", errors)
// TODO(maisem): tweak this value once we have a few test runs.
gomega.Expect(ratio > 0.75).To(gomega.BeTrue())
}
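The Cassandra test (and, below, the etcd and MySQL tests) drives load through the same pattern during the upgrade: a writer goroutine and a reader loop both run via `wait.Until` until the `done` channel closes, with a mutex guarding the shared error tally. A stripped-down sketch, with `addUser`/`listUsers` standing in for the test's HTTP helpers:

```go
package upgrades

import (
	"sync"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/test/e2e/framework"
)

// runLoadUntil sketches the read/write loop above: writes happen in a
// goroutine, reads in the caller, and both stop once done is closed.
func runLoadUntil(done <-chan struct{}, addUser func(string) error, listUsers func() ([]string, error)) map[string]int {
	var mu sync.Mutex
	errors := map[string]int{}
	record := func(err error) {
		mu.Lock()
		errors[err.Error()]++
		mu.Unlock()
	}

	go wait.Until(func() {
		if err := addUser("user"); err != nil {
			framework.Logf("Unable to add user: %v", err)
			record(err)
		}
	}, 10*time.Millisecond, done)

	wait.Until(func() {
		if _, err := listUsers(); err != nil {
			framework.Logf("Could not retrieve users: %v", err)
			record(err)
		}
	}, 10*time.Millisecond, done)

	framework.Logf("Errors: %v", errors)
	return errors
}
```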

View File

@@ -22,7 +22,6 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
@@ -59,7 +58,7 @@ func (t *ConfigMapUpgradeTest) Setup(f *framework.Framework) {
ginkgo.By("Creating a ConfigMap")
var err error
if t.configMap, err = f.ClientSet.CoreV1().ConfigMaps(ns.Name).Create(t.configMap); err != nil {
e2elog.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err)
framework.Failf("unable to create test ConfigMap %s: %v", t.configMap.Name, err)
}
ginkgo.By("Making sure the ConfigMap is consumable")

View File

@@ -32,7 +32,6 @@ import (
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
)
@@ -85,13 +84,13 @@ func (t *EtcdUpgradeTest) Setup(f *framework.Framework) {
return false, nil
}
if _, err := t.listUsers(); err != nil {
e2elog.Logf("Service endpoint is up but isn't responding")
framework.Logf("Service endpoint is up but isn't responding")
return false, nil
}
return true, nil
})
framework.ExpectNoError(err)
e2elog.Logf("Service endpoint is up")
framework.Logf("Service endpoint is up")
ginkgo.By("Adding 2 dummy users")
err = t.addUser("Alice")
@@ -165,7 +164,7 @@ func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upg
go wait.Until(func() {
writeAttempts++
if err := t.addUser(fmt.Sprintf("user-%d", writeAttempts)); err != nil {
e2elog.Logf("Unable to add user: %v", err)
framework.Logf("Unable to add user: %v", err)
mu.Lock()
errors[err.Error()]++
mu.Unlock()
@@ -177,7 +176,7 @@ func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upg
wait.Until(func() {
users, err := t.listUsers()
if err != nil {
e2elog.Logf("Could not retrieve users: %v", err)
framework.Logf("Could not retrieve users: %v", err)
failures++
mu.Lock()
errors[err.Error()]++
@@ -187,14 +186,14 @@ func (t *EtcdUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, upg
success++
lastUserCount = len(users)
}, 10*time.Millisecond, done)
e2elog.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
framework.Logf("got %d users; want >=%d", lastUserCount, t.successfulWrites)
gomega.Expect(lastUserCount >= t.successfulWrites).To(gomega.BeTrue())
ratio := float64(success) / float64(success+failures)
e2elog.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
framework.Logf("Successful gets %d/%d=%v", success, success+failures, ratio)
ratio = float64(t.successfulWrites) / float64(writeAttempts)
e2elog.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
e2elog.Logf("Errors: %v", errors)
framework.Logf("Successful writes %d/%d=%v", t.successfulWrites, writeAttempts, ratio)
framework.Logf("Errors: %v", errors)
// TODO(maisem): tweak this value once we have a few test runs.
gomega.Expect(ratio > 0.75).To(gomega.BeTrue())
}

View File

@@ -27,7 +27,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"github.com/onsi/ginkgo"
)
@@ -110,12 +109,12 @@ func (t *KubeProxyDowngradeTest) Teardown(f *framework.Framework) {
}
func waitForKubeProxyStaticPodsRunning(c clientset.Interface) error {
e2elog.Logf("Waiting up to %v for kube-proxy static pods running", defaultTestTimeout)
framework.Logf("Waiting up to %v for kube-proxy static pods running", defaultTestTimeout)
condition := func() (bool, error) {
pods, err := getKubeProxyStaticPods(c)
if err != nil {
e2elog.Logf("Failed to get kube-proxy static pods: %v", err)
framework.Logf("Failed to get kube-proxy static pods: %v", err)
return false, nil
}
@@ -127,7 +126,7 @@ func waitForKubeProxyStaticPodsRunning(c clientset.Interface) error {
}
}
if numberkubeProxyPods != numberSchedulableNodes {
e2elog.Logf("Expect %v kube-proxy static pods running, got %v running, %v in total", numberSchedulableNodes, numberkubeProxyPods, len(pods.Items))
framework.Logf("Expect %v kube-proxy static pods running, got %v running, %v in total", numberSchedulableNodes, numberkubeProxyPods, len(pods.Items))
return false, nil
}
return true, nil
@@ -140,17 +139,17 @@ func waitForKubeProxyStaticPodsRunning(c clientset.Interface) error {
}
func waitForKubeProxyStaticPodsDisappear(c clientset.Interface) error {
e2elog.Logf("Waiting up to %v for kube-proxy static pods disappear", defaultTestTimeout)
framework.Logf("Waiting up to %v for kube-proxy static pods disappear", defaultTestTimeout)
condition := func() (bool, error) {
pods, err := getKubeProxyStaticPods(c)
if err != nil {
e2elog.Logf("Failed to get kube-proxy static pods: %v", err)
framework.Logf("Failed to get kube-proxy static pods: %v", err)
return false, nil
}
if len(pods.Items) != 0 {
e2elog.Logf("Expect kube-proxy static pods to disappear, got %v pods", len(pods.Items))
framework.Logf("Expect kube-proxy static pods to disappear, got %v pods", len(pods.Items))
return false, nil
}
return true, nil
@@ -163,24 +162,24 @@ func waitForKubeProxyStaticPodsDisappear(c clientset.Interface) error {
}
func waitForKubeProxyDaemonSetRunning(c clientset.Interface) error {
e2elog.Logf("Waiting up to %v for kube-proxy DaemonSet running", defaultTestTimeout)
framework.Logf("Waiting up to %v for kube-proxy DaemonSet running", defaultTestTimeout)
condition := func() (bool, error) {
daemonSets, err := getKubeProxyDaemonSet(c)
if err != nil {
e2elog.Logf("Failed to get kube-proxy DaemonSet: %v", err)
framework.Logf("Failed to get kube-proxy DaemonSet: %v", err)
return false, nil
}
if len(daemonSets.Items) != 1 {
e2elog.Logf("Expect only one kube-proxy DaemonSet, got %v", len(daemonSets.Items))
framework.Logf("Expect only one kube-proxy DaemonSet, got %v", len(daemonSets.Items))
return false, nil
}
numberSchedulableNodes := len(framework.GetReadySchedulableNodesOrDie(c).Items)
numberkubeProxyPods := int(daemonSets.Items[0].Status.NumberAvailable)
if numberkubeProxyPods != numberSchedulableNodes {
e2elog.Logf("Expect %v kube-proxy DaemonSet pods running, got %v", numberSchedulableNodes, numberkubeProxyPods)
framework.Logf("Expect %v kube-proxy DaemonSet pods running, got %v", numberSchedulableNodes, numberkubeProxyPods)
return false, nil
}
return true, nil
@@ -193,17 +192,17 @@ func waitForKubeProxyDaemonSetRunning(c clientset.Interface) error {
}
func waitForKubeProxyDaemonSetDisappear(c clientset.Interface) error {
e2elog.Logf("Waiting up to %v for kube-proxy DaemonSet disappear", defaultTestTimeout)
framework.Logf("Waiting up to %v for kube-proxy DaemonSet disappear", defaultTestTimeout)
condition := func() (bool, error) {
daemonSets, err := getKubeProxyDaemonSet(c)
if err != nil {
e2elog.Logf("Failed to get kube-proxy DaemonSet: %v", err)
framework.Logf("Failed to get kube-proxy DaemonSet: %v", err)
return false, nil
}
if len(daemonSets.Items) != 0 {
e2elog.Logf("Expect kube-proxy DaemonSet to disappear, got %v DaemonSet", len(daemonSets.Items))
framework.Logf("Expect kube-proxy DaemonSet to disappear, got %v DaemonSet", len(daemonSets.Items))
return false, nil
}
return true, nil
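All four `waitForKubeProxy*` helpers share the same polling shape: a condition closure that logs transient failures with `framework.Logf` and returns `(false, nil)` so the poll keeps retrying until the timeout. The hunks only show the condition bodies, so the sketch below assumes a `wait.PollImmediate` wrapper, a `k8s-app=kube-proxy` label selector, and the pre-context client-go `List` signature of this era:

```go
package upgrades

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// waitForDaemonSetGone sketches the polling pattern: errors and "not yet"
// states are logged and swallowed so the condition is retried, and only the
// timeout turns them into a test failure.
func waitForDaemonSetGone(c clientset.Interface, timeout time.Duration) error {
	framework.Logf("Waiting up to %v for kube-proxy DaemonSet to disappear", timeout)
	condition := func() (bool, error) {
		daemonSets, err := c.AppsV1().DaemonSets("kube-system").List(metav1.ListOptions{LabelSelector: "k8s-app=kube-proxy"})
		if err != nil {
			framework.Logf("Failed to get kube-proxy DaemonSet: %v", err)
			return false, nil
		}
		if len(daemonSets.Items) != 0 {
			framework.Logf("Expect kube-proxy DaemonSet to disappear, got %v DaemonSet", len(daemonSets.Items))
			return false, nil
		}
		return true, nil
	}
	return wait.PollImmediate(5*time.Second, timeout, condition)
}
```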

View File

@@ -32,7 +32,6 @@ import (
"k8s.io/apimachinery/pkg/util/version"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
"k8s.io/kubernetes/test/e2e/framework/testfiles"
)
@@ -100,13 +99,13 @@ func (t *MySQLUpgradeTest) Setup(f *framework.Framework) {
return false, nil
}
if _, err := t.countNames(); err != nil {
e2elog.Logf("Service endpoint is up but isn't responding")
framework.Logf("Service endpoint is up but isn't responding")
return false, nil
}
return true, nil
})
framework.ExpectNoError(err)
e2elog.Logf("Service endpoint is up")
framework.Logf("Service endpoint is up")
ginkgo.By("Adding 2 names to the database")
err = t.addName(strconv.Itoa(t.nextWrite))
@@ -128,7 +127,7 @@ func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
go wait.Until(func() {
_, err := t.countNames()
if err != nil {
e2elog.Logf("Error while trying to read data: %v", err)
framework.Logf("Error while trying to read data: %v", err)
readFailure++
} else {
readSuccess++
@@ -138,7 +137,7 @@ func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
wait.Until(func() {
err := t.addName(strconv.Itoa(t.nextWrite))
if err != nil {
e2elog.Logf("Error while trying to write data: %v", err)
framework.Logf("Error while trying to write data: %v", err)
writeFailure++
} else {
writeSuccess++
@@ -146,10 +145,10 @@ func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
}, framework.Poll, done)
t.successfulWrites = writeSuccess
e2elog.Logf("Successful reads: %d", readSuccess)
e2elog.Logf("Successful writes: %d", writeSuccess)
e2elog.Logf("Failed reads: %d", readFailure)
e2elog.Logf("Failed writes: %d", writeFailure)
framework.Logf("Successful reads: %d", readSuccess)
framework.Logf("Successful writes: %d", writeSuccess)
framework.Logf("Failed reads: %d", readFailure)
framework.Logf("Failed writes: %d", writeFailure)
// TODO: Not sure what the ratio defining a successful test run should be. At time of writing the
// test, failures only seem to happen when a race condition occurs (read/write starts, doesn't
@@ -158,10 +157,10 @@ func (t *MySQLUpgradeTest) Test(f *framework.Framework, done <-chan struct{}, up
readRatio := float64(readSuccess) / float64(readSuccess+readFailure)
writeRatio := float64(writeSuccess) / float64(writeSuccess+writeFailure)
if readRatio < 0.75 {
e2elog.Failf("Too many failures reading data. Success ratio: %f", readRatio)
framework.Failf("Too many failures reading data. Success ratio: %f", readRatio)
}
if writeRatio < 0.75 {
e2elog.Failf("Too many failures writing data. Success ratio: %f", writeRatio)
framework.Failf("Too many failures writing data. Success ratio: %f", writeRatio)
}
}

View File

@@ -23,7 +23,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
@@ -57,7 +56,7 @@ func (t *SecretUpgradeTest) Setup(f *framework.Framework) {
ginkgo.By("Creating a secret")
var err error
if t.secret, err = f.ClientSet.CoreV1().Secrets(ns.Name).Create(t.secret); err != nil {
e2elog.Failf("unable to create test secret %s: %v", t.secret.Name, err)
framework.Failf("unable to create test secret %s: %v", t.secret.Name, err)
}
ginkgo.By("Making sure the secret is consumable")

View File

@@ -18,7 +18,6 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/version:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/pod:go_default_library",
"//test/e2e/framework/pv:go_default_library",
"//test/e2e/storage/utils:go_default_library",

View File

@@ -20,7 +20,6 @@ import (
"k8s.io/api/core/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
@@ -75,7 +74,7 @@ func (t *PersistentVolumeUpgradeTest) Test(f *framework.Framework, done <-chan s
func (t *PersistentVolumeUpgradeTest) Teardown(f *framework.Framework) {
errs := e2epv.PVPVCCleanup(f.ClientSet, f.Namespace.Name, nil, t.pvc)
if len(errs) > 0 {
e2elog.Failf("Failed to delete 1 or more PVs/PVCs. Errors: %v", utilerrors.NewAggregate(errs))
framework.Failf("Failed to delete 1 or more PVs/PVCs. Errors: %v", utilerrors.NewAggregate(errs))
}
}
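The teardown in this last file shows a slightly different failure pattern: `PVPVCCleanup` returns a slice of errors rather than failing immediately, and the test folds them into a single aggregate before calling `framework.Failf` once. A minimal sketch of that error-aggregation step:

```go
package upgrades

import (
	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	"k8s.io/kubernetes/test/e2e/framework"
)

// failOnCleanupErrors mirrors the Teardown above: collect every cleanup error,
// fold them into one aggregate, and fail the test with a single Failf call.
func failOnCleanupErrors(errs []error) {
	if len(errs) > 0 {
		framework.Failf("Failed to delete 1 or more PVs/PVCs. Errors: %v", utilerrors.NewAggregate(errs))
	}
}
```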