Use log functions of core framework on test/e2e/apps

toyoda 2019-08-26 17:38:11 +09:00
parent 8a2def5b51
commit c3fdb5e7d9
11 changed files with 181 additions and 192 deletions
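The change is mechanical across the touched files: drop the e2elog import ("k8s.io/kubernetes/test/e2e/framework/log") and call the equivalent Logf/Failf helpers exported by the core framework package, which these tests already import. A minimal sketch of the pattern in Go, with an illustrative function name (the real call sites appear in the hunks below):

    package apps

    import (
        "k8s.io/kubernetes/test/e2e/framework"
        // Dropped by this commit:
        // e2elog "k8s.io/kubernetes/test/e2e/framework/log"
    )

    // logWarning is an illustrative helper, not part of the diff.
    func logWarning(ns string) {
        // Before: e2elog.Logf("Warning: Found 0 jobs in namespace %v", ns)
        framework.Logf("Warning: Found 0 jobs in namespace %v", ns)
    }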


@@ -64,7 +64,6 @@ go_library(
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/deployment:go_default_library",
 "//test/e2e/framework/job:go_default_library",
-"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/node:go_default_library",
 "//test/e2e/framework/pod:go_default_library",
 "//test/e2e/framework/replicaset:go_default_library",


@@ -35,7 +35,6 @@ import (
 "k8s.io/kubernetes/pkg/controller/job"
 "k8s.io/kubernetes/test/e2e/framework"
 jobutil "k8s.io/kubernetes/test/e2e/framework/job"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -451,7 +450,7 @@ func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error
 if len(aliveJobs) > 1 {
 return false, fmt.Errorf("More than one job is running %+v", jobs.Items)
 } else if len(aliveJobs) == 0 {
-e2elog.Logf("Warning: Found 0 jobs in namespace %v", ns)
+framework.Logf("Warning: Found 0 jobs in namespace %v", ns)
 return false, nil
 }
 return aliveJobs[0].Name != previousJobName, nil


@@ -33,7 +33,6 @@ import (
 "k8s.io/client-go/tools/cache"
 "k8s.io/kubernetes/pkg/master/ports"
 "k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 testutils "k8s.io/kubernetes/test/utils"
@@ -74,7 +73,7 @@ type RestartDaemonConfig struct {
 // NewRestartConfig creates a RestartDaemonConfig for the given node and daemon.
 func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval, pollTimeout time.Duration) *RestartDaemonConfig {
 if !framework.ProviderIs("gce") {
-e2elog.Logf("WARNING: SSH through the restart config might not work on %s", framework.TestContext.Provider)
+framework.Logf("WARNING: SSH through the restart config might not work on %s", framework.TestContext.Provider)
 }
 return &RestartDaemonConfig{
 nodeName: nodeName,
@@ -91,7 +90,7 @@ func (r *RestartDaemonConfig) String() string {
 // waitUp polls healthz of the daemon till it returns "ok" or the polling hits the pollTimeout
 func (r *RestartDaemonConfig) waitUp() {
-e2elog.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r)
+framework.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r)
 healthzCheck := fmt.Sprintf(
 "curl -s -o /dev/null -I -w \"%%{http_code}\" http://localhost:%v/healthz", r.healthzPort)
@@ -101,12 +100,12 @@ func (r *RestartDaemonConfig) waitUp() {
 if result.Code == 0 {
 httpCode, err := strconv.Atoi(result.Stdout)
 if err != nil {
-e2elog.Logf("Unable to parse healthz http return code: %v", err)
+framework.Logf("Unable to parse healthz http return code: %v", err)
 } else if httpCode == 200 {
 return true, nil
 }
 }
-e2elog.Logf("node %v exec command, '%v' failed with exitcode %v: \n\tstdout: %v\n\tstderr: %v",
+framework.Logf("node %v exec command, '%v' failed with exitcode %v: \n\tstdout: %v\n\tstderr: %v",
 r.nodeName, healthzCheck, result.Code, result.Stdout, result.Stderr)
 return false, nil
 })
@@ -115,7 +114,7 @@ func (r *RestartDaemonConfig) waitUp() {
 // kill sends a SIGTERM to the daemon
 func (r *RestartDaemonConfig) kill() {
-e2elog.Logf("Killing %v", r)
+framework.Logf("Killing %v", r)
 _, err := e2essh.NodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName), framework.TestContext.Provider)
 framework.ExpectNoError(err)
 }
@@ -275,7 +274,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 }
 if len(newKeys.List()) != len(existingKeys.List()) ||
 !newKeys.IsSuperset(existingKeys) {
-e2elog.Failf("RcManager created/deleted pods after restart \n\n %+v", tracker)
+framework.Failf("RcManager created/deleted pods after restart \n\n %+v", tracker)
 }
 })
@@ -300,13 +299,13 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 ginkgo.It("Kubelet should not restart containers across restart", func() {
 nodeIPs, err := e2enode.GetPublicIps(f.ClientSet)
 if err != nil {
-e2elog.Logf("Unexpected error occurred: %v", err)
+framework.Logf("Unexpected error occurred: %v", err)
 }
 // TODO: write a wrapper for ExpectNoErrorWithOffset()
 framework.ExpectNoErrorWithOffset(0, err)
 preRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
 if preRestarts != 0 {
-e2elog.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes)
+framework.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes)
 }
 for _, ip := range nodeIPs {
 restarter := NewRestartConfig(
@@ -315,8 +314,8 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
 }
 postRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
 if postRestarts != preRestarts {
-framework.DumpNodeDebugInfo(f.ClientSet, badNodes, e2elog.Logf)
-e2elog.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
+framework.DumpNodeDebugInfo(f.ClientSet, badNodes, framework.Logf)
+framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
 }
 })
 })


@@ -37,7 +37,6 @@ import (
 "k8s.io/kubernetes/pkg/controller/daemon"
 schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 "k8s.io/kubernetes/test/e2e/framework"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "github.com/onsi/ginkgo"
 "github.com/onsi/gomega"
@@ -79,14 +78,14 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 }
 }
 if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
-e2elog.Logf("daemonset: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets))
+framework.Logf("daemonset: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets))
 } else {
-e2elog.Logf("unable to dump daemonsets: %v", err)
+framework.Logf("unable to dump daemonsets: %v", err)
 }
 if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
-e2elog.Logf("pods: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), pods))
+framework.Logf("pods: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), pods))
 } else {
-e2elog.Logf("unable to dump pods: %v", err)
+framework.Logf("unable to dump pods: %v", err)
 }
 err = clearDaemonSetNodeLabels(f.ClientSet)
 framework.ExpectNoError(err)
@@ -149,7 +148,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 framework.ConformanceIt("should run and stop complex daemon", func() {
 complexLabel := map[string]string{daemonsetNameLabel: dsName}
 nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
-e2elog.Logf("Creating daemon %q with a node selector", dsName)
+framework.Logf("Creating daemon %q with a node selector", dsName)
 ds := newDaemonSet(dsName, image, complexLabel)
 ds.Spec.Template.Spec.NodeSelector = nodeSelector
 ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
@@ -196,7 +195,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 ginkgo.It("should run and stop complex daemon with node affinity", func() {
 complexLabel := map[string]string{daemonsetNameLabel: dsName}
 nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
-e2elog.Logf("Creating daemon %q with a node affinity", dsName)
+framework.Logf("Creating daemon %q with a node affinity", dsName)
 ds := newDaemonSet(dsName, image, complexLabel)
 ds.Spec.Template.Spec.Affinity = &v1.Affinity{
 NodeAffinity: &v1.NodeAffinity{
@@ -278,7 +277,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 ginkgo.It("should not update pod when spec was updated and update strategy is OnDelete", func() {
 label := map[string]string{daemonsetNameLabel: dsName}
-e2elog.Logf("Creating simple daemon set %s", dsName)
+framework.Logf("Creating simple daemon set %s", dsName)
 ds := newDaemonSet(dsName, image, label)
 ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.OnDeleteDaemonSetStrategyType}
 ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
@@ -327,7 +326,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 framework.ConformanceIt("should update pod when spec was updated and update strategy is RollingUpdate", func() {
 label := map[string]string{daemonsetNameLabel: dsName}
-e2elog.Logf("Creating simple daemon set %s", dsName)
+framework.Logf("Creating simple daemon set %s", dsName)
 ds := newDaemonSet(dsName, image, label)
 ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType}
 ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
@@ -384,18 +383,18 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 framework.ConformanceIt("should rollback without unnecessary restarts", func() {
 schedulableNodes := framework.GetReadySchedulableNodesOrDie(c)
 gomega.Expect(len(schedulableNodes.Items)).To(gomega.BeNumerically(">", 1), "Conformance test suite needs a cluster with at least 2 nodes.")
-e2elog.Logf("Create a RollingUpdate DaemonSet")
+framework.Logf("Create a RollingUpdate DaemonSet")
 label := map[string]string{daemonsetNameLabel: dsName}
 ds := newDaemonSet(dsName, image, label)
 ds.Spec.UpdateStrategy = appsv1.DaemonSetUpdateStrategy{Type: appsv1.RollingUpdateDaemonSetStrategyType}
 ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
 framework.ExpectNoError(err)
-e2elog.Logf("Check that daemon pods launch on every node of the cluster")
+framework.Logf("Check that daemon pods launch on every node of the cluster")
 err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
 framework.ExpectNoError(err, "error waiting for daemon pod to start")
-e2elog.Logf("Update the DaemonSet to trigger a rollout")
+framework.Logf("Update the DaemonSet to trigger a rollout")
 // We use a nonexistent image here, so that we make sure it won't finish
 newImage := "foo:non-existent"
 newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *appsv1.DaemonSet) {
@@ -418,7 +417,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 case newDS.Spec.Template.Spec.Containers[0].Image:
 newPods = append(newPods, &pod)
 default:
-e2elog.Failf("unexpected pod found, image = %s", image)
+framework.Failf("unexpected pod found, image = %s", image)
 }
 }
 schedulableNodes = framework.GetReadySchedulableNodesOrDie(c)
@@ -429,13 +428,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
 }
 framework.ExpectNotEqual(len(newPods), 0)
-e2elog.Logf("Roll back the DaemonSet before rollout is complete")
+framework.Logf("Roll back the DaemonSet before rollout is complete")
 rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *appsv1.DaemonSet) {
 update.Spec.Template.Spec.Containers[0].Image = image
 })
 framework.ExpectNoError(err)
-e2elog.Logf("Make sure DaemonSet rollback is complete")
+framework.Logf("Make sure DaemonSet rollback is complete")
 err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1))
 framework.ExpectNoError(err)
@@ -562,7 +561,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
 return true, err
 }
 if se, ok := err.(*apierrors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
-e2elog.Logf("failed to update node due to resource version conflict")
+framework.Logf("failed to update node due to resource version conflict")
 return false, nil
 }
 return false, err
@@ -580,7 +579,7 @@ func checkDaemonPodOnNodes(f *framework.Framework, ds *appsv1.DaemonSet, nodeNam
 return func() (bool, error) {
 podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
 if err != nil {
-e2elog.Logf("could not get the pod list: %v", err)
+framework.Logf("could not get the pod list: %v", err)
 return false, nil
 }
 pods := podList.Items
@@ -597,17 +596,17 @@ func checkDaemonPodOnNodes(f *framework.Framework, ds *appsv1.DaemonSet, nodeNam
 nodesToPodCount[pod.Spec.NodeName]++
 }
 }
-e2elog.Logf("Number of nodes with available pods: %d", len(nodesToPodCount))
+framework.Logf("Number of nodes with available pods: %d", len(nodesToPodCount))
 // Ensure that exactly 1 pod is running on all nodes in nodeNames.
 for _, nodeName := range nodeNames {
 if nodesToPodCount[nodeName] != 1 {
-e2elog.Logf("Node %s is running more than one daemon pod", nodeName)
+framework.Logf("Node %s is running more than one daemon pod", nodeName)
 return false, nil
 }
 }
-e2elog.Logf("Number of running nodes: %d, number of available pods: %d", len(nodeNames), len(nodesToPodCount))
+framework.Logf("Number of running nodes: %d, number of available pods: %d", len(nodeNames), len(nodesToPodCount))
 // Ensure that sizes of the lists are the same. We've verified that every element of nodeNames is in
 // nodesToPodCount, so verifying the lengths are equal ensures that there aren't pods running on any
 // other nodes.
@@ -628,7 +627,7 @@ func schedulableNodes(c clientset.Interface, ds *appsv1.DaemonSet) []string {
 nodeNames := make([]string, 0)
 for _, node := range nodeList.Items {
 if !canScheduleOnNode(node, ds) {
-e2elog.Logf("DaemonSet pods can't tolerate node %s with taints %+v, skip checking this node", node.Name, node.Spec.Taints)
+framework.Logf("DaemonSet pods can't tolerate node %s with taints %+v, skip checking this node", node.Name, node.Spec.Taints)
 continue
 }
 nodeNames = append(nodeNames, node.Name)
@@ -655,7 +654,7 @@ func canScheduleOnNode(node v1.Node, ds *appsv1.DaemonSet) bool {
 nodeInfo.SetNode(&node)
 fit, _, err := daemon.Predicates(newPod, nodeInfo)
 if err != nil {
-e2elog.Failf("Can't test DaemonSet predicates for node %s: %v", node.Name, err)
+framework.Failf("Can't test DaemonSet predicates for node %s: %v", node.Name, err)
 return false
 }
 return fit
@@ -693,12 +692,12 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *appsv1.Daemo
 }
 podImage := pod.Spec.Containers[0].Image
 if podImage != image {
-e2elog.Logf("Wrong image for pod: %s. Expected: %s, got: %s.", pod.Name, image, podImage)
+framework.Logf("Wrong image for pod: %s. Expected: %s, got: %s.", pod.Name, image, podImage)
 } else {
 nodesToUpdatedPodCount[pod.Spec.NodeName]++
 }
 if !podutil.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) {
-e2elog.Logf("Pod %s is not available", pod.Name)
+framework.Logf("Pod %s is not available", pod.Name)
 unavailablePods++
 }
 }
@@ -737,7 +736,7 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st
 if len(historyList.Items) == numHistory {
 return true, nil
 }
-e2elog.Logf("%d/%d controllerrevisions created.", len(historyList.Items), numHistory)
+framework.Logf("%d/%d controllerrevisions created.", len(historyList.Items), numHistory)
 return false, nil
 }
 err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, listHistoryFn)


@@ -40,7 +40,6 @@ import (
 deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 "k8s.io/kubernetes/test/e2e/framework"
 e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
-e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 "k8s.io/kubernetes/test/e2e/framework/replicaset"
 testutil "k8s.io/kubernetes/test/utils"
@@ -127,49 +126,49 @@ var _ = SIGDescribe("Deployment", func() {
 func failureTrap(c clientset.Interface, ns string) {
 deployments, err := c.AppsV1().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
 if err != nil {
-e2elog.Logf("Could not list Deployments in namespace %q: %v", ns, err)
+framework.Logf("Could not list Deployments in namespace %q: %v", ns, err)
 return
 }
 for i := range deployments.Items {
 d := deployments.Items[i]
-e2elog.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d))
+framework.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d))
 _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c.AppsV1())
 if err != nil {
-e2elog.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err)
+framework.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err)
 return
 }
-testutil.LogReplicaSetsOfDeployment(&d, allOldRSs, newRS, e2elog.Logf)
+testutil.LogReplicaSetsOfDeployment(&d, allOldRSs, newRS, framework.Logf)
 rsList := allOldRSs
 if newRS != nil {
 rsList = append(rsList, newRS)
 }
-testutil.LogPodsOfDeployment(c, &d, rsList, e2elog.Logf)
+testutil.LogPodsOfDeployment(c, &d, rsList, framework.Logf)
 }
 // We need print all the ReplicaSets if there are no Deployment object created
 if len(deployments.Items) != 0 {
 return
 }
-e2elog.Logf("Log out all the ReplicaSets if there is no deployment created")
+framework.Logf("Log out all the ReplicaSets if there is no deployment created")
 rss, err := c.AppsV1().ReplicaSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
 if err != nil {
-e2elog.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err)
+framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err)
 return
 }
 for _, rs := range rss.Items {
-e2elog.Logf(spew.Sprintf("ReplicaSet %q:\n%+v\n", rs.Name, rs))
+framework.Logf(spew.Sprintf("ReplicaSet %q:\n%+v\n", rs.Name, rs))
 selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
 if err != nil {
-e2elog.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err)
+framework.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err)
 }
 options := metav1.ListOptions{LabelSelector: selector.String()}
 podList, err := c.CoreV1().Pods(rs.Namespace).List(options)
 if err != nil {
-e2elog.Logf("Failed to list Pods in namespace %s: %v", rs.Namespace, err)
+framework.Logf("Failed to list Pods in namespace %s: %v", rs.Namespace, err)
 continue
 }
 for _, pod := range podList.Items {
-e2elog.Logf(spew.Sprintf("pod: %q:\n%+v\n", pod.Name, pod))
+framework.Logf(spew.Sprintf("pod: %q:\n%+v\n", pod.Name, pod))
 }
 }
 }
@@ -191,22 +190,22 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) {
 deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 framework.ExpectNoError(err)
-e2elog.Logf("Deleting deployment %s", deploymentName)
+framework.Logf("Deleting deployment %s", deploymentName)
 err = framework.DeleteResourceAndWaitForGC(c, appsinternal.Kind("Deployment"), ns, deployment.Name)
 framework.ExpectNoError(err)
-e2elog.Logf("Ensuring deployment %s was deleted", deploymentName)
+framework.Logf("Ensuring deployment %s was deleted", deploymentName)
 _, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
 framework.ExpectError(err)
 gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
-e2elog.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
+framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
 selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
 framework.ExpectNoError(err)
 options := metav1.ListOptions{LabelSelector: selector.String()}
 rss, err := c.AppsV1().ReplicaSets(ns).List(options)
 framework.ExpectNoError(err)
 gomega.Expect(rss.Items).Should(gomega.HaveLen(0))
-e2elog.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
+framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
 var pods *v1.PodList
 if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
 pods, err = c.CoreV1().Pods(ns).List(options)
@@ -219,7 +218,7 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) {
 }
 return false, nil
 }); err != nil {
-e2elog.Failf("Err : %s\n. Failed to remove deployment %s pods : %+v", err, deploymentName, pods)
+framework.Failf("Err : %s\n. Failed to remove deployment %s pods : %+v", err, deploymentName, pods)
 }
 }
@@ -230,7 +229,7 @@ func testDeleteDeployment(f *framework.Framework) {
 deploymentName := "test-new-deployment"
 podLabels := map[string]string{"name": WebserverImageName}
 replicas := int32(1)
-e2elog.Logf("Creating simple deployment %s", deploymentName)
+framework.Logf("Creating simple deployment %s", deploymentName)
 d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
 d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
 deploy, err := c.AppsV1().Deployments(ns).Create(d)
@@ -268,7 +267,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
 annotations[deploymentutil.RevisionAnnotation] = rsRevision
 rs := newRS(rsName, replicas, rsPodLabels, WebserverImageName, WebserverImage, nil)
 rs.Annotations = annotations
-e2elog.Logf("Creating replica set %q (going to be adopted)", rs.Name)
+framework.Logf("Creating replica set %q (going to be adopted)", rs.Name)
 _, err := c.AppsV1().ReplicaSets(ns).Create(rs)
 framework.ExpectNoError(err)
 // Verify that the required pods have come up.
@@ -277,22 +276,22 @@ func testRollingUpdateDeployment(f *framework.Framework) {
 // Create a deployment to delete webserver pods and instead bring up redis pods.
 deploymentName := "test-rolling-update-deployment"
-e2elog.Logf("Creating deployment %q", deploymentName)
+framework.Logf("Creating deployment %q", deploymentName)
 d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, appsv1.RollingUpdateDeploymentStrategyType)
 deploy, err := c.AppsV1().Deployments(ns).Create(d)
 framework.ExpectNoError(err)
 // Wait for it to be updated to revision 3546343826724305833.
-e2elog.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name)
+framework.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name)
 err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", RedisImage)
 framework.ExpectNoError(err)
-e2elog.Logf("Ensuring status for deployment %q is the expected", deploy.Name)
+framework.Logf("Ensuring status for deployment %q is the expected", deploy.Name)
 err = e2edeploy.WaitForDeploymentComplete(c, deploy)
 framework.ExpectNoError(err)
 // There should be 1 old RS (webserver-controller, which is adopted)
-e2elog.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
+framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
 deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 framework.ExpectNoError(err)
 _, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
@@ -306,29 +305,29 @@ func testRecreateDeployment(f *framework.Framework) {
 // Create a deployment that brings up redis pods.
 deploymentName := "test-recreate-deployment"
-e2elog.Logf("Creating deployment %q", deploymentName)
+framework.Logf("Creating deployment %q", deploymentName)
 d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, appsv1.RecreateDeploymentStrategyType)
 deployment, err := c.AppsV1().Deployments(ns).Create(d)
 framework.ExpectNoError(err)
 // Wait for it to be updated to revision 1
-e2elog.Logf("Waiting deployment %q to be updated to revision 1", deploymentName)
+framework.Logf("Waiting deployment %q to be updated to revision 1", deploymentName)
 err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", RedisImage)
 framework.ExpectNoError(err)
-e2elog.Logf("Waiting deployment %q to complete", deploymentName)
+framework.Logf("Waiting deployment %q to complete", deploymentName)
 err = e2edeploy.WaitForDeploymentComplete(c, deployment)
 framework.ExpectNoError(err)
 // Update deployment to delete redis pods and bring up webserver pods.
-e2elog.Logf("Triggering a new rollout for deployment %q", deploymentName)
+framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
 deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *appsv1.Deployment) {
 update.Spec.Template.Spec.Containers[0].Name = WebserverImageName
 update.Spec.Template.Spec.Containers[0].Image = WebserverImage
 })
 framework.ExpectNoError(err)
-e2elog.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName)
+framework.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName)
 err = e2edeploy.WatchRecreateDeployment(c, deployment)
 framework.ExpectNoError(err)
 }
@@ -355,7 +354,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
 // Create a deployment to delete webserver pods and instead bring up redis pods.
 deploymentName := "test-cleanup-deployment"
-e2elog.Logf("Creating deployment %s", deploymentName)
+framework.Logf("Creating deployment %s", deploymentName)
 pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
 framework.ExpectNoError(err, "Failed to query for pods: %v", err)
@@ -379,14 +378,14 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
 }
 numPodCreation--
 if numPodCreation < 0 {
-e2elog.Failf("Expect only one pod creation, the second creation event: %#v\n", event)
+framework.Failf("Expect only one pod creation, the second creation event: %#v\n", event)
 }
 pod, ok := event.Object.(*v1.Pod)
 if !ok {
-e2elog.Failf("Expect event Object to be a pod")
+framework.Failf("Expect event Object to be a pod")
 }
 if pod.Spec.Containers[0].Name != RedisImageName {
-e2elog.Failf("Expect the created pod to have container name %s, got pod %#v\n", RedisImageName, pod)
+framework.Failf("Expect the created pod to have container name %s, got pod %#v\n", RedisImageName, pod)
 }
 case <-stopCh:
 return
@@ -424,7 +423,7 @@ func testRolloverDeployment(f *framework.Framework) {
 framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)
 // Wait for replica set to become ready before adopting it.
-e2elog.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
+framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
 err = replicaset.WaitForReadyReplicaSet(c, ns, rsName)
 framework.ExpectNoError(err)
@@ -434,7 +433,7 @@ func testRolloverDeployment(f *framework.Framework) {
 deploymentReplicas := int32(1)
 deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent"
 deploymentStrategyType := appsv1.RollingUpdateDeploymentStrategyType
-e2elog.Logf("Creating deployment %q", deploymentName)
+framework.Logf("Creating deployment %q", deploymentName)
 newDeployment := e2edeploy.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
 newDeployment.Spec.Strategy.RollingUpdate = &appsv1.RollingUpdateDeployment{
 MaxUnavailable: intOrStrP(0),
@@ -447,15 +446,15 @@ func testRolloverDeployment(f *framework.Framework) {
 // Verify that the pods were scaled up and down as expected.
 deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
 framework.ExpectNoError(err)
-e2elog.Logf("Make sure deployment %q performs scaling operations", deploymentName)
+framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
 // Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
 err = e2edeploy.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
 // Check if it's updated to revision 1 correctly
-e2elog.Logf("Check revision of new replica set for deployment %q", deploymentName)
+framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
 err = e2edeploy.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
 framework.ExpectNoError(err)
-e2elog.Logf("Ensure that both replica sets have 1 created replica")
+framework.Logf("Ensure that both replica sets have 1 created replica")
 oldRS, err := c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
 framework.ExpectNoError(err)
 ensureReplicas(oldRS, int32(1))
@@ -464,7 +463,7 @@ func testRolloverDeployment(f *framework.Framework) {
 ensureReplicas(newRS, int32(1))
 // The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up redis pods.
-e2elog.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
+framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
 updatedDeploymentImageName, updatedDeploymentImage := RedisImageName, RedisImage
 deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *appsv1.Deployment) {
 update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
@@ -473,20 +472,20 @@ func testRolloverDeployment(f *framework.Framework) {
 framework.ExpectNoError(err)
 // Use observedGeneration to determine if the controller noticed the pod template update.
-e2elog.Logf("Wait deployment %q to be observed by the deployment controller", deploymentName)
+framework.Logf("Wait deployment %q to be observed by the deployment controller", deploymentName)
 err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
 framework.ExpectNoError(err)
 // Wait for it to be updated to revision 2
-e2elog.Logf("Wait for revision update of deployment %q to 2", deploymentName)
+framework.Logf("Wait for revision update of deployment %q to 2", deploymentName)
 err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
 framework.ExpectNoError(err)
-e2elog.Logf("Make sure deployment %q is complete", deploymentName)
+framework.Logf("Make sure deployment %q is complete", deploymentName)
 err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
 framework.ExpectNoError(err)
-e2elog.Logf("Ensure that both old replica sets have no replicas")
+framework.Logf("Ensure that both old replica sets have no replicas")
 oldRS, err = c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
 framework.ExpectNoError(err)
 ensureReplicas(oldRS, int32(0))
@@ -504,11 +503,11 @@ func ensureReplicas(rs *appsv1.ReplicaSet, replicas int32) {
 func randomScale(d *appsv1.Deployment, i int) {
 switch r := rand.Float32(); {
 case r < 0.3:
-e2elog.Logf("%02d: scaling up", i)
+framework.Logf("%02d: scaling up", i)
 *(d.Spec.Replicas)++
 case r < 0.6:
 if *(d.Spec.Replicas) > 1 {
-e2elog.Logf("%02d: scaling down", i)
+framework.Logf("%02d: scaling down", i)
 *(d.Spec.Replicas)--
 }
 }
@@ -530,7 +529,7 @@ func testIterativeDeployments(f *framework.Framework) {
 d.Spec.ProgressDeadlineSeconds = &thirty
 d.Spec.RevisionHistoryLimit = &two
 d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
-e2elog.Logf("Creating deployment %q", deploymentName)
+framework.Logf("Creating deployment %q", deploymentName)
 deployment, err := c.AppsV1().Deployments(ns).Create(d)
 framework.ExpectNoError(err)
@@ -543,7 +542,7 @@ func testIterativeDeployments(f *framework.Framework) {
 switch n := rand.Float32(); {
 case n < 0.2:
 // trigger a new deployment
-e2elog.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
+framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
 deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
 newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
 update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
@@ -553,7 +552,7 @@ func testIterativeDeployments(f *framework.Framework) {
 case n < 0.4:
 // rollback to the previous version
-e2elog.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
+framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
 deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
 if update.Annotations == nil {
 update.Annotations = make(map[string]string)
@@ -564,7 +563,7 @@ func testIterativeDeployments(f *framework.Framework) {
 case n < 0.6:
 // just scaling
-e2elog.Logf("%02d: scaling deployment %q", i, deployment.Name)
+framework.Logf("%02d: scaling deployment %q", i, deployment.Name)
 deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
 randomScale(update, i)
 })
@@ -573,14 +572,14 @@ func testIterativeDeployments(f *framework.Framework) {
 case n < 0.8:
 // toggling the deployment
 if deployment.Spec.Paused {
-e2elog.Logf("%02d: pausing deployment %q", i, deployment.Name)
+framework.Logf("%02d: pausing deployment %q", i, deployment.Name)
 deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
 update.Spec.Paused = true
 randomScale(update, i)
 })
 framework.ExpectNoError(err)
 } else {
-e2elog.Logf("%02d: resuming deployment %q", i, deployment.Name)
+framework.Logf("%02d: resuming deployment %q", i, deployment.Name)
 deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
 update.Spec.Paused = false
 randomScale(update, i)
@@ -590,14 +589,14 @@ func testIterativeDeployments(f *framework.Framework) {
 default:
 // arbitrarily delete deployment pods
-e2elog.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name)
+framework.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name)
 selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
 framework.ExpectNoError(err)
 opts := metav1.ListOptions{LabelSelector: selector.String()}
 podList, err := c.CoreV1().Pods(ns).List(opts)
 framework.ExpectNoError(err)
 if len(podList.Items) == 0 {
-e2elog.Logf("%02d: no deployment pods to delete", i)
+framework.Logf("%02d: no deployment pods to delete", i)
 continue
 }
 for p := range podList.Items {
@@ -605,7 +604,7 @@ func testIterativeDeployments(f *framework.Framework) {
 continue
 }
 name := podList.Items[p].Name
-e2elog.Logf("%02d: deleting deployment pod %q", i, name)
+framework.Logf("%02d: deleting deployment pod %q", i, name)
 err := c.CoreV1().Pods(ns).Delete(name, nil)
 if err != nil && !errors.IsNotFound(err) {
 framework.ExpectNoError(err)
@@ -623,15 +622,15 @@ func testIterativeDeployments(f *framework.Framework) {
 })
 }
-e2elog.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
+framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
 err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
 framework.ExpectNoError(err)
-e2elog.Logf("Waiting for deployment %q status", deploymentName)
+framework.Logf("Waiting for deployment %q status", deploymentName)
 err = e2edeploy.WaitForDeploymentComplete(c, deployment)
 framework.ExpectNoError(err)
-e2elog.Logf("Checking deployment %q for a complete condition", deploymentName)
+framework.Logf("Checking deployment %q for a complete condition", deploymentName)
 err = e2edeploy.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, appsv1.DeploymentProgressing)
 framework.ExpectNoError(err)
 }
@@ -641,7 +640,7 @@ func testDeploymentsControllerRef(f *framework.Framework) {
 c := f.ClientSet
 deploymentName := "test-orphan-deployment"
-e2elog.Logf("Creating Deployment %q", deploymentName)
+framework.Logf("Creating Deployment %q", deploymentName)
 podLabels := map[string]string{"name": WebserverImageName}
 replicas := int32(1)
 d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
@@ -650,18 +649,18 @@ func testDeploymentsControllerRef(f *framework.Framework) {
 err = e2edeploy.WaitForDeploymentComplete(c, deploy)
 framework.ExpectNoError(err)
-e2elog.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
+framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
 rsList := listDeploymentReplicaSets(c, ns, podLabels)
 framework.ExpectEqual(len(rsList.Items), 1)
-e2elog.Logf("Obtaining the ReplicaSet's UID")
+framework.Logf("Obtaining the ReplicaSet's UID")
 orphanedRSUID := rsList.Items[0].UID
-e2elog.Logf("Checking the ReplicaSet has the right controllerRef")
+framework.Logf("Checking the ReplicaSet has the right controllerRef")
 err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
 framework.ExpectNoError(err)
-e2elog.Logf("Deleting Deployment %q and orphaning its ReplicaSet", deploymentName)
+framework.Logf("Deleting Deployment %q and orphaning its ReplicaSet", deploymentName)
 err = orphanDeploymentReplicaSets(c, deploy)
 framework.ExpectNoError(err)
@@ -670,22 +669,22 @@ func testDeploymentsControllerRef(f *framework.Framework) {
 framework.ExpectNoError(err, "error waiting for Deployment ReplicaSet to be orphaned")
 deploymentName = "test-adopt-deployment"
-e2elog.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
+framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
 d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, WebserverImageName, WebserverImage, appsv1.RollingUpdateDeploymentStrategyType)
 deploy, err = c.AppsV1().Deployments(ns).Create(d)
 framework.ExpectNoError(err)
 err = e2edeploy.WaitForDeploymentComplete(c, deploy)
 framework.ExpectNoError(err)
-e2elog.Logf("Waiting for the ReplicaSet to have the right controllerRef")
+framework.Logf("Waiting for the ReplicaSet to have the right controllerRef")
 err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
 framework.ExpectNoError(err)
-e2elog.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName)
+framework.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName)
 rsList = listDeploymentReplicaSets(c, ns, podLabels)
 framework.ExpectEqual(len(rsList.Items), 1)
-e2elog.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet")
+framework.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet")
 framework.ExpectEqual(rsList.Items[0].UID, orphanedRSUID)
 }
@@ -706,20 +705,20 @@ func testProportionalScalingDeployment(f *framework.Framework) {
 d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
 d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
-e2elog.Logf("Creating deployment %q", deploymentName)
+framework.Logf("Creating deployment %q", deploymentName)
 deployment, err := c.AppsV1().Deployments(ns).Create(d)
 framework.ExpectNoError(err)
-e2elog.Logf("Waiting for observed generation %d", deployment.Generation)
+framework.Logf("Waiting for observed generation %d", deployment.Generation)
 err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
 framework.ExpectNoError(err)
 // Verify that the required pods have come up.
-e2elog.Logf("Waiting for all required pods to come up")
+framework.Logf("Waiting for all required pods to come up")
 err = e2epod.VerifyPodsRunning(c, ns, WebserverImageName, false, *(deployment.Spec.Replicas))
 framework.ExpectNoError(err, "error in waiting for pods to come up: %v", err)
-e2elog.Logf("Waiting for deployment %q to complete", deployment.Name)
+framework.Logf("Waiting for deployment %q to complete", deployment.Name)
 err = e2edeploy.WaitForDeploymentComplete(c, deployment)
 framework.ExpectNoError(err)
@@ -728,13 +727,13 @@ func testProportionalScalingDeployment(f *framework.Framework) {
 // Update the deployment with a non-existent image so that the new replica set
 // will be blocked to simulate a partial rollout.
-e2elog.Logf("Updating deployment %q with a non-existent image", deploymentName)
+framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
 deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) {
 update.Spec.Template.Spec.Containers[0].Image = "webserver:404"
 })
 framework.ExpectNoError(err)
-e2elog.Logf("Waiting for observed generation %d", deployment.Generation)
+framework.Logf("Waiting for observed generation %d", deployment.Generation)
 err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
 framework.ExpectNoError(err)
@@ -744,17 +743,17 @@ func testProportionalScalingDeployment(f *framework.Framework) {
 // First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas.
minAvailableReplicas := replicas - int32(maxUnavailable) minAvailableReplicas := replicas - int32(maxUnavailable)
e2elog.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas) framework.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas)
err = replicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas) err = replicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// First rollout's replicaset should have .spec.replicas = 8 too. // First rollout's replicaset should have .spec.replicas = 8 too.
e2elog.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas) framework.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas)
err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas) err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// The desired replicas wait makes sure that the RS controller has created expected number of pods. // The desired replicas wait makes sure that the RS controller has created expected number of pods.
e2elog.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName) framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{}) firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS) err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS)
@ -768,24 +767,24 @@ func testProportionalScalingDeployment(f *framework.Framework) {
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Second rollout's replicaset should have 0 available replicas. // Second rollout's replicaset should have 0 available replicas.
e2elog.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0") framework.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0")
framework.ExpectEqual(secondRS.Status.AvailableReplicas, int32(0)) framework.ExpectEqual(secondRS.Status.AvailableReplicas, int32(0))
// Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas. // Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas.
newReplicas := replicas + int32(maxSurge) - minAvailableReplicas newReplicas := replicas + int32(maxSurge) - minAvailableReplicas
e2elog.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas) framework.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas)
err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas) err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// The desired replicas wait makes sure that the RS controller has created expected number of pods. // The desired replicas wait makes sure that the RS controller has created expected number of pods.
e2elog.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName) framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{}) secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS) err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Check the deployment's minimum availability. // Check the deployment's minimum availability.
e2elog.Logf("Verifying that deployment %q has minimum required number of available replicas", deploymentName) framework.Logf("Verifying that deployment %q has minimum required number of available replicas", deploymentName)
if deployment.Status.AvailableReplicas < minAvailableReplicas { if deployment.Status.AvailableReplicas < minAvailableReplicas {
err = fmt.Errorf("observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, minAvailableReplicas) err = fmt.Errorf("observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, minAvailableReplicas)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -793,13 +792,13 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// Scale the deployment to 30 replicas. // Scale the deployment to 30 replicas.
newReplicas = int32(30) newReplicas = int32(30)
e2elog.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas) framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) { deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *appsv1.Deployment) {
update.Spec.Replicas = &newReplicas update.Spec.Replicas = &newReplicas
}) })
framework.ExpectNoError(err) framework.ExpectNoError(err)
e2elog.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName) framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName)
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{}) firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{}) secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
@ -807,13 +806,13 @@ func testProportionalScalingDeployment(f *framework.Framework) {
// First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas. // First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas.
// Note that 12 comes from rounding (30-10)*(8/13) to nearest integer. // Note that 12 comes from rounding (30-10)*(8/13) to nearest integer.
e2elog.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20") framework.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20")
err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, 20) err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, 20)
framework.ExpectNoError(err) framework.ExpectNoError(err)
// Second rollout's replicaset should have .spec.replicas = 5 + (30-10)*(5/13) = 5 + 8 = 13 replicas. // Second rollout's replicaset should have .spec.replicas = 5 + (30-10)*(5/13) = 5 + 8 = 13 replicas.
// Note that 8 comes from rounding (30-10)*(5/13) to nearest integer. // Note that 8 comes from rounding (30-10)*(5/13) to nearest integer.
e2elog.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13") framework.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13")
err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, 13) err = replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, 13)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
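For reference, the replica counts asserted in testProportionalScalingDeployment above follow directly from the rollingUpdate parameters and the proportional scale-up rule spelled out in the comments (rounding to the nearest integer). A standalone sketch of that arithmetic, not part of the test code:

package main

import (
	"fmt"
	"math"
)

func main() {
	replicas, maxSurge, maxUnavailable := int32(10), int32(3), int32(2)

	// During the blocked rollout the first RS keeps replicas-maxUnavailable pods available.
	firstRS := replicas - maxUnavailable // 10 - 2 = 8
	// The new (blocked) RS gets replicas+maxSurge-firstRS pods.
	secondRS := replicas + maxSurge - firstRS // 10 + 3 - 8 = 5

	// Scaling the deployment to 30 spreads the 20 extra replicas proportionally (8:5),
	// rounding to the nearest integer as the comments above describe.
	extra := int32(30) - replicas
	total := firstRS + secondRS
	firstScaled := firstRS + int32(math.Round(float64(extra*firstRS)/float64(total)))    // 8 + 12 = 20
	secondScaled := secondRS + int32(math.Round(float64(extra*secondRS)/float64(total))) // 5 + 8 = 13

	fmt.Println(firstRS, secondRS, firstScaled, secondScaled) // 8 5 20 13
}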


@ -32,7 +32,6 @@ import (
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/client-go/util/retry" "k8s.io/client-go/util/retry"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
) )
@ -311,7 +310,7 @@ func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) {
return false, fmt.Errorf("pods is nil") return false, fmt.Errorf("pods is nil")
} }
if len(pods.Items) < n { if len(pods.Items) < n {
e2elog.Logf("pods: %v < %v", len(pods.Items), n) framework.Logf("pods: %v < %v", len(pods.Items), n)
return false, nil return false, nil
} }
ready := 0 ready := 0
@ -321,7 +320,7 @@ func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) {
} }
} }
if ready < n { if ready < n {
e2elog.Logf("running pods: %v < %v", ready, n) framework.Logf("running pods: %v < %v", ready, n)
return false, nil return false, nil
} }
return true, nil return true, nil


@ -26,7 +26,6 @@ import (
batchinternal "k8s.io/kubernetes/pkg/apis/batch" batchinternal "k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
jobutil "k8s.io/kubernetes/test/e2e/framework/job" jobutil "k8s.io/kubernetes/test/e2e/framework/job"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
"github.com/onsi/ginkgo" "github.com/onsi/ginkgo"
@ -243,7 +242,7 @@ var _ = SIGDescribe("Job", func() {
// updates we need to allow more than backoff+1 // updates we need to allow more than backoff+1
// TODO revert this back to above when https://github.com/kubernetes/kubernetes/issues/64787 gets fixed // TODO revert this back to above when https://github.com/kubernetes/kubernetes/issues/64787 gets fixed
if len(pods.Items) < backoff+1 { if len(pods.Items) < backoff+1 {
e2elog.Failf("Not enough pod created expected at least %d, got %#v", backoff+1, pods.Items) framework.Failf("Not enough pod created expected at least %d, got %#v", backoff+1, pods.Items)
} }
for _, pod := range pods.Items { for _, pod := range pods.Items {
framework.ExpectEqual(pod.Status.Phase, v1.PodFailed) framework.ExpectEqual(pod.Status.Phase, v1.PodFailed)
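The backoff+1 lower bound checked above comes from the Job's spec.backoffLimit: a limit of backoff allows the initial attempt plus backoff retries, so roughly backoff+1 failed pods accumulate before the Job is marked failed. A hedged sketch of such a Job object (the name, image, and always-failing command are illustrative, not taken from the test):

// Imports assumed: batchv1 "k8s.io/api/batch/v1", v1 "k8s.io/api/core/v1",
// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1".
func backoffJob(backoff int32) *batchv1.Job {
	return &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "backofflimit"},
		Spec: batchv1.JobSpec{
			BackoffLimit: &backoff, // controller gives up after backoff retries, i.e. about backoff+1 failed pods
			Template: v1.PodTemplateSpec{
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
					Containers: []v1.Container{{
						Name:    "fail",
						Image:   "busybox",
						Command: []string{"/bin/sh", "-c", "exit 1"}, // always fails
					}},
				},
			},
		},
	}
}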


@ -37,7 +37,6 @@ import (
"k8s.io/kubernetes/test/e2e/common" "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
jobutil "k8s.io/kubernetes/test/e2e/framework/job" jobutil "k8s.io/kubernetes/test/e2e/framework/job"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
@ -64,14 +63,14 @@ func expectNodeReadiness(isReady bool, newNode chan *v1.Node) {
if e2enode.IsConditionSetAsExpected(n, v1.NodeReady, isReady) { if e2enode.IsConditionSetAsExpected(n, v1.NodeReady, isReady) {
expected = true expected = true
} else { } else {
e2elog.Logf("Observed node ready status is NOT %v as expected", isReady) framework.Logf("Observed node ready status is NOT %v as expected", isReady)
} }
case <-timer: case <-timer:
timeout = true timeout = true
} }
} }
if !expected { if !expected {
e2elog.Failf("Failed to observe node ready status change to %v", isReady) framework.Failf("Failed to observe node ready status change to %v", isReady)
} }
} }
@ -101,9 +100,9 @@ func podOnNode(podName, nodeName string, image string) *v1.Pod {
func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) error { func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) error {
pod, err := c.CoreV1().Pods(namespace).Create(podOnNode(podName, nodeName, framework.ServeHostnameImage)) pod, err := c.CoreV1().Pods(namespace).Create(podOnNode(podName, nodeName, framework.ServeHostnameImage))
if err == nil { if err == nil {
e2elog.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName) framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
} else { } else {
e2elog.Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err) framework.Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err)
} }
return err return err
} }
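newPodOnNode relies on podOnNode (whose body is not shown in this diff) to pin the pod to the target node; the usual mechanism in these tests is setting spec.nodeName, which bypasses the scheduler. A hedged sketch of such a helper, not the test's actual implementation:

// Imports assumed: v1 "k8s.io/api/core/v1", metav1 "k8s.io/apimachinery/pkg/apis/meta/v1".
func pinnedPod(podName, nodeName, image string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: podName, Labels: map[string]string{"name": podName}},
		Spec: v1.PodSpec{
			NodeName:   nodeName, // bind directly to the given node, bypassing the scheduler
			Containers: []v1.Container{{Name: podName, Image: image}},
		},
	}
}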
@ -122,7 +121,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
// TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed. // TODO(foxish): Re-enable testing on gce after kubernetes#56787 is fixed.
framework.SkipUnlessProviderIs("gke", "aws") framework.SkipUnlessProviderIs("gke", "aws")
if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 { if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
e2elog.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup) framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
} }
}) })
@ -158,12 +157,12 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
return true return true
}) })
if len(nodes.Items) <= 0 { if len(nodes.Items) <= 0 {
e2elog.Failf("No eligible node were found: %d", len(nodes.Items)) framework.Failf("No eligible node were found: %d", len(nodes.Items))
} }
node := nodes.Items[0] node := nodes.Items[0]
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()} podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil { if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil {
e2elog.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err) framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
} }
ginkgo.By("Set up watch on node status") ginkgo.By("Set up watch on node status")
@ -219,7 +218,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
ginkgo.By("Expect to observe node and pod status change from NotReady to Ready after network connectivity recovers") ginkgo.By("Expect to observe node and pod status change from NotReady to Ready after network connectivity recovers")
expectNodeReadiness(true, newNode) expectNodeReadiness(true, newNode)
if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil { if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil {
e2elog.Failf("Pods on node %s did not become ready and running within %v: %v", node.Name, podReadyTimeout, err) framework.Failf("Pods on node %s did not become ready and running within %v: %v", node.Name, podReadyTimeout, err)
} }
}() }()
@ -230,7 +229,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
ginkgo.By("Expect to observe node and pod status change from Ready to NotReady after network partition") ginkgo.By("Expect to observe node and pod status change from Ready to NotReady after network partition")
expectNodeReadiness(false, newNode) expectNodeReadiness(false, newNode)
if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil { if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil {
e2elog.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err) framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err)
} }
}) })
}) })
@ -269,7 +268,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
// pods on another node and that now the number of replicas is equal 'replicas'. // pods on another node and that now the number of replicas is equal 'replicas'.
ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name)) ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() { framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
e2elog.Logf("Waiting for pod %s to be removed", pods.Items[0].Name) framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name) err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -278,9 +277,9 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) { if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
e2elog.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
} }
// sleep a bit, to allow Watch in NodeController to catch up. // sleep a bit, to allow Watch in NodeController to catch up.
@ -300,7 +299,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
pod, err := c.CoreV1().Pods(ns).Get(additionalPod, metav1.GetOptions{}) pod, err := c.CoreV1().Pods(ns).Get(additionalPod, metav1.GetOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
if pod.Spec.NodeName != node.Name { if pod.Spec.NodeName != node.Name {
e2elog.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name) framework.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name)
} }
} }
}) })
@ -338,7 +337,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
// pods on another node and that now the number of replicas is equal 'replicas + 1'. // pods on another node and that now the number of replicas is equal 'replicas + 1'.
ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name)) ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() { framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
e2elog.Logf("Waiting for pod %s to be removed", pods.Items[0].Name) framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name) err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name)
framework.ExpectEqual(err, wait.ErrWaitTimeout, "Pod was not deleted during network partition.") framework.ExpectEqual(err, wait.ErrWaitTimeout, "Pod was not deleted during network partition.")
@ -347,9 +346,9 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) { if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
e2elog.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
} }
}) })
}) })
@ -376,7 +375,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
if ginkgo.CurrentGinkgoTestDescription().Failed { if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.DumpDebugInfo(c, ns) framework.DumpDebugInfo(c, ns)
} }
e2elog.Logf("Deleting all stateful set in ns %v", ns) framework.Logf("Deleting all stateful set in ns %v", ns)
e2esset.DeleteAllStatefulSets(c, ns) e2esset.DeleteAllStatefulSets(c, ns)
}) })
@ -414,14 +413,14 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
// that belongs to StatefulSet 'statefulSetName', **does not** disappear due to forced deletion from the apiserver. // that belongs to StatefulSet 'statefulSetName', **does not** disappear due to forced deletion from the apiserver.
// The grace period on the stateful pods is set to a value > 0. // The grace period on the stateful pods is set to a value > 0.
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() { framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
e2elog.Logf("Checking that the NodeController does not force delete stateful pods %v", pod.Name) framework.Logf("Checking that the NodeController does not force delete stateful pods %v", pod.Name)
err := e2epod.WaitTimeoutForPodNoLongerRunningInNamespace(c, pod.Name, ns, 10*time.Minute) err := e2epod.WaitTimeoutForPodNoLongerRunningInNamespace(c, pod.Name, ns, 10*time.Minute)
framework.ExpectEqual(err, wait.ErrWaitTimeout, "Pod was not deleted during network partition.") framework.ExpectEqual(err, wait.ErrWaitTimeout, "Pod was not deleted during network partition.")
}) })
e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) { if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
e2elog.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
} }
ginkgo.By("waiting for pods to be running again") ginkgo.By("waiting for pods to be running again")
@ -460,7 +459,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
// running pods after the node-controller detects node unreachable. // running pods after the node-controller detects node unreachable.
ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name)) ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() { framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
e2elog.Logf("Waiting for pod %s to be removed", pods.Items[0].Name) framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
err := e2epod.WaitForPodToDisappear(c, ns, pods.Items[0].Name, label, 20*time.Second, 10*time.Minute) err := e2epod.WaitForPodToDisappear(c, ns, pods.Items[0].Name, label, 20*time.Second, 10*time.Minute)
framework.ExpectEqual(err, wait.ErrWaitTimeout, "Pod was not deleted during network partition.") framework.ExpectEqual(err, wait.ErrWaitTimeout, "Pod was not deleted during network partition.")
@ -469,9 +468,9 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name) framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) { if !e2enode.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
e2elog.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
} }
}) })
}) })
@ -506,12 +505,12 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
return true return true
}) })
if len(nodes.Items) <= 0 { if len(nodes.Items) <= 0 {
e2elog.Failf("No eligible node were found: %d", len(nodes.Items)) framework.Failf("No eligible node were found: %d", len(nodes.Items))
} }
node := nodes.Items[0] node := nodes.Items[0]
podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()} podOpts = metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
if err := e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReadyOrSucceeded); err != nil { if err := e2epod.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReadyOrSucceeded); err != nil {
e2elog.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err) framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
} }
pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts) pods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(podOpts)
framework.ExpectNoError(err) framework.ExpectNoError(err)
@ -547,7 +546,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
} }
} }
} }
e2elog.Logf( framework.Logf(
"Only %v should be running after partition. Maximum TolerationSeconds among other Pods is %v", "Only %v should be running after partition. Maximum TolerationSeconds among other Pods is %v",
neverEvictedPods, neverEvictedPods,
maxTolerationTime, maxTolerationTime,
@ -617,7 +616,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
return framework.NodeHasTaint(c, node.Name, nodepkg.UnreachableTaintTemplate) return framework.NodeHasTaint(c, node.Name, nodepkg.UnreachableTaintTemplate)
})) }))
if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil { if err = e2epod.WaitForMatchPodsCondition(c, podOpts, "NotReady", podNotReadyTimeout, testutils.PodNotReady); err != nil {
e2elog.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err) framework.Failf("Pods on node %s did not become NotReady within %v: %v", node.Name, podNotReadyTimeout, err)
} }
sleepTime := maxTolerationTime + 20*time.Second sleepTime := maxTolerationTime + 20*time.Second
@ -637,7 +636,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
if pod.DeletionTimestamp == nil { if pod.DeletionTimestamp == nil {
seenRunning = append(seenRunning, namespacedName) seenRunning = append(seenRunning, namespacedName)
if shouldBeTerminating { if shouldBeTerminating {
e2elog.Failf("Pod %v should have been deleted but was seen running", namespacedName) framework.Failf("Pod %v should have been deleted but was seen running", namespacedName)
} }
} }
} }
@ -651,7 +650,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
} }
} }
if !running { if !running {
e2elog.Failf("Pod %v was evicted even though it shouldn't", neverEvictedPod) framework.Failf("Pod %v was evicted even though it shouldn't", neverEvictedPod)
} }
} }
}) })
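The maxTolerationTime bookkeeping above reflects taint-based eviction: once the node carries the node.kubernetes.io/unreachable NoExecute taint, each pod survives only for its tolerationSeconds, and pods that tolerate the taint without a bound are never evicted. A hedged sketch of such a toleration (the value is illustrative):

// Hedged sketch; assumes the v1 ("k8s.io/api/core/v1") import used elsewhere in this file.
func tolerateUnreachable(pod *v1.Pod, seconds int64) {
	pod.Spec.Tolerations = append(pod.Spec.Tolerations, v1.Toleration{
		Key:               "node.kubernetes.io/unreachable",
		Operator:          v1.TolerationOpExists,
		Effect:            v1.TaintEffectNoExecute,
		TolerationSeconds: &seconds, // evicted roughly this many seconds after the node goes unreachable
	})
}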


@ -29,7 +29,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/controller/replication" "k8s.io/kubernetes/pkg/controller/replication"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
imageutils "k8s.io/kubernetes/test/utils/image" imageutils "k8s.io/kubernetes/test/utils/image"
@ -136,7 +135,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
// Wait for the pods to enter the running state. Waiting loops until the pods // Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test. // are running so non-running pods cause a timeout for this test.
e2elog.Logf("Ensuring all pods for ReplicationController %q are running", name) framework.Logf("Ensuring all pods for ReplicationController %q are running", name)
running := int32(0) running := int32(0)
for _, pod := range pods.Items { for _, pod := range pods.Items {
if pod.DeletionTimestamp != nil { if pod.DeletionTimestamp != nil {
@ -152,7 +151,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
} }
} }
framework.ExpectNoError(err) framework.ExpectNoError(err)
e2elog.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions) framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
running++ running++
} }
@ -162,13 +161,13 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
} }
// Verify that something is listening. // Verify that something is listening.
e2elog.Logf("Trying to dial the pod") framework.Logf("Trying to dial the pod")
retryTimeout := 2 * time.Minute retryTimeout := 2 * time.Minute
retryInterval := 5 * time.Second retryInterval := 5 * time.Second
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
if err != nil { if err != nil {
e2elog.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
} }
} }
@ -181,7 +180,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
namespace := f.Namespace.Name namespace := f.Namespace.Name
name := "condition-test" name := "condition-test"
e2elog.Logf("Creating quota %q that allows only two pods to run in the current namespace", name) framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name)
quota := newPodQuota(name, "2") quota := newPodQuota(name, "2")
_, err := c.CoreV1().ResourceQuotas(namespace).Create(quota) _, err := c.CoreV1().ResourceQuotas(namespace).Create(quota)
framework.ExpectNoError(err) framework.ExpectNoError(err)


@ -30,7 +30,6 @@ import (
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/controller/replicaset" "k8s.io/kubernetes/pkg/controller/replicaset"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
replicasetutil "k8s.io/kubernetes/test/e2e/framework/replicaset" replicasetutil "k8s.io/kubernetes/test/e2e/framework/replicaset"
@ -125,7 +124,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
// Create a ReplicaSet for a service that serves its hostname. // Create a ReplicaSet for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is // The source for the Docker container kubernetes/serve_hostname is
// in contrib/for-demos/serve_hostname // in contrib/for-demos/serve_hostname
e2elog.Logf("Creating ReplicaSet %s", name) framework.Logf("Creating ReplicaSet %s", name)
newRS := newRS(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"}) newRS := newRS(name, replicas, map[string]string{"name": name}, name, image, []string{"serve-hostname"})
newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}} newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
_, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(newRS) _, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(newRS)
@ -138,7 +137,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
// Wait for the pods to enter the running state. Waiting loops until the pods // Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test. // are running so non-running pods cause a timeout for this test.
e2elog.Logf("Ensuring a pod for ReplicaSet %q is running", name) framework.Logf("Ensuring a pod for ReplicaSet %q is running", name)
running := int32(0) running := int32(0)
for _, pod := range pods.Items { for _, pod := range pods.Items {
if pod.DeletionTimestamp != nil { if pod.DeletionTimestamp != nil {
@ -154,7 +153,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
} }
} }
framework.ExpectNoError(err) framework.ExpectNoError(err)
e2elog.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions) framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
running++ running++
} }
@ -164,13 +163,13 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
} }
// Verify that something is listening. // Verify that something is listening.
e2elog.Logf("Trying to dial the pod") framework.Logf("Trying to dial the pod")
retryTimeout := 2 * time.Minute retryTimeout := 2 * time.Minute
retryInterval := 5 * time.Second retryInterval := 5 * time.Second
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses) err = wait.Poll(retryInterval, retryTimeout, e2epod.NewProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
if err != nil { if err != nil {
e2elog.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds()) framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
} }
} }


@ -34,7 +34,6 @@ import (
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
watchtools "k8s.io/client-go/tools/watch" watchtools "k8s.io/client-go/tools/watch"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod" e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eservice "k8s.io/kubernetes/test/e2e/framework/service" e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset" e2esset "k8s.io/kubernetes/test/e2e/framework/statefulset"
@ -90,7 +89,7 @@ var _ = SIGDescribe("StatefulSet", func() {
if ginkgo.CurrentGinkgoTestDescription().Failed { if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.DumpDebugInfo(c, ns) framework.DumpDebugInfo(c, ns)
} }
e2elog.Logf("Deleting all statefulset in ns %v", ns) framework.Logf("Deleting all statefulset in ns %v", ns)
e2esset.DeleteAllStatefulSets(c, ns) e2esset.DeleteAllStatefulSets(c, ns)
}) })
@ -712,7 +711,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name) ginkgo.By("Waiting until pod " + podName + " will start running in namespace " + f.Namespace.Name)
if err := f.WaitForPodRunning(podName); err != nil { if err := f.WaitForPodRunning(podName); err != nil {
e2elog.Failf("Pod %v did not start running: %v", podName, err) framework.Failf("Pod %v did not start running: %v", podName, err)
} }
var initialStatefulPodUID types.UID var initialStatefulPodUID types.UID
@ -726,19 +725,19 @@ var _ = SIGDescribe("StatefulSet", func() {
pod := event.Object.(*v1.Pod) pod := event.Object.(*v1.Pod)
switch event.Type { switch event.Type {
case watch.Deleted: case watch.Deleted:
e2elog.Logf("Observed delete event for stateful pod %v in namespace %v", pod.Name, pod.Namespace) framework.Logf("Observed delete event for stateful pod %v in namespace %v", pod.Name, pod.Namespace)
if initialStatefulPodUID == "" { if initialStatefulPodUID == "" {
return false, nil return false, nil
} }
return true, nil return true, nil
} }
e2elog.Logf("Observed stateful pod in namespace: %v, name: %v, uid: %v, status phase: %v. Waiting for statefulset controller to delete.", framework.Logf("Observed stateful pod in namespace: %v, name: %v, uid: %v, status phase: %v. Waiting for statefulset controller to delete.",
pod.Namespace, pod.Name, pod.UID, pod.Status.Phase) pod.Namespace, pod.Name, pod.UID, pod.Status.Phase)
initialStatefulPodUID = pod.UID initialStatefulPodUID = pod.UID
return false, nil return false, nil
}) })
if err != nil { if err != nil {
e2elog.Failf("Pod %v expected to be re-created at least once", statefulPodName) framework.Failf("Pod %v expected to be re-created at least once", statefulPodName)
} }
ginkgo.By("Removing pod with conflicting port in namespace " + f.Namespace.Name) ginkgo.By("Removing pod with conflicting port in namespace " + f.Namespace.Name)
@ -780,7 +779,7 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("getting scale subresource") ginkgo.By("getting scale subresource")
scale, err := c.AppsV1().StatefulSets(ns).GetScale(ssName, metav1.GetOptions{}) scale, err := c.AppsV1().StatefulSets(ns).GetScale(ssName, metav1.GetOptions{})
if err != nil { if err != nil {
e2elog.Failf("Failed to get scale subresource: %v", err) framework.Failf("Failed to get scale subresource: %v", err)
} }
framework.ExpectEqual(scale.Spec.Replicas, int32(1)) framework.ExpectEqual(scale.Spec.Replicas, int32(1))
framework.ExpectEqual(scale.Status.Replicas, int32(1)) framework.ExpectEqual(scale.Status.Replicas, int32(1))
@ -789,14 +788,14 @@ var _ = SIGDescribe("StatefulSet", func() {
scale.Spec.Replicas = 2 scale.Spec.Replicas = 2
scaleResult, err := c.AppsV1().StatefulSets(ns).UpdateScale(ssName, scale) scaleResult, err := c.AppsV1().StatefulSets(ns).UpdateScale(ssName, scale)
if err != nil { if err != nil {
e2elog.Failf("Failed to put scale subresource: %v", err) framework.Failf("Failed to put scale subresource: %v", err)
} }
framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2)) framework.ExpectEqual(scaleResult.Spec.Replicas, int32(2))
ginkgo.By("verifying the statefulset Spec.Replicas was modified") ginkgo.By("verifying the statefulset Spec.Replicas was modified")
ss, err = c.AppsV1().StatefulSets(ns).Get(ssName, metav1.GetOptions{}) ss, err = c.AppsV1().StatefulSets(ns).Get(ssName, metav1.GetOptions{})
if err != nil { if err != nil {
e2elog.Failf("Failed to get statefulset resource: %v", err) framework.Failf("Failed to get statefulset resource: %v", err)
} }
framework.ExpectEqual(*(ss.Spec.Replicas), int32(2)) framework.ExpectEqual(*(ss.Spec.Replicas), int32(2))
}) })
@ -813,7 +812,7 @@ var _ = SIGDescribe("StatefulSet", func() {
if ginkgo.CurrentGinkgoTestDescription().Failed { if ginkgo.CurrentGinkgoTestDescription().Failed {
framework.DumpDebugInfo(c, ns) framework.DumpDebugInfo(c, ns)
} }
e2elog.Logf("Deleting all statefulset in ns %v", ns) framework.Logf("Deleting all statefulset in ns %v", ns)
e2esset.DeleteAllStatefulSets(c, ns) e2esset.DeleteAllStatefulSets(c, ns)
}) })
@ -853,9 +852,9 @@ func kubectlExecWithRetries(args ...string) (out string) {
if out, err = framework.RunKubectl(args...); err == nil { if out, err = framework.RunKubectl(args...); err == nil {
return return
} }
e2elog.Logf("Retrying %v:\nerror %v\nstdout %v", args, err, out) framework.Logf("Retrying %v:\nerror %v\nstdout %v", args, err, out)
} }
e2elog.Failf("Failed to execute \"%v\" with retries: %v", args, err) framework.Failf("Failed to execute \"%v\" with retries: %v", args, err)
return return
} }
@ -892,7 +891,7 @@ func (c *clusterAppTester) run() {
ginkgo.By("Reading value under foo from member with index 2") ginkgo.By("Reading value under foo from member with index 2")
if err := pollReadWithTimeout(c.statefulPod, 2, "foo", "bar"); err != nil { if err := pollReadWithTimeout(c.statefulPod, 2, "foo", "bar"); err != nil {
e2elog.Failf("%v", err) framework.Failf("%v", err)
} }
} }
@ -915,7 +914,7 @@ func (z *zookeeperTester) write(statefulPodIndex int, kv map[string]string) {
ns := fmt.Sprintf("--namespace=%v", z.ss.Namespace) ns := fmt.Sprintf("--namespace=%v", z.ss.Namespace)
for k, v := range kv { for k, v := range kv {
cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh create /%v %v", k, v) cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh create /%v %v", k, v)
e2elog.Logf(framework.RunKubectlOrDie("exec", ns, name, "--", "/bin/sh", "-c", cmd)) framework.Logf(framework.RunKubectlOrDie("exec", ns, name, "--", "/bin/sh", "-c", cmd))
} }
} }
@ -946,12 +945,12 @@ func (m *mysqlGaleraTester) mysqlExec(cmd, ns, podName string) string {
func (m *mysqlGaleraTester) deploy(ns string) *appsv1.StatefulSet { func (m *mysqlGaleraTester) deploy(ns string) *appsv1.StatefulSet {
m.ss = e2esset.CreateStatefulSet(m.client, mysqlGaleraManifestPath, ns) m.ss = e2esset.CreateStatefulSet(m.client, mysqlGaleraManifestPath, ns)
e2elog.Logf("Deployed statefulset %v, initializing database", m.ss.Name) framework.Logf("Deployed statefulset %v, initializing database", m.ss.Name)
for _, cmd := range []string{ for _, cmd := range []string{
"create database statefulset;", "create database statefulset;",
"use statefulset; create table foo (k varchar(20), v varchar(20));", "use statefulset; create table foo (k varchar(20), v varchar(20));",
} { } {
e2elog.Logf(m.mysqlExec(cmd, ns, fmt.Sprintf("%v-0", m.ss.Name))) framework.Logf(m.mysqlExec(cmd, ns, fmt.Sprintf("%v-0", m.ss.Name)))
} }
return m.ss return m.ss
} }
@ -960,7 +959,7 @@ func (m *mysqlGaleraTester) write(statefulPodIndex int, kv map[string]string) {
name := fmt.Sprintf("%v-%d", m.ss.Name, statefulPodIndex) name := fmt.Sprintf("%v-%d", m.ss.Name, statefulPodIndex)
for k, v := range kv { for k, v := range kv {
cmd := fmt.Sprintf("use statefulset; insert into foo (k, v) values (\"%v\", \"%v\");", k, v) cmd := fmt.Sprintf("use statefulset; insert into foo (k, v) values (\"%v\", \"%v\");", k, v)
e2elog.Logf(m.mysqlExec(cmd, m.ss.Namespace, name)) framework.Logf(m.mysqlExec(cmd, m.ss.Namespace, name))
} }
} }
@ -991,7 +990,7 @@ func (m *redisTester) deploy(ns string) *appsv1.StatefulSet {
func (m *redisTester) write(statefulPodIndex int, kv map[string]string) { func (m *redisTester) write(statefulPodIndex int, kv map[string]string) {
name := fmt.Sprintf("%v-%d", m.ss.Name, statefulPodIndex) name := fmt.Sprintf("%v-%d", m.ss.Name, statefulPodIndex)
for k, v := range kv { for k, v := range kv {
e2elog.Logf(m.redisExec(fmt.Sprintf("SET %v %v", k, v), m.ss.Namespace, name)) framework.Logf(m.redisExec(fmt.Sprintf("SET %v %v", k, v), m.ss.Namespace, name))
} }
} }
@ -1016,12 +1015,12 @@ func (c *cockroachDBTester) cockroachDBExec(cmd, ns, podName string) string {
func (c *cockroachDBTester) deploy(ns string) *appsv1.StatefulSet { func (c *cockroachDBTester) deploy(ns string) *appsv1.StatefulSet {
c.ss = e2esset.CreateStatefulSet(c.client, cockroachDBManifestPath, ns) c.ss = e2esset.CreateStatefulSet(c.client, cockroachDBManifestPath, ns)
e2elog.Logf("Deployed statefulset %v, initializing database", c.ss.Name) framework.Logf("Deployed statefulset %v, initializing database", c.ss.Name)
for _, cmd := range []string{ for _, cmd := range []string{
"CREATE DATABASE IF NOT EXISTS foo;", "CREATE DATABASE IF NOT EXISTS foo;",
"CREATE TABLE IF NOT EXISTS foo.bar (k STRING PRIMARY KEY, v STRING);", "CREATE TABLE IF NOT EXISTS foo.bar (k STRING PRIMARY KEY, v STRING);",
} { } {
e2elog.Logf(c.cockroachDBExec(cmd, ns, fmt.Sprintf("%v-0", c.ss.Name))) framework.Logf(c.cockroachDBExec(cmd, ns, fmt.Sprintf("%v-0", c.ss.Name)))
} }
return c.ss return c.ss
} }
@ -1030,7 +1029,7 @@ func (c *cockroachDBTester) write(statefulPodIndex int, kv map[string]string) {
name := fmt.Sprintf("%v-%d", c.ss.Name, statefulPodIndex) name := fmt.Sprintf("%v-%d", c.ss.Name, statefulPodIndex)
for k, v := range kv { for k, v := range kv {
cmd := fmt.Sprintf("UPSERT INTO foo.bar VALUES ('%v', '%v');", k, v) cmd := fmt.Sprintf("UPSERT INTO foo.bar VALUES ('%v', '%v');", k, v)
e2elog.Logf(c.cockroachDBExec(cmd, c.ss.Namespace, name)) framework.Logf(c.cockroachDBExec(cmd, c.ss.Namespace, name))
} }
} }
func (c *cockroachDBTester) read(statefulPodIndex int, key string) string { func (c *cockroachDBTester) read(statefulPodIndex int, key string) string {
@ -1175,14 +1174,14 @@ func confirmStatefulPodCount(c clientset.Interface, count int, ss *appsv1.Statef
if statefulPodCount != count { if statefulPodCount != count {
e2epod.LogPodStates(podList.Items) e2epod.LogPodStates(podList.Items)
if hard { if hard {
e2elog.Failf("StatefulSet %v scaled unexpectedly scaled to %d -> %d replicas", ss.Name, count, len(podList.Items)) framework.Failf("StatefulSet %v scaled unexpectedly scaled to %d -> %d replicas", ss.Name, count, len(podList.Items))
} else { } else {
e2elog.Logf("StatefulSet %v has not reached scale %d, at %d", ss.Name, count, statefulPodCount) framework.Logf("StatefulSet %v has not reached scale %d, at %d", ss.Name, count, statefulPodCount)
} }
time.Sleep(1 * time.Second) time.Sleep(1 * time.Second)
continue continue
} }
e2elog.Logf("Verifying statefulset %v doesn't scale past %d for another %+v", ss.Name, count, deadline.Sub(t)) framework.Logf("Verifying statefulset %v doesn't scale past %d for another %+v", ss.Name, count, deadline.Sub(t))
time.Sleep(1 * time.Second) time.Sleep(1 * time.Second)
} }
} }