Merge pull request #77425 from figo/e2e
Modify apimachinery,apps,instrumentation tests to import framework/log
Commit: ee0038adaa
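The change applied across the hunks below is mechanical: each affected test file gains an aliased import of the framework/log package, and its framework.Logf calls become e2elog.Logf calls (framework.Failf, framework.ExpectNoError and other helpers are untouched). A minimal sketch of the pattern, with a hypothetical helper name (logQuotaStatus is illustrative, not part of the diff):

    package apimachinery

    import (
    	"k8s.io/kubernetes/test/e2e/framework"
    	e2elog "k8s.io/kubernetes/test/e2e/framework/log" // aliased import added by this PR
    )

    // logQuotaStatus is an illustrative helper, not taken from the diff.
    func logQuotaStatus(used, expected string) {
    	// before: framework.Logf("resource usage: got %s, want %s", used, expected)
    	// after: the same printf-style call goes through the dedicated log package
    	e2elog.Logf("resource usage: got %s, want %s", used, expected)

    	// Non-logging helpers stay on the framework package.
    	_ = framework.TestContext
    }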
@@ -81,6 +81,7 @@ go_library(
 "//test/e2e/apps:go_default_library",
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/deployment:go_default_library",
+"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/metrics:go_default_library",
 "//test/e2e/framework/ssh:go_default_library",
 "//test/utils:go_default_library",

@@ -41,6 +41,7 @@ import (
 rbacv1beta1helpers "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
 "k8s.io/kubernetes/test/e2e/framework"
 e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 imageutils "k8s.io/kubernetes/test/utils/image"
 samplev1alpha1 "k8s.io/sample-apiserver/pkg/apis/wardle/v1alpha1"
 "k8s.io/utils/pointer"
@@ -373,16 +374,16 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 }, "Waited %s for the sample-apiserver to be ready to handle requests.")
 if err != nil {
 currentAPIServiceJSON, _ := json.Marshal(currentAPIService)
-framework.Logf("current APIService: %s", string(currentAPIServiceJSON))
+e2elog.Logf("current APIService: %s", string(currentAPIServiceJSON))

 currentPodsJSON, _ := json.Marshal(currentPods)
-framework.Logf("current pods: %s", string(currentPodsJSON))
+e2elog.Logf("current pods: %s", string(currentPodsJSON))

 if currentPods != nil {
 for _, pod := range currentPods.Items {
 for _, container := range pod.Spec.Containers {
 logs, err := framework.GetPodLogs(client, namespace, pod.Name, container.Name)
-framework.Logf("logs of %s/%s (error: %v): %s", pod.Name, container.Name, err, logs)
+e2elog.Logf("logs of %s/%s (error: %v): %s", pod.Name, container.Name, err, logs)
 }
 }
 }
@@ -485,12 +486,12 @@ func TestSampleAPIServer(f *framework.Framework, aggrclient *aggregatorclient.Cl
 }

 // pollTimed will call Poll but time how long Poll actually took.
-// It will then framework.logf the msg with the duration of the Poll.
+// It will then e2elog.Logf the msg with the duration of the Poll.
 // It is assumed that msg will contain one %s for the elapsed time.
 func pollTimed(interval, timeout time.Duration, condition wait.ConditionFunc, msg string) error {
 defer func(start time.Time, msg string) {
 elapsed := time.Since(start)
-framework.Logf(msg, elapsed)
+e2elog.Logf(msg, elapsed)
 }(time.Now(), msg)
 return wait.Poll(interval, timeout, condition)
 }
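For reference, pollTimed (shown in the hunk above) wraps wait.Poll and logs how long the poll actually took; the caller supplies a message containing a single %s placeholder for the elapsed duration, as the "Waited %s for the sample-apiserver..." string earlier in this hunk does. A minimal usage sketch inside the same test file — isAPIServiceAvailable and waitForSampleAPIServer are hypothetical names, not part of the diff:

    // hypothetical caller in the same file; isAPIServiceAvailable is illustrative
    func waitForSampleAPIServer() {
    	err := pollTimed(100*time.Millisecond, 60*time.Second, func() (bool, error) {
    		// any wait.ConditionFunc works here: return (done, error)
    		return isAPIServiceAvailable(), nil
    	}, "Waited %s for the sample-apiserver to be ready to handle requests.")
    	framework.ExpectNoError(err)
    }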

@@ -33,6 +33,7 @@ import (
 "k8s.io/apiserver/pkg/storage/storagebackend"
 "k8s.io/client-go/util/workqueue"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 )

 const numberOfTotalResources = 400
@@ -62,7 +63,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 if err == nil {
 return
 }
-framework.Logf("Got an error creating template %d: %v", i, err)
+e2elog.Logf("Got an error creating template %d: %v", i, err)
 }
 ginkgo.Fail("Unable to create template %d, exiting", i)
 })
@@ -81,7 +82,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 opts.Limit = int64(rand.Int31n(numberOfTotalResources/10) + 1)
 list, err := client.List(opts)
 gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
-framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
+e2elog.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
 gomega.Expect(len(list.Items)).To(gomega.BeNumerically("<=", opts.Limit))

 if len(lastRV) == 0 {
@@ -122,7 +123,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 firstToken := list.Continue
 firstRV := list.ResourceVersion
 gomega.Expect(int(list.RemainingItemCount) + len(list.Items)).To(gomega.BeNumerically("==", numberOfTotalResources))
-framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, firstToken)
+e2elog.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, firstToken)

 ginkgo.By("retrieving the second page until the token expires")
 opts.Continue = firstToken
@@ -130,13 +131,13 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 wait.Poll(20*time.Second, 2*storagebackend.DefaultCompactInterval, func() (bool, error) {
 _, err := client.List(opts)
 if err == nil {
-framework.Logf("Token %s has not expired yet", firstToken)
+e2elog.Logf("Token %s has not expired yet", firstToken)
 return false, nil
 }
 if err != nil && !errors.IsResourceExpired(err) {
 return false, err
 }
-framework.Logf("got error %s", err)
+e2elog.Logf("got error %s", err)
 status, ok := err.(errors.APIStatus)
 if !ok {
 return false, fmt.Errorf("expect error to implement the APIStatus interface, got %v", reflect.TypeOf(err))
@@ -145,7 +146,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 if len(inconsistentToken) == 0 {
 return false, fmt.Errorf("expect non empty continue token")
 }
-framework.Logf("Retrieved inconsistent continue %s", inconsistentToken)
+e2elog.Logf("Retrieved inconsistent continue %s", inconsistentToken)
 return true, nil
 })

@@ -169,7 +170,7 @@ var _ = SIGDescribe("Servers with support for API chunking", func() {
 list, err := client.List(opts)
 gomega.Expect(err).ToNot(gomega.HaveOccurred(), "failed to list pod templates in namespace: %s, given limit: %d", ns, opts.Limit)
 gomega.Expect(int(list.RemainingItemCount) + len(list.Items) + found).To(gomega.BeNumerically("==", numberOfTotalResources))
-framework.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
+e2elog.Logf("Retrieved %d/%d results with rv %s and continue %s", len(list.Items), opts.Limit, list.ResourceVersion, list.Continue)
 gomega.Expect(len(list.Items)).To(gomega.BeNumerically("<=", opts.Limit))
 gomega.Expect(list.ResourceVersion).To(gomega.Equal(lastRV))
 for _, item := range list.Items {

@@ -33,6 +33,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
 e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/utils/crd"
 imageutils "k8s.io/kubernetes/test/utils/image"
 "k8s.io/utils/pointer"
@@ -169,7 +170,7 @@ func createAuthReaderRoleBindingForCRDConversion(f *framework.Framework, namespa
 },
 })
 if err != nil && errors.IsAlreadyExists(err) {
-framework.Logf("role binding %s already exists", roleBindingCRDName)
+e2elog.Logf("role binding %s already exists", roleBindingCRDName)
 } else {
 framework.ExpectNoError(err, "creating role binding %s:webhook to access configMap", namespace)
 }

@@ -25,6 +25,7 @@ import (
 podutil "k8s.io/kubernetes/pkg/api/v1/pod"
 "k8s.io/kubernetes/test/e2e/apps"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
@@ -114,7 +115,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
 options := metav1.ListOptions{LabelSelector: rcSelector.String()}
 pods, err := podClient.List(options)
 if err != nil {
-framework.Logf("apiserver returned error, as expected before recovery: %v", err)
+e2elog.Logf("apiserver returned error, as expected before recovery: %v", err)
 return false, nil
 }
 if len(pods.Items) == 0 {
@@ -124,7 +125,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
 err = podClient.Delete(pod.Name, metav1.NewDeleteOptions(0))
 gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to delete pod %s in namespace: %s", pod.Name, f.Namespace.Name)
 }
-framework.Logf("apiserver has recovered")
+e2elog.Logf("apiserver has recovered")
 return true, nil
 }))

@@ -38,6 +38,7 @@ import (
 "k8s.io/apiserver/pkg/storage/names"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/metrics"

 "github.com/onsi/ginkgo"
@@ -244,14 +245,14 @@ func gatherMetrics(f *framework.Framework) {
 var summary framework.TestDataSummary
 grabber, err := metrics.NewMetricsGrabber(f.ClientSet, f.KubemarkExternalClusterClientSet, false, false, true, false, false)
 if err != nil {
-framework.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.")
+e2elog.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.")
 } else {
 received, err := grabber.Grab()
 if err != nil {
-framework.Logf("MetricsGrabber failed grab metrics. Skipping metrics gathering.")
+e2elog.Logf("MetricsGrabber failed grab metrics. Skipping metrics gathering.")
 } else {
 summary = (*framework.MetricsForE2E)(&received)
-framework.Logf(summary.PrintHumanReadable())
+e2elog.Logf(summary.PrintHumanReadable())
 }
 }
 }
@@ -653,15 +654,15 @@ var _ = SIGDescribe("Garbage collector", func() {
 _, err := rcClient.Get(rc.Name, metav1.GetOptions{})
 if err == nil {
 pods, _ := podClient.List(metav1.ListOptions{})
-framework.Logf("%d pods remaining", len(pods.Items))
+e2elog.Logf("%d pods remaining", len(pods.Items))
 count := 0
 for _, pod := range pods.Items {
 if pod.ObjectMeta.DeletionTimestamp == nil {
 count++
 }
 }
-framework.Logf("%d pods has nil DeletionTimestamp", count)
-framework.Logf("")
+e2elog.Logf("%d pods has nil DeletionTimestamp", count)
+e2elog.Logf("")
 return false, nil
 }
 if errors.IsNotFound(err) {
@@ -673,10 +674,10 @@ var _ = SIGDescribe("Garbage collector", func() {
 if err2 != nil {
 framework.Failf("%v", err2)
 }
-framework.Logf("%d remaining pods are:", len(pods.Items))
-framework.Logf("The ObjectMeta of the remaining pods are:")
+e2elog.Logf("%d remaining pods are:", len(pods.Items))
+e2elog.Logf("The ObjectMeta of the remaining pods are:")
 for _, pod := range pods.Items {
-framework.Logf("%#v", pod.ObjectMeta)
+e2elog.Logf("%#v", pod.ObjectMeta)
 }
 framework.Failf("failed to delete the rc: %v", err)
 }
@@ -755,15 +756,15 @@ var _ = SIGDescribe("Garbage collector", func() {
 _, err := rcClient.Get(rc1.Name, metav1.GetOptions{})
 if err == nil {
 pods, _ := podClient.List(metav1.ListOptions{})
-framework.Logf("%d pods remaining", len(pods.Items))
+e2elog.Logf("%d pods remaining", len(pods.Items))
 count := 0
 for _, pod := range pods.Items {
 if pod.ObjectMeta.DeletionTimestamp == nil {
 count++
 }
 }
-framework.Logf("%d pods has nil DeletionTimestamp", count)
-framework.Logf("")
+e2elog.Logf("%d pods has nil DeletionTimestamp", count)
+e2elog.Logf("")
 return false, nil
 }
 if errors.IsNotFound(err) {
@@ -775,10 +776,10 @@ var _ = SIGDescribe("Garbage collector", func() {
 if err2 != nil {
 framework.Failf("%v", err2)
 }
-framework.Logf("%d remaining pods are:", len(pods.Items))
-framework.Logf("ObjectMeta of remaining pods are:")
+e2elog.Logf("%d remaining pods are:", len(pods.Items))
+e2elog.Logf("ObjectMeta of remaining pods are:")
 for _, pod := range pods.Items {
-framework.Logf("%#v", pod.ObjectMeta)
+e2elog.Logf("%#v", pod.ObjectMeta)
 }
 framework.Failf("failed to delete rc %s, err: %v", rc1Name, err)
 }
@@ -830,15 +831,15 @@ var _ = SIGDescribe("Garbage collector", func() {
 patch1 := addRefPatch(pod3.Name, pod3.UID)
 pod1, err = podClient.Patch(pod1.Name, types.StrategicMergePatchType, patch1)
 gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod1.Name, f.Namespace.Name, patch1)
-framework.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences)
+e2elog.Logf("pod1.ObjectMeta.OwnerReferences=%#v", pod1.ObjectMeta.OwnerReferences)
 patch2 := addRefPatch(pod1.Name, pod1.UID)
 pod2, err = podClient.Patch(pod2.Name, types.StrategicMergePatchType, patch2)
 gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod2.Name, f.Namespace.Name, patch2)
-framework.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences)
+e2elog.Logf("pod2.ObjectMeta.OwnerReferences=%#v", pod2.ObjectMeta.OwnerReferences)
 patch3 := addRefPatch(pod2.Name, pod2.UID)
 pod3, err = podClient.Patch(pod3.Name, types.StrategicMergePatchType, patch3)
 gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to apply to pod %s in namespace %s, a strategic merge patch: %s", pod3.Name, f.Namespace.Name, patch3)
-framework.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences)
+e2elog.Logf("pod3.ObjectMeta.OwnerReferences=%#v", pod3.ObjectMeta.OwnerReferences)
 // delete one pod, should result in the deletion of all pods
 deleteOptions := getForegroundOptions()
 deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(pod1.UID))
@@ -858,7 +859,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 }
 return false, nil
 }); err != nil {
-framework.Logf("pods are %#v", pods.Items)
+e2elog.Logf("pods are %#v", pods.Items)
 framework.Failf("failed to wait for all pods to be deleted: %v", err)
 }
 })
@@ -909,7 +910,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 if err != nil {
 framework.Failf("failed to create owner resource %q: %v", ownerName, err)
 }
-framework.Logf("created owner resource %q", ownerName)
+e2elog.Logf("created owner resource %q", ownerName)

 // Create a custom dependent resource.
 dependentName := names.SimpleNameGenerator.GenerateName("dependent")
@@ -934,7 +935,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 if err != nil {
 framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
 }
-framework.Logf("created dependent resource %q", dependentName)
+e2elog.Logf("created dependent resource %q", dependentName)

 // Delete the owner.
 background := metav1.DeletePropagationBackground
@@ -948,8 +949,8 @@ var _ = SIGDescribe("Garbage collector", func() {
 _, err := resourceClient.Get(dependentName, metav1.GetOptions{})
 return errors.IsNotFound(err), nil
 }); err != nil {
-framework.Logf("owner: %#v", persistedOwner)
-framework.Logf("dependent: %#v", persistedDependent)
+e2elog.Logf("owner: %#v", persistedOwner)
+e2elog.Logf("dependent: %#v", persistedDependent)
 framework.Failf("failed waiting for dependent resource %q to be deleted", dependentName)
 }

@@ -1010,7 +1011,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 if err != nil {
 framework.Failf("failed to create owner resource %q: %v", ownerName, err)
 }
-framework.Logf("created owner resource %q", ownerName)
+e2elog.Logf("created owner resource %q", ownerName)

 // Create a custom dependent resource.
 dependentName := names.SimpleNameGenerator.GenerateName("dependent")
@@ -1035,7 +1036,7 @@ var _ = SIGDescribe("Garbage collector", func() {
 if err != nil {
 framework.Failf("failed to create dependent resource %q: %v", dependentName, err)
 }
-framework.Logf("created dependent resource %q", dependentName)
+e2elog.Logf("created dependent resource %q", dependentName)

 // Delete the owner and orphan the dependent.
 err = resourceClient.Delete(ownerName, getOrphanOptions())
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
@ -74,7 +75,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
|
||||
}
|
||||
}
|
||||
if cnt > maxAllowedAfterDel {
|
||||
framework.Logf("Remaining namespaces : %v", cnt)
|
||||
e2elog.Logf("Remaining namespaces : %v", cnt)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
|
@ -33,6 +33,7 @@ import (
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/pkg/quota/v1/evaluator/core"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/utils/crd"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
@ -1591,7 +1592,7 @@ func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used v1.R
|
||||
// verify that the quota shows the expected used resource values
|
||||
for k, v := range used {
|
||||
if actualValue, found := resourceQuota.Status.Used[k]; !found || (actualValue.Cmp(v) != 0) {
|
||||
framework.Logf("resource %s, expected %s, actual %s", k, v.String(), actualValue.String())
|
||||
e2elog.Logf("resource %s, expected %s, actual %s", k, v.String(), actualValue.String())
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
@ -35,6 +35,7 @@ import (
|
||||
utilversion "k8s.io/apimachinery/pkg/util/version"
|
||||
"k8s.io/kubernetes/pkg/printers"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
@ -52,7 +53,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
|
||||
c := f.ClientSet
|
||||
|
||||
podName := "pod-1"
|
||||
framework.Logf("Creating pod %s", podName)
|
||||
e2elog.Logf("Creating pod %s", podName)
|
||||
|
||||
_, err := c.CoreV1().Pods(ns).Create(newTablePod(podName))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to create pod %s in namespace: %s", podName, ns)
|
||||
@ -60,7 +61,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
|
||||
table := &metav1beta1.Table{}
|
||||
err = c.CoreV1().RESTClient().Get().Resource("pods").Namespace(ns).Name(podName).SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get pod %s in Table form in namespace: %s", podName, ns)
|
||||
framework.Logf("Table: %#v", table)
|
||||
e2elog.Logf("Table: %#v", table)
|
||||
|
||||
gomega.Expect(len(table.ColumnDefinitions)).To(gomega.BeNumerically(">", 2))
|
||||
gomega.Expect(len(table.Rows)).To(gomega.Equal(1))
|
||||
@ -71,7 +72,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
|
||||
out := printTable(table)
|
||||
gomega.Expect(out).To(gomega.MatchRegexp("^NAME\\s"))
|
||||
gomega.Expect(out).To(gomega.MatchRegexp("\npod-1\\s"))
|
||||
framework.Logf("Table:\n%s", out)
|
||||
e2elog.Logf("Table:\n%s", out)
|
||||
})
|
||||
|
||||
ginkgo.It("should return chunks of table results for list calls", func() {
|
||||
@ -97,7 +98,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
|
||||
if err == nil {
|
||||
return
|
||||
}
|
||||
framework.Logf("Got an error creating template %d: %v", i, err)
|
||||
e2elog.Logf("Got an error creating template %d: %v", i, err)
|
||||
}
|
||||
ginkgo.Fail("Unable to create template %d, exiting", i)
|
||||
})
|
||||
@ -130,7 +131,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
|
||||
table := &metav1beta1.Table{}
|
||||
err := c.CoreV1().RESTClient().Get().Resource("nodes").SetHeader("Accept", "application/json;as=Table;v=v1beta1;g=meta.k8s.io").Do().Into(table)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "failed to get nodes in Table form across all namespaces")
|
||||
framework.Logf("Table: %#v", table)
|
||||
e2elog.Logf("Table: %#v", table)
|
||||
|
||||
gomega.Expect(len(table.ColumnDefinitions)).To(gomega.BeNumerically(">=", 2))
|
||||
gomega.Expect(len(table.Rows)).To(gomega.BeNumerically(">=", 1))
|
||||
@ -141,7 +142,7 @@ var _ = SIGDescribe("Servers with support for Table transformation", func() {
|
||||
|
||||
out := printTable(table)
|
||||
gomega.Expect(out).To(gomega.MatchRegexp("^NAME\\s"))
|
||||
framework.Logf("Table:\n%s", out)
|
||||
e2elog.Logf("Table:\n%s", out)
|
||||
})
|
||||
|
||||
ginkgo.It("should return a 406 for a backend which does not implement metadata", func() {
|
||||
|
@ -27,6 +27,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
@ -413,7 +414,7 @@ func waitForEvent(w watch.Interface, expectType watch.EventType, expectObject ru
|
||||
select {
|
||||
case actual, ok := <-w.ResultChan():
|
||||
if ok {
|
||||
framework.Logf("Got : %v %v", actual.Type, actual.Object)
|
||||
e2elog.Logf("Got : %v %v", actual.Type, actual.Object)
|
||||
} else {
|
||||
framework.Failf("Watch closed unexpectedly")
|
||||
}
|
||||
|
@ -39,6 +39,7 @@ import (
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/utils/crd"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
"k8s.io/utils/pointer"
|
||||
@ -267,7 +268,7 @@ func createAuthReaderRoleBinding(f *framework.Framework, namespace string) {
|
||||
},
|
||||
})
|
||||
if err != nil && errors.IsAlreadyExists(err) {
|
||||
framework.Logf("role binding %s already exists", roleBindingName)
|
||||
e2elog.Logf("role binding %s already exists", roleBindingName)
|
||||
} else {
|
||||
framework.ExpectNoError(err, "creating role binding %s:webhook to access configMap", namespace)
|
||||
}
|
||||
|
@ -63,6 +63,7 @@ go_library(
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/deployment:go_default_library",
|
||||
"//test/e2e/framework/job:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//test/e2e/framework/replicaset:go_default_library",
|
||||
"//test/e2e/framework/ssh:go_default_library",
|
||||
"//test/utils:go_default_library",
|
||||
|
@ -35,6 +35,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/controller/job"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
jobutil "k8s.io/kubernetes/test/e2e/framework/job"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
@ -423,7 +424,7 @@ func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error
|
||||
if len(aliveJobs) > 1 {
|
||||
return false, fmt.Errorf("More than one job is running %+v", jobs.Items)
|
||||
} else if len(aliveJobs) == 0 {
|
||||
framework.Logf("Warning: Found 0 jobs in namespace %v", ns)
|
||||
e2elog.Logf("Warning: Found 0 jobs in namespace %v", ns)
|
||||
return false, nil
|
||||
}
|
||||
return aliveJobs[0].Name != previousJobName, nil
|
||||
|
@ -33,6 +33,7 @@ import (
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/kubernetes/pkg/master/ports"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
@ -72,7 +73,7 @@ type RestartDaemonConfig struct {
|
||||
// NewRestartConfig creates a RestartDaemonConfig for the given node and daemon.
|
||||
func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval, pollTimeout time.Duration) *RestartDaemonConfig {
|
||||
if !framework.ProviderIs("gce") {
|
||||
framework.Logf("WARNING: SSH through the restart config might not work on %s", framework.TestContext.Provider)
|
||||
e2elog.Logf("WARNING: SSH through the restart config might not work on %s", framework.TestContext.Provider)
|
||||
}
|
||||
return &RestartDaemonConfig{
|
||||
nodeName: nodeName,
|
||||
@ -89,7 +90,7 @@ func (r *RestartDaemonConfig) String() string {
|
||||
|
||||
// waitUp polls healthz of the daemon till it returns "ok" or the polling hits the pollTimeout
|
||||
func (r *RestartDaemonConfig) waitUp() {
|
||||
framework.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r)
|
||||
e2elog.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r)
|
||||
healthzCheck := fmt.Sprintf(
|
||||
"curl -s -o /dev/null -I -w \"%%{http_code}\" http://localhost:%v/healthz", r.healthzPort)
|
||||
|
||||
@ -99,12 +100,12 @@ func (r *RestartDaemonConfig) waitUp() {
|
||||
if result.Code == 0 {
|
||||
httpCode, err := strconv.Atoi(result.Stdout)
|
||||
if err != nil {
|
||||
framework.Logf("Unable to parse healthz http return code: %v", err)
|
||||
e2elog.Logf("Unable to parse healthz http return code: %v", err)
|
||||
} else if httpCode == 200 {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
framework.Logf("node %v exec command, '%v' failed with exitcode %v: \n\tstdout: %v\n\tstderr: %v",
|
||||
e2elog.Logf("node %v exec command, '%v' failed with exitcode %v: \n\tstdout: %v\n\tstderr: %v",
|
||||
r.nodeName, healthzCheck, result.Code, result.Stdout, result.Stderr)
|
||||
return false, nil
|
||||
})
|
||||
@ -113,7 +114,7 @@ func (r *RestartDaemonConfig) waitUp() {
|
||||
|
||||
// kill sends a SIGTERM to the daemon
|
||||
func (r *RestartDaemonConfig) kill() {
|
||||
framework.Logf("Killing %v", r)
|
||||
e2elog.Logf("Killing %v", r)
|
||||
_, err := e2essh.NodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName), framework.TestContext.Provider)
|
||||
framework.ExpectNoError(err)
|
||||
}
|
||||
@ -301,7 +302,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
|
||||
framework.ExpectNoError(err)
|
||||
preRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
|
||||
if preRestarts != 0 {
|
||||
framework.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes)
|
||||
e2elog.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes)
|
||||
}
|
||||
for _, ip := range nodeIPs {
|
||||
restarter := NewRestartConfig(
|
||||
@ -310,7 +311,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
|
||||
}
|
||||
postRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
|
||||
if postRestarts != preRestarts {
|
||||
framework.DumpNodeDebugInfo(f.ClientSet, badNodes, framework.Logf)
|
||||
framework.DumpNodeDebugInfo(f.ClientSet, badNodes, e2elog.Logf)
|
||||
framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
|
||||
}
|
||||
})
|
||||
|
@ -37,6 +37,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/controller/daemon"
|
||||
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
@ -78,14 +79,14 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
}
|
||||
}
|
||||
if daemonsets, err := f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
|
||||
framework.Logf("daemonset: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets))
|
||||
e2elog.Logf("daemonset: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), daemonsets))
|
||||
} else {
|
||||
framework.Logf("unable to dump daemonsets: %v", err)
|
||||
e2elog.Logf("unable to dump daemonsets: %v", err)
|
||||
}
|
||||
if pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{}); err == nil {
|
||||
framework.Logf("pods: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), pods))
|
||||
e2elog.Logf("pods: %s", runtime.EncodeOrDie(scheme.Codecs.LegacyCodec(scheme.Scheme.PrioritizedVersionsAllGroups()...), pods))
|
||||
} else {
|
||||
framework.Logf("unable to dump pods: %v", err)
|
||||
e2elog.Logf("unable to dump pods: %v", err)
|
||||
}
|
||||
err = clearDaemonSetNodeLabels(f.ClientSet)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
@ -148,7 +149,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
framework.ConformanceIt("should run and stop complex daemon", func() {
|
||||
complexLabel := map[string]string{daemonsetNameLabel: dsName}
|
||||
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
|
||||
framework.Logf("Creating daemon %q with a node selector", dsName)
|
||||
e2elog.Logf("Creating daemon %q with a node selector", dsName)
|
||||
ds := newDaemonSet(dsName, image, complexLabel)
|
||||
ds.Spec.Template.Spec.NodeSelector = nodeSelector
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
|
||||
@ -195,7 +196,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
ginkgo.It("should run and stop complex daemon with node affinity", func() {
|
||||
complexLabel := map[string]string{daemonsetNameLabel: dsName}
|
||||
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
|
||||
framework.Logf("Creating daemon %q with a node affinity", dsName)
|
||||
e2elog.Logf("Creating daemon %q with a node affinity", dsName)
|
||||
ds := newDaemonSet(dsName, image, complexLabel)
|
||||
ds.Spec.Template.Spec.Affinity = &v1.Affinity{
|
||||
NodeAffinity: &v1.NodeAffinity{
|
||||
@ -277,7 +278,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
ginkgo.It("should not update pod when spec was updated and update strategy is OnDelete", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
framework.Logf("Creating simple daemon set %s", dsName)
|
||||
e2elog.Logf("Creating simple daemon set %s", dsName)
|
||||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.OnDeleteDaemonSetStrategyType}
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
|
||||
@ -326,7 +327,7 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
framework.ConformanceIt("should update pod when spec was updated and update strategy is RollingUpdate", func() {
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
|
||||
framework.Logf("Creating simple daemon set %s", dsName)
|
||||
e2elog.Logf("Creating simple daemon set %s", dsName)
|
||||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
|
||||
@ -383,18 +384,18 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
framework.ConformanceIt("should rollback without unnecessary restarts", func() {
|
||||
schedulableNodes := framework.GetReadySchedulableNodesOrDie(c)
|
||||
gomega.Expect(len(schedulableNodes.Items)).To(gomega.BeNumerically(">", 1), "Conformance test suite needs a cluster with at least 2 nodes.")
|
||||
framework.Logf("Create a RollingUpdate DaemonSet")
|
||||
e2elog.Logf("Create a RollingUpdate DaemonSet")
|
||||
label := map[string]string{daemonsetNameLabel: dsName}
|
||||
ds := newDaemonSet(dsName, image, label)
|
||||
ds.Spec.UpdateStrategy = apps.DaemonSetUpdateStrategy{Type: apps.RollingUpdateDaemonSetStrategyType}
|
||||
ds, err := c.AppsV1().DaemonSets(ns).Create(ds)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Check that daemon pods launch on every node of the cluster")
|
||||
e2elog.Logf("Check that daemon pods launch on every node of the cluster")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, ds))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for daemon pod to start")
|
||||
|
||||
framework.Logf("Update the DaemonSet to trigger a rollout")
|
||||
e2elog.Logf("Update the DaemonSet to trigger a rollout")
|
||||
// We use a nonexistent image here, so that we make sure it won't finish
|
||||
newImage := "foo:non-existent"
|
||||
newDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
|
||||
@ -428,13 +429,13 @@ var _ = SIGDescribe("Daemon set [Serial]", func() {
|
||||
}
|
||||
gomega.Expect(len(newPods)).NotTo(gomega.Equal(0))
|
||||
|
||||
framework.Logf("Roll back the DaemonSet before rollout is complete")
|
||||
e2elog.Logf("Roll back the DaemonSet before rollout is complete")
|
||||
rollbackDS, err := framework.UpdateDaemonSetWithRetries(c, ns, ds.Name, func(update *apps.DaemonSet) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = image
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Make sure DaemonSet rollback is complete")
|
||||
e2elog.Logf("Make sure DaemonSet rollback is complete")
|
||||
err = wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, checkDaemonPodsImageAndAvailability(c, rollbackDS, image, 1))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
@ -561,7 +562,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
|
||||
return true, err
|
||||
}
|
||||
if se, ok := err.(*apierrors.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
|
||||
framework.Logf("failed to update node due to resource version conflict")
|
||||
e2elog.Logf("failed to update node due to resource version conflict")
|
||||
return false, nil
|
||||
}
|
||||
return false, err
|
||||
@ -579,7 +580,7 @@ func checkDaemonPodOnNodes(f *framework.Framework, ds *apps.DaemonSet, nodeNames
|
||||
return func() (bool, error) {
|
||||
podList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
framework.Logf("could not get the pod list: %v", err)
|
||||
e2elog.Logf("could not get the pod list: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
pods := podList.Items
|
||||
@ -596,17 +597,17 @@ func checkDaemonPodOnNodes(f *framework.Framework, ds *apps.DaemonSet, nodeNames
|
||||
nodesToPodCount[pod.Spec.NodeName]++
|
||||
}
|
||||
}
|
||||
framework.Logf("Number of nodes with available pods: %d", len(nodesToPodCount))
|
||||
e2elog.Logf("Number of nodes with available pods: %d", len(nodesToPodCount))
|
||||
|
||||
// Ensure that exactly 1 pod is running on all nodes in nodeNames.
|
||||
for _, nodeName := range nodeNames {
|
||||
if nodesToPodCount[nodeName] != 1 {
|
||||
framework.Logf("Node %s is running more than one daemon pod", nodeName)
|
||||
e2elog.Logf("Node %s is running more than one daemon pod", nodeName)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
framework.Logf("Number of running nodes: %d, number of available pods: %d", len(nodeNames), len(nodesToPodCount))
|
||||
e2elog.Logf("Number of running nodes: %d, number of available pods: %d", len(nodeNames), len(nodesToPodCount))
|
||||
// Ensure that sizes of the lists are the same. We've verified that every element of nodeNames is in
|
||||
// nodesToPodCount, so verifying the lengths are equal ensures that there aren't pods running on any
|
||||
// other nodes.
|
||||
@ -627,7 +628,7 @@ func schedulableNodes(c clientset.Interface, ds *apps.DaemonSet) []string {
|
||||
nodeNames := make([]string, 0)
|
||||
for _, node := range nodeList.Items {
|
||||
if !canScheduleOnNode(node, ds) {
|
||||
framework.Logf("DaemonSet pods can't tolerate node %s with taints %+v, skip checking this node", node.Name, node.Spec.Taints)
|
||||
e2elog.Logf("DaemonSet pods can't tolerate node %s with taints %+v, skip checking this node", node.Name, node.Spec.Taints)
|
||||
continue
|
||||
}
|
||||
nodeNames = append(nodeNames, node.Name)
|
||||
@ -692,12 +693,12 @@ func checkDaemonPodsImageAndAvailability(c clientset.Interface, ds *apps.DaemonS
|
||||
}
|
||||
podImage := pod.Spec.Containers[0].Image
|
||||
if podImage != image {
|
||||
framework.Logf("Wrong image for pod: %s. Expected: %s, got: %s.", pod.Name, image, podImage)
|
||||
e2elog.Logf("Wrong image for pod: %s. Expected: %s, got: %s.", pod.Name, image, podImage)
|
||||
} else {
|
||||
nodesToUpdatedPodCount[pod.Spec.NodeName]++
|
||||
}
|
||||
if !podutil.IsPodAvailable(&pod, ds.Spec.MinReadySeconds, metav1.Now()) {
|
||||
framework.Logf("Pod %s is not available", pod.Name)
|
||||
e2elog.Logf("Pod %s is not available", pod.Name)
|
||||
unavailablePods++
|
||||
}
|
||||
}
|
||||
@ -736,7 +737,7 @@ func waitForHistoryCreated(c clientset.Interface, ns string, label map[string]st
|
||||
if len(historyList.Items) == numHistory {
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("%d/%d controllerrevisions created.", len(historyList.Items), numHistory)
|
||||
e2elog.Logf("%d/%d controllerrevisions created.", len(historyList.Items), numHistory)
|
||||
return false, nil
|
||||
}
|
||||
err := wait.PollImmediate(dsRetryPeriod, dsRetryTimeout, listHistoryFn)
|
||||
|
@ -40,6 +40,7 @@ import (
|
||||
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/framework/replicaset"
|
||||
testutil "k8s.io/kubernetes/test/utils"
|
||||
utilpointer "k8s.io/utils/pointer"
|
||||
@ -128,49 +129,49 @@ var _ = SIGDescribe("Deployment", func() {
|
||||
func failureTrap(c clientset.Interface, ns string) {
|
||||
deployments, err := c.AppsV1().Deployments(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
if err != nil {
|
||||
framework.Logf("Could not list Deployments in namespace %q: %v", ns, err)
|
||||
e2elog.Logf("Could not list Deployments in namespace %q: %v", ns, err)
|
||||
return
|
||||
}
|
||||
for i := range deployments.Items {
|
||||
d := deployments.Items[i]
|
||||
|
||||
framework.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d))
|
||||
e2elog.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d))
|
||||
_, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c.AppsV1())
|
||||
if err != nil {
|
||||
framework.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err)
|
||||
e2elog.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err)
|
||||
return
|
||||
}
|
||||
testutil.LogReplicaSetsOfDeployment(&d, allOldRSs, newRS, framework.Logf)
|
||||
testutil.LogReplicaSetsOfDeployment(&d, allOldRSs, newRS, e2elog.Logf)
|
||||
rsList := allOldRSs
|
||||
if newRS != nil {
|
||||
rsList = append(rsList, newRS)
|
||||
}
|
||||
testutil.LogPodsOfDeployment(c, &d, rsList, framework.Logf)
|
||||
testutil.LogPodsOfDeployment(c, &d, rsList, e2elog.Logf)
|
||||
}
|
||||
// We need print all the ReplicaSets if there are no Deployment object created
|
||||
if len(deployments.Items) != 0 {
|
||||
return
|
||||
}
|
||||
framework.Logf("Log out all the ReplicaSets if there is no deployment created")
|
||||
e2elog.Logf("Log out all the ReplicaSets if there is no deployment created")
|
||||
rss, err := c.AppsV1().ReplicaSets(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
if err != nil {
|
||||
framework.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err)
|
||||
e2elog.Logf("Could not list ReplicaSets in namespace %q: %v", ns, err)
|
||||
return
|
||||
}
|
||||
for _, rs := range rss.Items {
|
||||
framework.Logf(spew.Sprintf("ReplicaSet %q:\n%+v\n", rs.Name, rs))
|
||||
e2elog.Logf(spew.Sprintf("ReplicaSet %q:\n%+v\n", rs.Name, rs))
|
||||
selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector)
|
||||
if err != nil {
|
||||
framework.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err)
|
||||
e2elog.Logf("failed to get selector of ReplicaSet %s: %v", rs.Name, err)
|
||||
}
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.CoreV1().Pods(rs.Namespace).List(options)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to list Pods in namespace %s: %v", rs.Namespace, err)
|
||||
e2elog.Logf("Failed to list Pods in namespace %s: %v", rs.Namespace, err)
|
||||
continue
|
||||
}
|
||||
for _, pod := range podList.Items {
|
||||
framework.Logf(spew.Sprintf("pod: %q:\n%+v\n", pod.Name, pod))
|
||||
e2elog.Logf(spew.Sprintf("pod: %q:\n%+v\n", pod.Name, pod))
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -192,21 +193,21 @@ func stopDeployment(c clientset.Interface, ns, deploymentName string) {
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Deleting deployment %s", deploymentName)
|
||||
e2elog.Logf("Deleting deployment %s", deploymentName)
|
||||
framework.ExpectNoError(framework.DeleteResourceAndWaitForGC(c, appsinternal.Kind("Deployment"), ns, deployment.Name))
|
||||
|
||||
framework.Logf("Ensuring deployment %s was deleted", deploymentName)
|
||||
e2elog.Logf("Ensuring deployment %s was deleted", deploymentName)
|
||||
_, err = c.AppsV1().Deployments(ns).Get(deployment.Name, metav1.GetOptions{})
|
||||
gomega.Expect(err).To(gomega.HaveOccurred())
|
||||
gomega.Expect(errors.IsNotFound(err)).To(gomega.BeTrue())
|
||||
framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
|
||||
e2elog.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
|
||||
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
options := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
rss, err := c.AppsV1().ReplicaSets(ns).List(options)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
gomega.Expect(rss.Items).Should(gomega.HaveLen(0))
|
||||
framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
|
||||
e2elog.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
|
||||
var pods *v1.PodList
|
||||
if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
|
||||
pods, err = c.CoreV1().Pods(ns).List(options)
|
||||
@ -230,7 +231,7 @@ func testDeleteDeployment(f *framework.Framework) {
|
||||
deploymentName := "test-new-deployment"
|
||||
podLabels := map[string]string{"name": NginxImageName}
|
||||
replicas := int32(1)
|
||||
framework.Logf("Creating simple deployment %s", deploymentName)
|
||||
e2elog.Logf("Creating simple deployment %s", deploymentName)
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
d.Annotations = map[string]string{"test": "should-copy-to-replica-set", v1.LastAppliedConfigAnnotation: "should-not-copy-to-replica-set"}
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
@ -268,7 +269,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
|
||||
annotations[deploymentutil.RevisionAnnotation] = rsRevision
|
||||
rs := newRS(rsName, replicas, rsPodLabels, NginxImageName, NginxImage)
|
||||
rs.Annotations = annotations
|
||||
framework.Logf("Creating replica set %q (going to be adopted)", rs.Name)
|
||||
e2elog.Logf("Creating replica set %q (going to be adopted)", rs.Name)
|
||||
_, err := c.AppsV1().ReplicaSets(ns).Create(rs)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
// Verify that the required pods have come up.
|
||||
@ -277,22 +278,22 @@ func testRollingUpdateDeployment(f *framework.Framework) {
|
||||
|
||||
// Create a deployment to delete nginx pods and instead bring up redis pods.
|
||||
deploymentName := "test-rolling-update-deployment"
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
e2elog.Logf("Creating deployment %q", deploymentName)
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, deploymentPodLabels, RedisImageName, RedisImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 3546343826724305833.
|
||||
framework.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name)
|
||||
e2elog.Logf("Ensuring deployment %q gets the next revision from the one the adopted replica set %q has", deploy.Name, rs.Name)
|
||||
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "3546343826724305833", RedisImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Ensuring status for deployment %q is the expected", deploy.Name)
|
||||
e2elog.Logf("Ensuring status for deployment %q is the expected", deploy.Name)
|
||||
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// There should be 1 old RS (nginx-controller, which is adopted)
|
||||
framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
|
||||
e2elog.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
_, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
|
||||
@ -306,28 +307,28 @@ func testRecreateDeployment(f *framework.Framework) {
|
||||
|
||||
// Create a deployment that brings up redis pods.
|
||||
deploymentName := "test-recreate-deployment"
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
e2elog.Logf("Creating deployment %q", deploymentName)
|
||||
d := e2edeploy.NewDeployment(deploymentName, int32(1), map[string]string{"name": "sample-pod-3"}, RedisImageName, RedisImage, apps.RecreateDeploymentStrategyType)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 1
|
||||
framework.Logf("Waiting deployment %q to be updated to revision 1", deploymentName)
|
||||
e2elog.Logf("Waiting deployment %q to be updated to revision 1", deploymentName)
|
||||
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "1", RedisImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Waiting deployment %q to complete", deploymentName)
|
||||
e2elog.Logf("Waiting deployment %q to complete", deploymentName)
|
||||
gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Update deployment to delete redis pods and bring up nginx pods.
|
||||
framework.Logf("Triggering a new rollout for deployment %q", deploymentName)
|
||||
e2elog.Logf("Triggering a new rollout for deployment %q", deploymentName)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deploymentName, func(update *apps.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Name = NginxImageName
|
||||
update.Spec.Template.Spec.Containers[0].Image = NginxImage
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName)
|
||||
e2elog.Logf("Watching deployment %q to verify that new pods will not run with olds pods", deploymentName)
|
||||
gomega.Expect(e2edeploy.WatchRecreateDeployment(c, deployment)).NotTo(gomega.HaveOccurred())
|
||||
}
|
||||
|
||||
@ -353,7 +354,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
|
||||
|
||||
// Create a deployment to delete nginx pods and instead bring up redis pods.
|
||||
deploymentName := "test-cleanup-deployment"
|
||||
framework.Logf("Creating deployment %s", deploymentName)
|
||||
e2elog.Logf("Creating deployment %s", deploymentName)
|
||||
|
||||
pods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to query for pods: %v", err)
|
||||
@ -422,7 +423,7 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err)
|
||||
|
||||
// Wait for replica set to become ready before adopting it.
|
||||
framework.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
|
||||
e2elog.Logf("Waiting for pods owned by replica set %q to become ready", rsName)
|
||||
gomega.Expect(replicaset.WaitForReadyReplicaSet(c, ns, rsName)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Create a deployment to delete nginx pods and instead bring up redis-slave pods.
|
||||
@ -431,7 +432,7 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
deploymentReplicas := int32(1)
|
||||
deploymentImage := "gcr.io/google_samples/gb-redisslave:nonexistent"
|
||||
deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
e2elog.Logf("Creating deployment %q", deploymentName)
|
||||
newDeployment := e2edeploy.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
|
||||
newDeployment.Spec.Strategy.RollingUpdate = &apps.RollingUpdateDeployment{
|
||||
MaxUnavailable: intOrStrP(0),
|
||||
@ -444,15 +445,15 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
// Verify that the pods were scaled up and down as expected.
|
||||
deployment, err := c.AppsV1().Deployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
framework.Logf("Make sure deployment %q performs scaling operations", deploymentName)
|
||||
e2elog.Logf("Make sure deployment %q performs scaling operations", deploymentName)
|
||||
// Make sure the deployment starts to scale up and down replica sets by checking if its updated replicas >= 1
|
||||
err = e2edeploy.WaitForDeploymentUpdatedReplicasGTE(c, ns, deploymentName, deploymentReplicas, deployment.Generation)
|
||||
// Check if it's updated to revision 1 correctly
|
||||
framework.Logf("Check revision of new replica set for deployment %q", deploymentName)
|
||||
e2elog.Logf("Check revision of new replica set for deployment %q", deploymentName)
|
||||
err = e2edeploy.CheckDeploymentRevisionAndImage(c, ns, deploymentName, "1", deploymentImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Ensure that both replica sets have 1 created replica")
|
||||
e2elog.Logf("Ensure that both replica sets have 1 created replica")
|
||||
oldRS, err := c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
ensureReplicas(oldRS, int32(1))
|
||||
@ -461,7 +462,7 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
ensureReplicas(newRS, int32(1))
|
||||
|
||||
// The deployment is stuck, update it to rollover the above 2 ReplicaSets and bring up redis pods.
|
||||
framework.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
|
||||
e2elog.Logf("Rollover old replica sets for deployment %q with new image update", deploymentName)
|
||||
updatedDeploymentImageName, updatedDeploymentImage := RedisImageName, RedisImage
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
|
||||
@ -470,20 +471,20 @@ func testRolloverDeployment(f *framework.Framework) {
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Use observedGeneration to determine if the controller noticed the pod template update.
|
||||
framework.Logf("Wait deployment %q to be observed by the deployment controller", deploymentName)
|
||||
e2elog.Logf("Wait deployment %q to be observed by the deployment controller", deploymentName)
|
||||
err = e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Wait for it to be updated to revision 2
|
||||
framework.Logf("Wait for revision update of deployment %q to 2", deploymentName)
|
||||
e2elog.Logf("Wait for revision update of deployment %q to 2", deploymentName)
|
||||
err = e2edeploy.WaitForDeploymentRevisionAndImage(c, ns, deploymentName, "2", updatedDeploymentImage)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Make sure deployment %q is complete", deploymentName)
|
||||
e2elog.Logf("Make sure deployment %q is complete", deploymentName)
|
||||
err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Ensure that both old replica sets have no replicas")
|
||||
e2elog.Logf("Ensure that both old replica sets have no replicas")
|
||||
oldRS, err = c.AppsV1().ReplicaSets(ns).Get(rsName, metav1.GetOptions{})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
ensureReplicas(oldRS, int32(0))
|
||||
@ -514,7 +515,7 @@ func testRollbackDeployment(f *framework.Framework) {
|
||||
deploymentReplicas := int32(1)
|
||||
deploymentImage := NginxImage
|
||||
deploymentStrategyType := apps.RollingUpdateDeploymentStrategyType
|
||||
framework.Logf("Creating deployment %s", deploymentName)
|
||||
e2elog.Logf("Creating deployment %s", deploymentName)
|
||||
d := e2edeploy.NewDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType)
|
||||
createAnnotation := map[string]string{"action": "create", "author": "node"}
|
||||
d.Annotations = createAnnotation
|
||||
@ -560,7 +561,7 @@ func testRollbackDeployment(f *framework.Framework) {

// 3. Update the deploymentRollback to rollback to revision 1
revision := int64(1)
framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
e2elog.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
rollback := newDeploymentRollback(deploymentName, nil, revision)
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@ -583,7 +584,7 @@ func testRollbackDeployment(f *framework.Framework) {

// 4. Update the deploymentRollback to rollback to last revision
revision = 0
framework.Logf("rolling back deployment %s to last revision", deploymentName)
e2elog.Logf("rolling back deployment %s to last revision", deploymentName)
rollback = newDeploymentRollback(deploymentName, nil, revision)
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@ -605,7 +606,7 @@ func testRollbackDeployment(f *framework.Framework) {
// 5. Update the deploymentRollback to rollback to revision 10
// Since there's no revision 10 in history, it should stay as revision 4
revision = 10
framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
e2elog.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
rollback = newDeploymentRollback(deploymentName, nil, revision)
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
@ -623,7 +624,7 @@ func testRollbackDeployment(f *framework.Framework) {
// 6. Update the deploymentRollback to rollback to revision 4
// Since it's already revision 4, it should be no-op
revision = 4
framework.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
e2elog.Logf("rolling back deployment %s to revision %d", deploymentName, revision)
rollback = newDeploymentRollback(deploymentName, nil, revision)
err = c.ExtensionsV1beta1().Deployments(ns).Rollback(rollback)
gomega.Expect(err).NotTo(gomega.HaveOccurred())
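For reference, these rollbacks go through the extensions/v1beta1 rollback subresource, where revision 0 means "previous revision" and a revision that is not in history leaves the deployment unchanged. A minimal sketch of what the newDeploymentRollback helper presumably builds (helper body assumed; the struct fields are the real extensions/v1beta1 API, aliased here as extensionsv1beta1 for k8s.io/api/extensions/v1beta1):

func newDeploymentRollback(name string, annotations map[string]string, revision int64) *extensionsv1beta1.DeploymentRollback {
	return &extensionsv1beta1.DeploymentRollback{
		Name:               name,        // deployment to roll back
		UpdatedAnnotations: annotations, // optional annotations to apply on rollback
		// Revision 0 is interpreted by the controller as "roll back to the previous revision".
		RollbackTo: extensionsv1beta1.RollbackConfig{Revision: revision},
	}
}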
@ -642,11 +643,11 @@ func testRollbackDeployment(f *framework.Framework) {
func randomScale(d *apps.Deployment, i int) {
switch r := rand.Float32(); {
case r < 0.3:
framework.Logf("%02d: scaling up", i)
e2elog.Logf("%02d: scaling up", i)
*(d.Spec.Replicas)++
case r < 0.6:
if *(d.Spec.Replicas) > 1 {
framework.Logf("%02d: scaling down", i)
e2elog.Logf("%02d: scaling down", i)
*(d.Spec.Replicas)--
}
}
@ -668,7 +669,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
d.Spec.ProgressDeadlineSeconds = &thirty
|
||||
d.Spec.RevisionHistoryLimit = &two
|
||||
d.Spec.Template.Spec.TerminationGracePeriodSeconds = &zero
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
e2elog.Logf("Creating deployment %q", deploymentName)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
@ -681,7 +682,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
switch n := rand.Float32(); {
|
||||
case n < 0.2:
|
||||
// trigger a new deployment
|
||||
framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
|
||||
e2elog.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
|
||||
update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
|
||||
@ -691,7 +692,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
|
||||
case n < 0.4:
|
||||
// rollback to the previous version
|
||||
framework.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
|
||||
e2elog.Logf("%02d: rolling back a rollout for deployment %q", i, deployment.Name)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
if update.Annotations == nil {
|
||||
update.Annotations = make(map[string]string)
|
||||
@ -702,7 +703,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
|
||||
case n < 0.6:
|
||||
// just scaling
|
||||
framework.Logf("%02d: scaling deployment %q", i, deployment.Name)
|
||||
e2elog.Logf("%02d: scaling deployment %q", i, deployment.Name)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
randomScale(update, i)
|
||||
})
|
||||
@ -711,14 +712,14 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
case n < 0.8:
|
||||
// toggling the deployment
|
||||
if deployment.Spec.Paused {
|
||||
framework.Logf("%02d: pausing deployment %q", i, deployment.Name)
|
||||
e2elog.Logf("%02d: pausing deployment %q", i, deployment.Name)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Paused = true
|
||||
randomScale(update, i)
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
} else {
|
||||
framework.Logf("%02d: resuming deployment %q", i, deployment.Name)
|
||||
e2elog.Logf("%02d: resuming deployment %q", i, deployment.Name)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Paused = false
|
||||
randomScale(update, i)
|
||||
@ -728,14 +729,14 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
|
||||
default:
|
||||
// arbitrarily delete deployment pods
|
||||
framework.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name)
|
||||
e2elog.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name)
|
||||
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
opts := metav1.ListOptions{LabelSelector: selector.String()}
|
||||
podList, err := c.CoreV1().Pods(ns).List(opts)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
if len(podList.Items) == 0 {
|
||||
framework.Logf("%02d: no deployment pods to delete", i)
|
||||
e2elog.Logf("%02d: no deployment pods to delete", i)
|
||||
continue
|
||||
}
|
||||
for p := range podList.Items {
|
||||
@ -743,7 +744,7 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
continue
|
||||
}
|
||||
name := podList.Items[p].Name
|
||||
framework.Logf("%02d: deleting deployment pod %q", i, name)
|
||||
e2elog.Logf("%02d: deleting deployment pod %q", i, name)
|
||||
err := c.CoreV1().Pods(ns).Delete(name, nil)
|
||||
if err != nil && !errors.IsNotFound(err) {
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
@ -761,13 +762,13 @@ func testIterativeDeployments(f *framework.Framework) {
|
||||
})
|
||||
}
|
||||
|
||||
framework.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
|
||||
e2elog.Logf("Waiting for deployment %q to be observed by the controller", deploymentName)
|
||||
gomega.Expect(e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Waiting for deployment %q status", deploymentName)
|
||||
e2elog.Logf("Waiting for deployment %q status", deploymentName)
|
||||
gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Checking deployment %q for a complete condition", deploymentName)
|
||||
e2elog.Logf("Checking deployment %q for a complete condition", deploymentName)
|
||||
gomega.Expect(e2edeploy.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, apps.DeploymentProgressing)).NotTo(gomega.HaveOccurred())
|
||||
}
|
||||
|
||||
@ -776,7 +777,7 @@ func testDeploymentsControllerRef(f *framework.Framework) {
|
||||
c := f.ClientSet
|
||||
|
||||
deploymentName := "test-orphan-deployment"
|
||||
framework.Logf("Creating Deployment %q", deploymentName)
|
||||
e2elog.Logf("Creating Deployment %q", deploymentName)
|
||||
podLabels := map[string]string{"name": NginxImageName}
|
||||
replicas := int32(1)
|
||||
d := e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
@ -785,18 +786,18 @@ func testDeploymentsControllerRef(f *framework.Framework) {
|
||||
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
|
||||
e2elog.Logf("Verifying Deployment %q has only one ReplicaSet", deploymentName)
|
||||
rsList := listDeploymentReplicaSets(c, ns, podLabels)
|
||||
gomega.Expect(len(rsList.Items)).Should(gomega.Equal(1))
|
||||
|
||||
framework.Logf("Obtaining the ReplicaSet's UID")
|
||||
e2elog.Logf("Obtaining the ReplicaSet's UID")
|
||||
orphanedRSUID := rsList.Items[0].UID
|
||||
|
||||
framework.Logf("Checking the ReplicaSet has the right controllerRef")
|
||||
e2elog.Logf("Checking the ReplicaSet has the right controllerRef")
|
||||
err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Deleting Deployment %q and orphaning its ReplicaSet", deploymentName)
|
||||
e2elog.Logf("Deleting Deployment %q and orphaning its ReplicaSet", deploymentName)
|
||||
err = orphanDeploymentReplicaSets(c, deploy)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
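For reference, "orphaning" here means deleting the Deployment while leaving its ReplicaSet behind; a sketch of what orphanDeploymentReplicaSets presumably does (helper body assumed; the delete options are the real metav1 API):

func orphanDeploymentReplicaSets(c clientset.Interface, d *apps.Deployment) error {
	// Orphan propagation removes the owning Deployment but keeps its dependents
	// (the ReplicaSets and their Pods) so a later Deployment can adopt them.
	orphan := metav1.DeletePropagationOrphan
	return c.AppsV1().Deployments(d.Namespace).Delete(d.Name, &metav1.DeleteOptions{PropagationPolicy: &orphan})
}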
@ -805,22 +806,22 @@ func testDeploymentsControllerRef(f *framework.Framework) {
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error waiting for Deployment ReplicaSet to be orphaned")
|
||||
|
||||
deploymentName = "test-adopt-deployment"
|
||||
framework.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
|
||||
e2elog.Logf("Creating Deployment %q to adopt the ReplicaSet", deploymentName)
|
||||
d = e2edeploy.NewDeployment(deploymentName, replicas, podLabels, NginxImageName, NginxImage, apps.RollingUpdateDeploymentStrategyType)
|
||||
deploy, err = c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
err = e2edeploy.WaitForDeploymentComplete(c, deploy)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Waiting for the ReplicaSet to have the right controllerRef")
|
||||
e2elog.Logf("Waiting for the ReplicaSet to have the right controllerRef")
|
||||
err = checkDeploymentReplicaSetsControllerRef(c, ns, deploy.UID, podLabels)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName)
|
||||
e2elog.Logf("Verifying no extra ReplicaSet is created (Deployment %q still has only one ReplicaSet after adoption)", deploymentName)
|
||||
rsList = listDeploymentReplicaSets(c, ns, podLabels)
|
||||
gomega.Expect(len(rsList.Items)).Should(gomega.Equal(1))
|
||||
|
||||
framework.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet")
|
||||
e2elog.Logf("Verifying the ReplicaSet has the same UID as the orphaned ReplicaSet")
|
||||
gomega.Expect(rsList.Items[0].UID).Should(gomega.Equal(orphanedRSUID))
|
||||
}
|
||||
|
||||
@ -841,19 +842,19 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
||||
d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStrP(3)
|
||||
d.Spec.Strategy.RollingUpdate.MaxUnavailable = intOrStrP(2)
|
||||
|
||||
framework.Logf("Creating deployment %q", deploymentName)
|
||||
e2elog.Logf("Creating deployment %q", deploymentName)
|
||||
deployment, err := c.AppsV1().Deployments(ns).Create(d)
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Waiting for observed generation %d", deployment.Generation)
|
||||
e2elog.Logf("Waiting for observed generation %d", deployment.Generation)
|
||||
gomega.Expect(e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Verify that the required pods have come up.
|
||||
framework.Logf("Waiting for all required pods to come up")
|
||||
e2elog.Logf("Waiting for all required pods to come up")
|
||||
err = framework.VerifyPodsRunning(c, ns, NginxImageName, false, *(deployment.Spec.Replicas))
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred(), "error in waiting for pods to come up: %v", err)
|
||||
|
||||
framework.Logf("Waiting for deployment %q to complete", deployment.Name)
|
||||
e2elog.Logf("Waiting for deployment %q to complete", deployment.Name)
|
||||
gomega.Expect(e2edeploy.WaitForDeploymentComplete(c, deployment)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
|
||||
@ -861,13 +862,13 @@ func testProportionalScalingDeployment(f *framework.Framework) {
|
||||
|
||||
// Update the deployment with a non-existent image so that the new replica set
|
||||
// will be blocked to simulate a partial rollout.
|
||||
framework.Logf("Updating deployment %q with a non-existent image", deploymentName)
|
||||
e2elog.Logf("Updating deployment %q with a non-existent image", deploymentName)
|
||||
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *apps.Deployment) {
|
||||
update.Spec.Template.Spec.Containers[0].Image = "nginx:404"
|
||||
})
|
||||
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||
|
||||
framework.Logf("Waiting for observed generation %d", deployment.Generation)
|
||||
e2elog.Logf("Waiting for observed generation %d", deployment.Generation)
|
||||
gomega.Expect(e2edeploy.WaitForObservedDeployment(c, ns, deploymentName, deployment.Generation)).NotTo(gomega.HaveOccurred())
|
||||
|
||||
// Checking state of first rollout's replicaset.
|
||||
@ -876,15 +877,15 @@ func testProportionalScalingDeployment(f *framework.Framework) {

// First rollout's replicaset should have Deployment's (replicas - maxUnavailable) = 10 - 2 = 8 available replicas.
minAvailableReplicas := replicas - int32(maxUnavailable)
framework.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas)
e2elog.Logf("Waiting for the first rollout's replicaset to have .status.availableReplicas = %d", minAvailableReplicas)
gomega.Expect(replicaset.WaitForReplicaSetTargetAvailableReplicas(c, firstRS, minAvailableReplicas)).NotTo(gomega.HaveOccurred())

// First rollout's replicaset should have .spec.replicas = 8 too.
framework.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas)
e2elog.Logf("Waiting for the first rollout's replicaset to have .spec.replicas = %d", minAvailableReplicas)
gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, minAvailableReplicas)).NotTo(gomega.HaveOccurred())

// The desired replicas wait makes sure that the RS controller has created expected number of pods.
framework.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
e2elog.Logf("Waiting for the first rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), firstRS)
@ -898,36 +899,36 @@ func testProportionalScalingDeployment(f *framework.Framework) {
gomega.Expect(err).NotTo(gomega.HaveOccurred())

// Second rollout's replicaset should have 0 available replicas.
framework.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0")
e2elog.Logf("Verifying that the second rollout's replicaset has .status.availableReplicas = 0")
gomega.Expect(secondRS.Status.AvailableReplicas).Should(gomega.Equal(int32(0)))

// Second rollout's replicaset should have Deployment's (replicas + maxSurge - first RS's replicas) = 10 + 3 - 8 = 5 for .spec.replicas.
newReplicas := replicas + int32(maxSurge) - minAvailableReplicas
framework.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas)
e2elog.Logf("Waiting for the second rollout's replicaset to have .spec.replicas = %d", newReplicas)
gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, newReplicas)).NotTo(gomega.HaveOccurred())
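The two numbers verified in this hunk and the previous one fall straight out of the rolling-update settings this test configures (replicas=10, maxUnavailable=2, maxSurge=3); a runnable sketch of the arithmetic with those values hard-coded for illustration:

package main

import "fmt"

func main() {
	replicas, maxUnavailable, maxSurge := int32(10), int32(2), int32(3)

	// The old ReplicaSet may lose at most maxUnavailable pods during the rollout.
	minAvailable := replicas - maxUnavailable // 10 - 2 = 8

	// Total pods may not exceed replicas+maxSurge, so the new ReplicaSet gets the remainder.
	newRSReplicas := replicas + maxSurge - minAvailable // 10 + 3 - 8 = 5

	fmt.Println(minAvailable, newRSReplicas) // prints: 8 5
}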
// The desired replicas wait makes sure that the RS controller has created expected number of pods.
framework.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
e2elog.Logf("Waiting for the second rollout's replicaset of deployment %q to have desired number of replicas", deploymentName)
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
err = replicaset.WaitForReplicaSetDesiredReplicas(c.AppsV1(), secondRS)
gomega.Expect(err).NotTo(gomega.HaveOccurred())

// Check the deployment's minimum availability.
framework.Logf("Verifying that deployment %q has minimum required number of available replicas", deploymentName)
e2elog.Logf("Verifying that deployment %q has minimum required number of available replicas", deploymentName)
if deployment.Status.AvailableReplicas < minAvailableReplicas {
gomega.Expect(fmt.Errorf("observed %d available replicas, less than min required %d", deployment.Status.AvailableReplicas, minAvailableReplicas)).NotTo(gomega.HaveOccurred())
}

// Scale the deployment to 30 replicas.
newReplicas = int32(30)
framework.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
e2elog.Logf("Scaling up the deployment %q from %d to %d", deploymentName, replicas, newReplicas)
deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *apps.Deployment) {
update.Spec.Replicas = &newReplicas
})
gomega.Expect(err).NotTo(gomega.HaveOccurred())

framework.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName)
e2elog.Logf("Waiting for the replicasets of deployment %q to have desired number of replicas", deploymentName)
firstRS, err = c.AppsV1().ReplicaSets(ns).Get(firstRS.Name, metav1.GetOptions{})
gomega.Expect(err).NotTo(gomega.HaveOccurred())
secondRS, err = c.AppsV1().ReplicaSets(ns).Get(secondRS.Name, metav1.GetOptions{})
@ -935,12 +936,12 @@ func testProportionalScalingDeployment(f *framework.Framework) {

// First rollout's replicaset should have .spec.replicas = 8 + (30-10)*(8/13) = 8 + 12 = 20 replicas.
// Note that 12 comes from rounding (30-10)*(8/13) to nearest integer.
framework.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20")
e2elog.Logf("Verifying that first rollout's replicaset has .spec.replicas = 20")
gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, firstRS, 20)).NotTo(gomega.HaveOccurred())

// Second rollout's replicaset should have .spec.replicas = 5 + (30-10)*(5/13) = 5 + 8 = 13 replicas.
// Note that 8 comes from rounding (30-10)*(5/13) to nearest integer.
framework.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13")
e2elog.Logf("Verifying that second rollout's replicaset has .spec.replicas = 13")
gomega.Expect(replicaset.WaitForReplicaSetTargetSpecReplicas(c, secondRS, 13)).NotTo(gomega.HaveOccurred())
}
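The 20 and 13 asserted above come from proportional scaling: when a deployment with an in-flight rollout is scaled, the extra replicas are split across its ReplicaSets in proportion to their current sizes. A runnable sketch that reproduces this test's numbers (simple rounding; the real controller distributes leftover replicas a little more carefully):

package main

import (
	"fmt"
	"math"
)

// proportionalShare returns how many extra replicas an RS of size rsSize should gain
// when a deployment currently totalling total replicas is scaled by delta.
func proportionalShare(rsSize, total, delta int32) int32 {
	return int32(math.Round(float64(delta) * float64(rsSize) / float64(total)))
}

func main() {
	// Deployment scaled from 10 to 30 replicas; its RSes currently hold 8 and 5 pods (13 total).
	delta := int32(30 - 10)
	fmt.Println(8 + proportionalShare(8, 13, delta)) // 20
	fmt.Println(5 + proportionalShare(5, 13, delta)) // 13
}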
@ -31,6 +31,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
)
@ -281,7 +282,7 @@ func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) {
return false, fmt.Errorf("pods is nil")
}
if len(pods.Items) < n {
framework.Logf("pods: %v < %v", len(pods.Items), n)
e2elog.Logf("pods: %v < %v", len(pods.Items), n)
return false, nil
}
ready := 0
@ -291,7 +292,7 @@ func waitForPodsOrDie(cs kubernetes.Interface, ns string, n int) {
}
}
if ready < n {
framework.Logf("running pods: %v < %v", ready, n)
e2elog.Logf("running pods: %v < %v", ready, n)
return false, nil
}
return true, nil
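The lines elided between the two hunks above count how many of the listed pods are ready; a minimal sketch of that check, assuming readiness means a PodReady condition with status True (v1 is k8s.io/api/core/v1):

// countReady is illustrative only, not the test's own helper.
func countReady(pods []v1.Pod) int {
	ready := 0
	for _, pod := range pods {
		for _, cond := range pod.Status.Conditions {
			if cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue {
				ready++
				break
			}
		}
	}
	return ready
}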
@ -37,6 +37,7 @@ import (
|
||||
"k8s.io/kubernetes/test/e2e/common"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
jobutil "k8s.io/kubernetes/test/e2e/framework/job"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
testutils "k8s.io/kubernetes/test/utils"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
@ -60,7 +61,7 @@ func expectNodeReadiness(isReady bool, newNode chan *v1.Node) {
|
||||
if framework.IsNodeConditionSetAsExpected(n, v1.NodeReady, isReady) {
|
||||
expected = true
|
||||
} else {
|
||||
framework.Logf("Observed node ready status is NOT %v as expected", isReady)
|
||||
e2elog.Logf("Observed node ready status is NOT %v as expected", isReady)
|
||||
}
|
||||
case <-timer:
|
||||
timeout = true
|
||||
@ -96,9 +97,9 @@ func podOnNode(podName, nodeName string, image string) *v1.Pod {
|
||||
func newPodOnNode(c clientset.Interface, namespace, podName, nodeName string) error {
|
||||
pod, err := c.CoreV1().Pods(namespace).Create(podOnNode(podName, nodeName, framework.ServeHostnameImage))
|
||||
if err == nil {
|
||||
framework.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
|
||||
e2elog.Logf("Created pod %s on node %s", pod.ObjectMeta.Name, nodeName)
|
||||
} else {
|
||||
framework.Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err)
|
||||
e2elog.Logf("Failed to create pod %s on node %s: %v", podName, nodeName, err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@ -262,7 +263,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
||||
// pods on another node and that now the number of replicas is equal 'replicas'.
|
||||
ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
|
||||
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
|
||||
framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
|
||||
e2elog.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
|
||||
err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name)
|
||||
framework.ExpectNoError(err)
|
||||
|
||||
@ -271,7 +272,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
|
||||
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
|
||||
}
|
||||
@ -293,7 +294,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
||||
pod, err := c.CoreV1().Pods(ns).Get(additionalPod, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
if pod.Spec.NodeName != node.Name {
|
||||
framework.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name)
|
||||
e2elog.Logf("Pod %s found on invalid node: %s instead of %s", pod.Name, pod.Spec.NodeName, node.Name)
|
||||
}
|
||||
}
|
||||
})
|
||||
@ -329,7 +330,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
||||
// pods on another node and that now the number of replicas is equal 'replicas + 1'.
|
||||
ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
|
||||
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
|
||||
framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
|
||||
e2elog.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
|
||||
err := framework.WaitForRCPodToDisappear(c, ns, name, pods.Items[0].Name)
|
||||
gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")
|
||||
|
||||
@ -338,7 +339,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
|
||||
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
|
||||
}
|
||||
@ -367,7 +368,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
||||
if ginkgo.CurrentGinkgoTestDescription().Failed {
|
||||
framework.DumpDebugInfo(c, ns)
|
||||
}
|
||||
framework.Logf("Deleting all stateful set in ns %v", ns)
|
||||
e2elog.Logf("Deleting all stateful set in ns %v", ns)
|
||||
framework.DeleteAllStatefulSets(c, ns)
|
||||
})
|
||||
|
||||
@ -406,12 +407,12 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
||||
// that belongs to StatefulSet 'statefulSetName', **does not** disappear due to forced deletion from the apiserver.
|
||||
// The grace period on the stateful pods is set to a value > 0.
|
||||
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
|
||||
framework.Logf("Checking that the NodeController does not force delete stateful pods %v", pod.Name)
|
||||
e2elog.Logf("Checking that the NodeController does not force delete stateful pods %v", pod.Name)
|
||||
err := framework.WaitTimeoutForPodNoLongerRunningInNamespace(c, pod.Name, ns, 10*time.Minute)
|
||||
gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")
|
||||
})
|
||||
|
||||
framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
|
||||
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
|
||||
}
|
||||
@ -450,7 +451,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
||||
// running pods after the node-controller detects node unreachable.
|
||||
ginkgo.By(fmt.Sprintf("blocking network traffic from node %s", node.Name))
|
||||
framework.TestUnderTemporaryNetworkFailure(c, ns, node, func() {
|
||||
framework.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
|
||||
e2elog.Logf("Waiting for pod %s to be removed", pods.Items[0].Name)
|
||||
err := framework.WaitForPodToDisappear(c, ns, pods.Items[0].Name, label, 20*time.Second, 10*time.Minute)
|
||||
gomega.Expect(err).To(gomega.Equal(wait.ErrWaitTimeout), "Pod was not deleted during network partition.")
|
||||
|
||||
@ -459,7 +460,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
framework.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
e2elog.Logf("Waiting %v for node %s to be ready once temporary network failure ends", resizeNodeReadyTimeout, node.Name)
|
||||
if !framework.WaitForNodeToBeReady(c, node.Name, resizeNodeReadyTimeout) {
|
||||
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
|
||||
}
|
||||
@ -536,7 +537,7 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
|
||||
}
|
||||
}
|
||||
}
|
||||
framework.Logf(
|
||||
e2elog.Logf(
|
||||
"Only %v should be running after partition. Maximum TolerationSeconds among other Pods is %v",
|
||||
neverEvictedPods,
|
||||
maxTolerationTime,
|
||||
|
@ -29,6 +29,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/controller/replication"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
@ -133,7 +134,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
|
||||
|
||||
// Wait for the pods to enter the running state. Waiting loops until the pods
|
||||
// are running so non-running pods cause a timeout for this test.
|
||||
framework.Logf("Ensuring all pods for ReplicationController %q are running", name)
|
||||
e2elog.Logf("Ensuring all pods for ReplicationController %q are running", name)
|
||||
running := int32(0)
|
||||
for _, pod := range pods.Items {
|
||||
if pod.DeletionTimestamp != nil {
|
||||
@ -149,7 +150,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
|
||||
}
|
||||
}
|
||||
framework.ExpectNoError(err)
|
||||
framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
|
||||
e2elog.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
|
||||
running++
|
||||
}
|
||||
|
||||
@ -159,7 +160,7 @@ func TestReplicationControllerServeImageOrFail(f *framework.Framework, test stri
|
||||
}
|
||||
|
||||
// Verify that something is listening.
|
||||
framework.Logf("Trying to dial the pod")
|
||||
e2elog.Logf("Trying to dial the pod")
|
||||
retryTimeout := 2 * time.Minute
|
||||
retryInterval := 5 * time.Second
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
||||
@ -178,7 +179,7 @@ func testReplicationControllerConditionCheck(f *framework.Framework) {
|
||||
namespace := f.Namespace.Name
|
||||
name := "condition-test"
|
||||
|
||||
framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name)
|
||||
e2elog.Logf("Creating quota %q that allows only two pods to run in the current namespace", name)
|
||||
quota := newPodQuota(name, "2")
|
||||
_, err := c.CoreV1().ResourceQuotas(namespace).Create(quota)
|
||||
framework.ExpectNoError(err)
|
||||
|
@ -30,6 +30,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/pkg/controller/replicaset"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
replicasetutil "k8s.io/kubernetes/test/e2e/framework/replicaset"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
@ -122,7 +123,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
|
||||
// Create a ReplicaSet for a service that serves its hostname.
|
||||
// The source for the Docker container kubernetes/serve_hostname is
|
||||
// in contrib/for-demos/serve_hostname
|
||||
framework.Logf("Creating ReplicaSet %s", name)
|
||||
e2elog.Logf("Creating ReplicaSet %s", name)
|
||||
newRS := newRS(name, replicas, map[string]string{"name": name}, name, image)
|
||||
newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
|
||||
_, err := f.ClientSet.AppsV1().ReplicaSets(f.Namespace.Name).Create(newRS)
|
||||
@ -135,7 +136,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
|
||||
|
||||
// Wait for the pods to enter the running state. Waiting loops until the pods
|
||||
// are running so non-running pods cause a timeout for this test.
|
||||
framework.Logf("Ensuring a pod for ReplicaSet %q is running", name)
|
||||
e2elog.Logf("Ensuring a pod for ReplicaSet %q is running", name)
|
||||
running := int32(0)
|
||||
for _, pod := range pods.Items {
|
||||
if pod.DeletionTimestamp != nil {
|
||||
@ -151,7 +152,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
|
||||
}
|
||||
}
|
||||
framework.ExpectNoError(err)
|
||||
framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
|
||||
e2elog.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
|
||||
running++
|
||||
}
|
||||
|
||||
@ -161,7 +162,7 @@ func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image s
|
||||
}
|
||||
|
||||
// Verify that something is listening.
|
||||
framework.Logf("Trying to dial the pod")
|
||||
e2elog.Logf("Trying to dial the pod")
|
||||
retryTimeout := 2 * time.Minute
|
||||
retryInterval := 5 * time.Second
|
||||
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
|
||||
|
@ -34,6 +34,7 @@ import (
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
watchtools "k8s.io/client-go/tools/watch"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
@ -86,7 +87,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
if ginkgo.CurrentGinkgoTestDescription().Failed {
|
||||
framework.DumpDebugInfo(c, ns)
|
||||
}
|
||||
framework.Logf("Deleting all statefulset in ns %v", ns)
|
||||
e2elog.Logf("Deleting all statefulset in ns %v", ns)
|
||||
framework.DeleteAllStatefulSets(c, ns)
|
||||
})
|
||||
|
||||
@ -753,13 +754,13 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
pod := event.Object.(*v1.Pod)
|
||||
switch event.Type {
|
||||
case watch.Deleted:
|
||||
framework.Logf("Observed delete event for stateful pod %v in namespace %v", pod.Name, pod.Namespace)
|
||||
e2elog.Logf("Observed delete event for stateful pod %v in namespace %v", pod.Name, pod.Namespace)
|
||||
if initialStatefulPodUID == "" {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
framework.Logf("Observed stateful pod in namespace: %v, name: %v, uid: %v, status phase: %v. Waiting for statefulset controller to delete.",
|
||||
e2elog.Logf("Observed stateful pod in namespace: %v, name: %v, uid: %v, status phase: %v. Waiting for statefulset controller to delete.",
|
||||
pod.Namespace, pod.Name, pod.UID, pod.Status.Phase)
|
||||
initialStatefulPodUID = pod.UID
|
||||
return false, nil
|
||||
@ -836,7 +837,7 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
if ginkgo.CurrentGinkgoTestDescription().Failed {
|
||||
framework.DumpDebugInfo(c, ns)
|
||||
}
|
||||
framework.Logf("Deleting all statefulset in ns %v", ns)
|
||||
e2elog.Logf("Deleting all statefulset in ns %v", ns)
|
||||
framework.DeleteAllStatefulSets(c, ns)
|
||||
})
|
||||
|
||||
@ -876,7 +877,7 @@ func kubectlExecWithRetries(args ...string) (out string) {
|
||||
if out, err = framework.RunKubectl(args...); err == nil {
|
||||
return
|
||||
}
|
||||
framework.Logf("Retrying %v:\nerror %v\nstdout %v", args, err, out)
|
||||
e2elog.Logf("Retrying %v:\nerror %v\nstdout %v", args, err, out)
|
||||
}
|
||||
framework.Failf("Failed to execute \"%v\" with retries: %v", args, err)
|
||||
return
|
||||
@ -938,7 +939,7 @@ func (z *zookeeperTester) write(statefulPodIndex int, kv map[string]string) {
|
||||
ns := fmt.Sprintf("--namespace=%v", z.ss.Namespace)
|
||||
for k, v := range kv {
|
||||
cmd := fmt.Sprintf("/opt/zookeeper/bin/zkCli.sh create /%v %v", k, v)
|
||||
framework.Logf(framework.RunKubectlOrDie("exec", ns, name, "--", "/bin/sh", "-c", cmd))
|
||||
e2elog.Logf(framework.RunKubectlOrDie("exec", ns, name, "--", "/bin/sh", "-c", cmd))
|
||||
}
|
||||
}
|
||||
|
||||
@ -969,12 +970,12 @@ func (m *mysqlGaleraTester) mysqlExec(cmd, ns, podName string) string {
|
||||
func (m *mysqlGaleraTester) deploy(ns string) *apps.StatefulSet {
|
||||
m.ss = m.tester.CreateStatefulSet(mysqlGaleraManifestPath, ns)
|
||||
|
||||
framework.Logf("Deployed statefulset %v, initializing database", m.ss.Name)
|
||||
e2elog.Logf("Deployed statefulset %v, initializing database", m.ss.Name)
|
||||
for _, cmd := range []string{
|
||||
"create database statefulset;",
|
||||
"use statefulset; create table foo (k varchar(20), v varchar(20));",
|
||||
} {
|
||||
framework.Logf(m.mysqlExec(cmd, ns, fmt.Sprintf("%v-0", m.ss.Name)))
|
||||
e2elog.Logf(m.mysqlExec(cmd, ns, fmt.Sprintf("%v-0", m.ss.Name)))
|
||||
}
|
||||
return m.ss
|
||||
}
|
||||
@ -983,7 +984,7 @@ func (m *mysqlGaleraTester) write(statefulPodIndex int, kv map[string]string) {
|
||||
name := fmt.Sprintf("%v-%d", m.ss.Name, statefulPodIndex)
|
||||
for k, v := range kv {
|
||||
cmd := fmt.Sprintf("use statefulset; insert into foo (k, v) values (\"%v\", \"%v\");", k, v)
|
||||
framework.Logf(m.mysqlExec(cmd, m.ss.Namespace, name))
|
||||
e2elog.Logf(m.mysqlExec(cmd, m.ss.Namespace, name))
|
||||
}
|
||||
}
|
||||
|
||||
@ -1014,7 +1015,7 @@ func (m *redisTester) deploy(ns string) *apps.StatefulSet {
|
||||
func (m *redisTester) write(statefulPodIndex int, kv map[string]string) {
|
||||
name := fmt.Sprintf("%v-%d", m.ss.Name, statefulPodIndex)
|
||||
for k, v := range kv {
|
||||
framework.Logf(m.redisExec(fmt.Sprintf("SET %v %v", k, v), m.ss.Namespace, name))
|
||||
e2elog.Logf(m.redisExec(fmt.Sprintf("SET %v %v", k, v), m.ss.Namespace, name))
|
||||
}
|
||||
}
|
||||
|
||||
@ -1039,12 +1040,12 @@ func (c *cockroachDBTester) cockroachDBExec(cmd, ns, podName string) string {
|
||||
|
||||
func (c *cockroachDBTester) deploy(ns string) *apps.StatefulSet {
|
||||
c.ss = c.tester.CreateStatefulSet(cockroachDBManifestPath, ns)
|
||||
framework.Logf("Deployed statefulset %v, initializing database", c.ss.Name)
|
||||
e2elog.Logf("Deployed statefulset %v, initializing database", c.ss.Name)
|
||||
for _, cmd := range []string{
|
||||
"CREATE DATABASE IF NOT EXISTS foo;",
|
||||
"CREATE TABLE IF NOT EXISTS foo.bar (k STRING PRIMARY KEY, v STRING);",
|
||||
} {
|
||||
framework.Logf(c.cockroachDBExec(cmd, ns, fmt.Sprintf("%v-0", c.ss.Name)))
|
||||
e2elog.Logf(c.cockroachDBExec(cmd, ns, fmt.Sprintf("%v-0", c.ss.Name)))
|
||||
}
|
||||
return c.ss
|
||||
}
|
||||
@ -1053,7 +1054,7 @@ func (c *cockroachDBTester) write(statefulPodIndex int, kv map[string]string) {
|
||||
name := fmt.Sprintf("%v-%d", c.ss.Name, statefulPodIndex)
|
||||
for k, v := range kv {
|
||||
cmd := fmt.Sprintf("UPSERT INTO foo.bar VALUES ('%v', '%v');", k, v)
|
||||
framework.Logf(c.cockroachDBExec(cmd, c.ss.Namespace, name))
|
||||
e2elog.Logf(c.cockroachDBExec(cmd, c.ss.Namespace, name))
|
||||
}
|
||||
}
|
||||
func (c *cockroachDBTester) read(statefulPodIndex int, key string) string {
|
||||
|
@ -16,6 +16,7 @@ go_library(
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/config:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//test/e2e/instrumentation/common:go_default_library",
|
||||
"//test/e2e/instrumentation/logging/elasticsearch:go_default_library",
|
||||
"//test/e2e/instrumentation/logging/stackdriver:go_default_library",
|
||||
|
@ -20,6 +20,7 @@ go_library(
|
||||
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//test/e2e/instrumentation/common:go_default_library",
|
||||
"//test/e2e/instrumentation/logging/utils:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
|
@ -23,6 +23,7 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
@ -61,7 +62,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
|
||||
// being run as the first e2e test just after the e2e cluster has been created.
|
||||
err := wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
|
||||
if _, err := s.Get("kibana-logging", metav1.GetOptions{}); err != nil {
|
||||
framework.Logf("Kibana is unreachable: %v", err)
|
||||
e2elog.Logf("Kibana is unreachable: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
@ -83,7 +84,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
|
||||
err = wait.Poll(pollingInterval, pollingTimeout, func() (bool, error) {
|
||||
req, err := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
|
||||
if err != nil {
|
||||
framework.Logf("Failed to get services proxy request: %v", err)
|
||||
e2elog.Logf("Failed to get services proxy request: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@ -95,7 +96,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
|
||||
Name("kibana-logging").
|
||||
DoRaw()
|
||||
if err != nil {
|
||||
framework.Logf("Proxy call to kibana-logging failed: %v", err)
|
||||
e2elog.Logf("Proxy call to kibana-logging failed: %v", err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
|
@ -26,6 +26,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"
|
||||
)
|
||||
|
||||
@ -54,7 +55,7 @@ func newEsLogProvider(f *framework.Framework) (*esLogProvider, error) {
|
||||
func (p *esLogProvider) Init() error {
|
||||
f := p.Framework
|
||||
// Check for the existence of the Elasticsearch service.
|
||||
framework.Logf("Checking the Elasticsearch service exists.")
|
||||
e2elog.Logf("Checking the Elasticsearch service exists.")
|
||||
s := f.ClientSet.CoreV1().Services(api.NamespaceSystem)
|
||||
// Make a few attempts to connect. This makes the test robust against
|
||||
// being run as the first e2e test just after the e2e cluster has been created.
|
||||
@ -63,14 +64,14 @@ func (p *esLogProvider) Init() error {
|
||||
if _, err = s.Get("elasticsearch-logging", meta_v1.GetOptions{}); err == nil {
|
||||
break
|
||||
}
|
||||
framework.Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start))
|
||||
e2elog.Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start))
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for the Elasticsearch pods to enter the running state.
|
||||
framework.Logf("Checking to make sure the Elasticsearch pods are running")
|
||||
e2elog.Logf("Checking to make sure the Elasticsearch pods are running")
|
||||
labelSelector := fields.SelectorFromSet(fields.Set(map[string]string{"k8s-app": "elasticsearch-logging"})).String()
|
||||
options := meta_v1.ListOptions{LabelSelector: labelSelector}
|
||||
pods, err := f.ClientSet.CoreV1().Pods(api.NamespaceSystem).List(options)
|
||||
@ -84,7 +85,7 @@ func (p *esLogProvider) Init() error {
|
||||
}
|
||||
}
|
||||
|
||||
framework.Logf("Checking to make sure we are talking to an Elasticsearch service.")
|
||||
e2elog.Logf("Checking to make sure we are talking to an Elasticsearch service.")
|
||||
// Perform a few checks to make sure this looks like an Elasticsearch cluster.
|
||||
var statusCode int
|
||||
err = nil
|
||||
@ -92,7 +93,7 @@ func (p *esLogProvider) Init() error {
|
||||
for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
|
||||
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
|
||||
if errProxy != nil {
|
||||
framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
|
||||
e2elog.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
|
||||
continue
|
||||
}
|
||||
// Query against the root URL for Elasticsearch.
|
||||
@ -103,11 +104,11 @@ func (p *esLogProvider) Init() error {
|
||||
response.StatusCode(&statusCode)
|
||||
|
||||
if err != nil {
|
||||
framework.Logf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err)
|
||||
e2elog.Logf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err)
|
||||
continue
|
||||
}
|
||||
if int(statusCode) != 200 {
|
||||
framework.Logf("After %v Elasticsearch cluster has a bad status: %v", time.Since(start), statusCode)
|
||||
e2elog.Logf("After %v Elasticsearch cluster has a bad status: %v", time.Since(start), statusCode)
|
||||
continue
|
||||
}
|
||||
break
|
||||
@ -121,12 +122,12 @@ func (p *esLogProvider) Init() error {
|
||||
|
||||
// Now assume we really are talking to an Elasticsearch instance.
|
||||
// Check the cluster health.
|
||||
framework.Logf("Checking health of Elasticsearch service.")
|
||||
e2elog.Logf("Checking health of Elasticsearch service.")
|
||||
healthy := false
|
||||
for start := time.Now(); time.Since(start) < esRetryTimeout; time.Sleep(esRetryDelay) {
|
||||
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
|
||||
if errProxy != nil {
|
||||
framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
|
||||
e2elog.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
|
||||
continue
|
||||
}
|
||||
body, err = proxyRequest.Namespace(api.NamespaceSystem).
|
||||
@ -140,17 +141,17 @@ func (p *esLogProvider) Init() error {
|
||||
health := make(map[string]interface{})
|
||||
err := json.Unmarshal(body, &health)
|
||||
if err != nil {
|
||||
framework.Logf("Bad json response from elasticsearch: %v", err)
|
||||
e2elog.Logf("Bad json response from elasticsearch: %v", err)
|
||||
continue
|
||||
}
|
||||
statusIntf, ok := health["status"]
|
||||
if !ok {
|
||||
framework.Logf("No status field found in cluster health response: %v", health)
|
||||
e2elog.Logf("No status field found in cluster health response: %v", health)
|
||||
continue
|
||||
}
|
||||
status := statusIntf.(string)
|
||||
if status != "green" && status != "yellow" {
|
||||
framework.Logf("Cluster health has bad status: %v", health)
|
||||
e2elog.Logf("Cluster health has bad status: %v", health)
|
||||
continue
|
||||
}
|
||||
if err == nil && ok {
|
||||
@ -174,12 +175,12 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
|
||||
|
||||
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.CoreV1().RESTClient().Get())
|
||||
if errProxy != nil {
|
||||
framework.Logf("Failed to get services proxy request: %v", errProxy)
|
||||
e2elog.Logf("Failed to get services proxy request: %v", errProxy)
|
||||
return nil
|
||||
}
|
||||
|
||||
query := fmt.Sprintf("kubernetes.pod_name:%s AND kubernetes.namespace_name:%s", name, f.Namespace.Name)
|
||||
framework.Logf("Sending a search request to Elasticsearch with the following query: %s", query)
|
||||
e2elog.Logf("Sending a search request to Elasticsearch with the following query: %s", query)
|
||||
|
||||
// Ask Elasticsearch to return all the log lines that were tagged with the
|
||||
// pod name. Ask for ten times as many log lines because duplication is possible.
|
||||
@ -191,26 +192,26 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
|
||||
Param("size", strconv.Itoa(searchPageSize)).
|
||||
DoRaw()
|
||||
if err != nil {
|
||||
framework.Logf("Failed to make proxy call to elasticsearch-logging: %v", err)
|
||||
e2elog.Logf("Failed to make proxy call to elasticsearch-logging: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
var response map[string]interface{}
|
||||
err = json.Unmarshal(body, &response)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to unmarshal response: %v", err)
|
||||
e2elog.Logf("Failed to unmarshal response: %v", err)
|
||||
return nil
|
||||
}
|
||||
|
||||
hits, ok := response["hits"].(map[string]interface{})
|
||||
if !ok {
|
||||
framework.Logf("response[hits] not of the expected type: %T", response["hits"])
|
||||
e2elog.Logf("response[hits] not of the expected type: %T", response["hits"])
|
||||
return nil
|
||||
}
|
||||
|
||||
h, ok := hits["hits"].([]interface{})
|
||||
if !ok {
|
||||
framework.Logf("Hits not of the expected type: %T", hits["hits"])
|
||||
e2elog.Logf("Hits not of the expected type: %T", hits["hits"])
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -219,13 +220,13 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
|
||||
for _, e := range h {
|
||||
l, ok := e.(map[string]interface{})
|
||||
if !ok {
|
||||
framework.Logf("Element of hit not of expected type: %T", e)
|
||||
e2elog.Logf("Element of hit not of expected type: %T", e)
|
||||
continue
|
||||
}
|
||||
|
||||
source, ok := l["_source"].(map[string]interface{})
|
||||
if !ok {
|
||||
framework.Logf("_source not of the expected type: %T", l["_source"])
|
||||
e2elog.Logf("_source not of the expected type: %T", l["_source"])
|
||||
continue
|
||||
}
|
||||
|
||||
@ -241,7 +242,7 @@ func (p *esLogProvider) ReadEntries(name string) []utils.LogEntry {
|
||||
continue
|
||||
}
|
||||
|
||||
framework.Logf("Log is of unknown type, got %v, want string or object in field 'log'", source)
|
||||
e2elog.Logf("Log is of unknown type, got %v, want string or object in field 'log'", source)
|
||||
}
|
||||
|
||||
return entries
|
||||
|
@ -28,6 +28,7 @@ import (
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
"k8s.io/kubernetes/test/e2e/framework/config"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
@ -61,14 +62,14 @@ var _ = instrumentation.SIGDescribe("Logging soak [Performance] [Slow] [Disrupti
|
||||
defer wg.Done()
|
||||
defer ginkgo.GinkgoRecover()
|
||||
wave := fmt.Sprintf("wave%v", strconv.Itoa(i))
|
||||
framework.Logf("Starting logging soak, wave = %v", wave)
|
||||
e2elog.Logf("Starting logging soak, wave = %v", wave)
|
||||
RunLogPodsWithSleepOf(f, kbRateInSeconds, wave, totalLogTime)
|
||||
framework.Logf("Completed logging soak, wave %v", i)
|
||||
e2elog.Logf("Completed logging soak, wave %v", i)
|
||||
}()
|
||||
// Niceness.
|
||||
time.Sleep(loggingSoak.TimeBetweenWaves)
|
||||
}
|
||||
framework.Logf("Waiting on all %v logging soak waves to complete", loggingSoak.Scale)
|
||||
e2elog.Logf("Waiting on all %v logging soak waves to complete", loggingSoak.Scale)
|
||||
wg.Wait()
|
||||
})
|
||||
})
|
||||
|
@ -18,6 +18,7 @@ go_library(
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/log:go_default_library",
|
||||
"//test/e2e/instrumentation/common:go_default_library",
|
||||
"//test/e2e/instrumentation/logging/utils:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
|
||||
"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"
|
||||
|
||||
@ -155,7 +156,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
|
||||
podName := fmt.Sprintf("synthlogger-%s", string(uuid.NewUUID()))
|
||||
err := utils.NewLoadLoggingPod(podName, "", 1, 1*time.Second).Start(f)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to create a logging pod: %v", err)
|
||||
e2elog.Logf("Failed to create a logging pod: %v", err)
|
||||
}
|
||||
return false, nil
|
||||
}, stopCh)
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
|
||||
"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"
|
||||
|
||||
@ -85,7 +86,7 @@ var _ = instrumentation.SIGDescribe("Cluster level logging implemented by Stackd
|
||||
// Starting one pod on each node.
|
||||
for _, pod := range podsByRun[runIdx] {
|
||||
if err := pod.Start(f); err != nil {
|
||||
framework.Logf("Failed to start pod: %v", err)
|
||||
e2elog.Logf("Failed to start pod: %v", err)
|
||||
}
|
||||
}
|
||||
<-t.C
|
||||
|
@ -26,6 +26,7 @@ import (
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||
"k8s.io/kubernetes/test/e2e/instrumentation/logging/utils"
|
||||
|
||||
"golang.org/x/oauth2/google"
|
||||
@ -117,7 +118,7 @@ func ensureProjectHasSinkCapacity(sinksService *sd.ProjectsSinksService, project
|
||||
return err
|
||||
}
|
||||
if len(listResponse.Sinks) >= stackdriverSinkCountLimit {
|
||||
framework.Logf("Reached Stackdriver sink limit. Deleting all sinks")
|
||||
e2elog.Logf("Reached Stackdriver sink limit. Deleting all sinks")
|
||||
deleteSinks(sinksService, projectID, listResponse.Sinks)
|
||||
}
|
||||
return nil
|
||||
@ -136,7 +137,7 @@ func deleteSinks(sinksService *sd.ProjectsSinksService, projectID string, sinks
|
||||
for _, sink := range sinks {
|
||||
sinkNameID := fmt.Sprintf("projects/%s/sinks/%s", projectID, sink.Name)
|
||||
if _, err := sinksService.Delete(sinkNameID).Do(); err != nil {
|
||||
framework.Logf("Failed to delete LogSink: %v", err)
|
||||
e2elog.Logf("Failed to delete LogSink: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -185,21 +186,21 @@ func (p *sdLogProvider) Cleanup() {
|
||||
sinkNameID := fmt.Sprintf("projects/%s/sinks/%s", projectID, p.logSink.Name)
|
||||
sinksService := p.sdService.Projects.Sinks
|
||||
if _, err := sinksService.Delete(sinkNameID).Do(); err != nil {
|
||||
framework.Logf("Failed to delete LogSink: %v", err)
|
||||
e2elog.Logf("Failed to delete LogSink: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if p.subscription != nil {
|
||||
subsService := p.pubsubService.Projects.Subscriptions
|
||||
if _, err := subsService.Delete(p.subscription.Name).Do(); err != nil {
|
||||
framework.Logf("Failed to delete PubSub subscription: %v", err)
|
||||
e2elog.Logf("Failed to delete PubSub subscription: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if p.topic != nil {
|
||||
topicsService := p.pubsubService.Projects.Topics
|
||||
if _, err := topicsService.Delete(p.topic.Name).Do(); err != nil {
|
||||
framework.Logf("Failed to delete PubSub topic: %v", err)
|
||||
e2elog.Logf("Failed to delete PubSub topic: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -234,7 +235,7 @@ func (p *sdLogProvider) createSink(projectID, sinkName, topicName string) (*sd.L
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
framework.Logf("Using the following filter for log entries: %s", filter)
|
||||
e2elog.Logf("Using the following filter for log entries: %s", filter)
|
||||
sink := &sd.LogSink{
|
||||
Name: sinkName,
|
||||
Destination: fmt.Sprintf("pubsub.googleapis.com/%s", topicName),
|
||||
@ -280,20 +281,20 @@ func (p *sdLogProvider) authorizeSink() error {
|
||||
}
|
||||
|
||||
func (p *sdLogProvider) waitSinkInit() error {
|
||||
framework.Logf("Waiting for log sink to become operational")
|
||||
e2elog.Logf("Waiting for log sink to become operational")
|
||||
return wait.Poll(1*time.Second, sinkStartupTimeout, func() (bool, error) {
|
||||
err := publish(p.pubsubService, p.topic, "embrace eternity")
|
||||
if err != nil {
|
||||
framework.Logf("Failed to push message to PubSub due to %v", err)
|
||||
e2elog.Logf("Failed to push message to PubSub due to %v", err)
|
||||
}
|
||||
|
||||
messages, err := pullAndAck(p.pubsubService, p.subscription)
|
||||
if err != nil {
|
||||
framework.Logf("Failed to pull messages from PubSub due to %v", err)
|
||||
e2elog.Logf("Failed to pull messages from PubSub due to %v", err)
|
||||
return false, nil
|
||||
}
|
||||
if len(messages) > 0 {
|
||||
framework.Logf("Sink %s is operational", p.logSink.Name)
|
||||
e2elog.Logf("Sink %s is operational", p.logSink.Name)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@ -318,32 +319,32 @@ func (p *sdLogProvider) startPollingLogs() {
func (p *sdLogProvider) pollLogsOnce() {
messages, err := pullAndAck(p.pubsubService, p.subscription)
if err != nil {
framework.Logf("Failed to pull messages from PubSub due to %v", err)
e2elog.Logf("Failed to pull messages from PubSub due to %v", err)
return
}

for _, msg := range messages {
logEntryEncoded, err := base64.StdEncoding.DecodeString(msg.Message.Data)
if err != nil {
framework.Logf("Got a message from pubsub that is not base64-encoded: %s", msg.Message.Data)
e2elog.Logf("Got a message from pubsub that is not base64-encoded: %s", msg.Message.Data)
continue
}

var sdLogEntry sd.LogEntry
if err := json.Unmarshal(logEntryEncoded, &sdLogEntry); err != nil {
framework.Logf("Failed to decode a pubsub message '%s': %v", logEntryEncoded, err)
e2elog.Logf("Failed to decode a pubsub message '%s': %v", logEntryEncoded, err)
continue
}

name, ok := p.tryGetName(sdLogEntry)
if !ok {
framework.Logf("Received LogEntry with unexpected resource type: %s", sdLogEntry.Resource.Type)
e2elog.Logf("Received LogEntry with unexpected resource type: %s", sdLogEntry.Resource.Type)
continue
}

logEntry, err := convertLogEntry(sdLogEntry)
if err != nil {
framework.Logf("Failed to parse Stackdriver LogEntry: %v", err)
e2elog.Logf("Failed to parse Stackdriver LogEntry: %v", err)
continue
}

@ -407,7 +408,7 @@ func pullAndAck(service *pubsub.Service, subs *pubsub.Subscription) ([]*pubsub.R
if len(ids) > 0 {
ackReq := &pubsub.AcknowledgeRequest{AckIds: ids}
if _, err = subsService.Acknowledge(subs.Name, ackReq).Do(); err != nil {
framework.Logf("Failed to ack poll: %v", err)
e2elog.Logf("Failed to ack poll: %v", err)
}
}
@ -25,6 +25,7 @@ go_library(
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/utils/image:go_default_library",
"//vendor/k8s.io/utils/integer:go_default_library",
],
@ -24,6 +24,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/utils/integer"
)

@ -67,13 +68,13 @@ func EnsureLoggingAgentRestartsCount(f *framework.Framework, appName string, max
for _, pod := range agentPods.Items {
contStatuses := pod.Status.ContainerStatuses
if len(contStatuses) == 0 {
framework.Logf("There are no container statuses for pod %s", pod.Name)
e2elog.Logf("There are no container statuses for pod %s", pod.Name)
continue
}
restartCount := int(contStatuses[0].RestartCount)
maxRestartCount = integer.IntMax(maxRestartCount, restartCount)

framework.Logf("Logging agent %s on node %s was restarted %d times",
e2elog.Logf("Logging agent %s on node %s was restarted %d times",
pod.Name, pod.Spec.NodeName, restartCount)
}
@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
imageutils "k8s.io/kubernetes/test/utils/image"
)

@ -93,7 +94,7 @@ func (p *loadLoggingPod) Name() string {
}

func (p *loadLoggingPod) Start(f *framework.Framework) error {
framework.Logf("Starting load logging pod %s", p.name)
e2elog.Logf("Starting load logging pod %s", p.name)
f.PodClient().Create(&api_v1.Pod{
ObjectMeta: meta_v1.ObjectMeta{
Name: p.name,
@ -168,7 +169,7 @@ func (p *execLoggingPod) Name() string {
}

func (p *execLoggingPod) Start(f *framework.Framework) error {
framework.Logf("Starting repeating logging pod %s", p.name)
e2elog.Logf("Starting repeating logging pod %s", p.name)
f.PodClient().Create(&api_v1.Pod{
ObjectMeta: meta_v1.ObjectMeta{
Name: p.name,
@ -22,7 +22,7 @@ import (
"time"

"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// LogChecker is an interface for an entity that can check whether logging
@ -195,14 +196,14 @@ func getFullIngestionTimeout(podsMap map[string]FiniteLoggingPod, slack float64)
totalWant += want
}
if len(lossMsgs) > 0 {
framework.Logf("Still missing logs from:\n%s", strings.Join(lossMsgs, "\n"))
e2elog.Logf("Still missing logs from:\n%s", strings.Join(lossMsgs, "\n"))
}
lostFrac := 1 - float64(totalGot)/float64(totalWant)
if lostFrac > slack {
return fmt.Errorf("still missing %.2f%% of logs, only %.2f%% is tolerable",
lostFrac*100, slack*100)
}
framework.Logf("Missing %.2f%% of logs, which is lower than the threshold %.2f%%",
e2elog.Logf("Missing %.2f%% of logs, which is lower than the threshold %.2f%%",
lostFrac*100, slack*100)
return nil
}
@ -38,6 +38,7 @@ go_library(
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/config:go_default_library",
"//test/e2e/framework/gpu:go_default_library",
"//test/e2e/framework/log:go_default_library",
"//test/e2e/framework/metrics:go_default_library",
"//test/e2e/instrumentation/common:go_default_library",
"//test/e2e/scheduling:go_default_library",
@ -30,6 +30,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/gpu"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
"k8s.io/kubernetes/test/e2e/scheduling"
"k8s.io/kubernetes/test/utils/image"
@ -101,7 +102,7 @@ func testStackdriverAcceleratorMonitoring(f *framework.Framework) {
pollingFunction := checkForAcceleratorMetrics(projectID, gcmService, time.Now(), metricsMap)
err = wait.Poll(pollFrequency, pollTimeout, pollingFunction)
if err != nil {
framework.Logf("Missing metrics: %+v", metricsMap)
e2elog.Logf("Missing metrics: %+v", metricsMap)
}
framework.ExpectNoError(err)
}
@ -119,9 +120,9 @@ func checkForAcceleratorMetrics(projectID string, gcmService *gcm.Service, start
if len(ts) > 0 {
counter = counter + 1
metricsMap[metric] = true
framework.Logf("Received %v timeseries for metric %v", len(ts), metric)
e2elog.Logf("Received %v timeseries for metric %v", len(ts), metric)
} else {
framework.Logf("No timeseries for metric %v", metric)
e2elog.Logf("No timeseries for metric %v", metric)
}
}
if counter < 3 {
@ -24,6 +24,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/config"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"

"github.com/onsi/ginkgo"
@ -71,7 +72,7 @@ func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration)
if maxRetries--; maxRetries <= 0 {
break
}
framework.Logf("failed to retrieve kubelet stats -\n %v", errors)
e2elog.Logf("failed to retrieve kubelet stats -\n %v", errors)
time.Sleep(cadvisor.SleepDuration)
}
framework.Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
@ -27,6 +27,7 @@ import (
rbac "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

var (
@ -264,20 +265,20 @@ func CreateAdapter(adapterDeploymentFile string) error {
return err
}
stat, err := framework.RunKubectl("create", "-f", adapterURL)
framework.Logf(stat)
e2elog.Logf(stat)
return err
}

func createClusterAdminBinding() error {
stdout, stderr, err := framework.RunCmd("gcloud", "config", "get-value", "core/account")
if err != nil {
framework.Logf(stderr)
e2elog.Logf(stderr)
return err
}
serviceAccount := strings.TrimSpace(stdout)
framework.Logf("current service account: %q", serviceAccount)
e2elog.Logf("current service account: %q", serviceAccount)
stat, err := framework.RunKubectl("create", "clusterrolebinding", ClusterAdminBinding, "--clusterrole=cluster-admin", "--user="+serviceAccount)
framework.Logf(stat)
e2elog.Logf(stat)
return err
}
@ -306,32 +307,32 @@ func CreateDescriptors(service *gcm.Service, projectID string) error {
func CleanupDescriptors(service *gcm.Service, projectID string) {
_, err := service.Projects.MetricDescriptors.Delete(fmt.Sprintf("projects/%s/metricDescriptors/custom.googleapis.com/%s", projectID, CustomMetricName)).Do()
if err != nil {
framework.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err)
e2elog.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err)
}
_, err = service.Projects.MetricDescriptors.Delete(fmt.Sprintf("projects/%s/metricDescriptors/custom.googleapis.com/%s", projectID, UnusedMetricName)).Do()
if err != nil {
framework.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err)
e2elog.Logf("Failed to delete descriptor for metric '%s': %v", CustomMetricName, err)
}
}

// CleanupAdapter deletes Custom Metrics - Stackdriver adapter deployments.
func CleanupAdapter(adapterDeploymentFile string) {
stat, err := framework.RunKubectl("delete", "-f", adapterDeploymentFile)
framework.Logf(stat)
e2elog.Logf(stat)
if err != nil {
framework.Logf("Failed to delete adapter deployments: %s", err)
e2elog.Logf("Failed to delete adapter deployments: %s", err)
}
err = exec.Command("rm", adapterDeploymentFile).Run()
if err != nil {
framework.Logf("Failed to delete adapter deployment file: %s", err)
e2elog.Logf("Failed to delete adapter deployment file: %s", err)
}
cleanupClusterAdminBinding()
}

func cleanupClusterAdminBinding() {
stat, err := framework.RunKubectl("delete", "clusterrolebinding", ClusterAdminBinding)
framework.Logf(stat)
e2elog.Logf(stat)
if err != nil {
framework.Logf("Failed to delete cluster admin binding: %s", err)
e2elog.Logf("Failed to delete cluster admin binding: %s", err)
}
}
@ -36,6 +36,7 @@ import (
cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/restmapper"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
customclient "k8s.io/metrics/pkg/client/custom_metrics"
externalclient "k8s.io/metrics/pkg/client/external_metrics"
)
@ -257,11 +258,11 @@ func verifyResponseFromExternalMetricsAPI(f *framework.Framework, externalMetric
func cleanupSDExporterPod(f *framework.Framework, cs clientset.Interface) {
err := cs.CoreV1().Pods(f.Namespace.Name).Delete(stackdriverExporterPod1, &metav1.DeleteOptions{})
if err != nil {
framework.Logf("Failed to delete %s pod: %v", stackdriverExporterPod1, err)
e2elog.Logf("Failed to delete %s pod: %v", stackdriverExporterPod1, err)
}
err = cs.CoreV1().Pods(f.Namespace.Name).Delete(stackdriverExporterPod2, &metav1.DeleteOptions{})
if err != nil {
framework.Logf("Failed to delete %s pod: %v", stackdriverExporterPod2, err)
e2elog.Logf("Failed to delete %s pod: %v", stackdriverExporterPod2, err)
}
}
@ -22,6 +22,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/framework/metrics"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"

@ -71,7 +72,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
}
}
if !masterRegistered {
framework.Logf("Master is node api.Registry. Skipping testing Scheduler metrics.")
e2elog.Logf("Master is node api.Registry. Skipping testing Scheduler metrics.")
return
}
response, err := grabber.GrabFromScheduler()
@ -92,7 +93,7 @@ var _ = instrumentation.SIGDescribe("MetricsGrabber", func() {
}
}
if !masterRegistered {
framework.Logf("Master is node api.Registry. Skipping testing ControllerManager metrics.")
e2elog.Logf("Master is node api.Registry. Skipping testing ControllerManager metrics.")
return
}
response, err := grabber.GrabFromControllerManager()
@ -30,6 +30,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"
)

@ -171,7 +172,7 @@ func validateQueryReturnsCorrectValues(c clientset.Interface, query string, expe
if len(samples) < minSamplesCount {
return fmt.Errorf("Not enough samples for query '%v', got %v", query, samples)
}
framework.Logf("Executed query '%v' returned %v", query, samples)
e2elog.Logf("Executed query '%v' returned %v", query, samples)
for _, value := range samples {
error := math.Abs(value-expectedValue) / expectedValue
if error >= errorTolerance {
@ -238,7 +239,7 @@ func fetchPrometheusTargetDiscovery(c clientset.Interface) (TargetDiscovery, err
Raw()
var qres promTargetsResponse
if err != nil {
framework.Logf(string(response))
e2elog.Logf(string(response))
return qres.Data, err
}
err = json.Unmarshal(response, &qres)
@ -303,7 +304,7 @@ func queryPrometheus(c clientset.Interface, query string, start, end time.Time,
Do().
Raw()
if err != nil {
framework.Logf(string(response))
e2elog.Logf(string(response))
return nil, err
}
var qres promQueryResponse
@ -369,7 +370,7 @@ func retryUntilSucceeds(validator func() error, timeout time.Duration) {
if time.Since(startTime) >= timeout {
break
}
framework.Logf(err.Error())
e2elog.Logf(err.Error())
time.Sleep(prometheusSleepBetweenAttempts)
}
framework.Failf(err.Error())
@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
instrumentation "k8s.io/kubernetes/test/e2e/instrumentation/common"

gcm "google.golang.org/api/monitoring/v3"
@ -83,7 +84,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
// and uncomment following lines (comment out the two lines above): (DON'T set the env var below)
/*
ts, err := google.DefaultTokenSource(oauth2.NoContext)
framework.Logf("Couldn't get application default credentials, %v", err)
e2elog.Logf("Couldn't get application default credentials, %v", err)
if err != nil {
framework.Failf("Error accessing application default credentials, %v", err)
}
@ -110,7 +111,7 @@ func testStackdriverMonitoring(f *framework.Framework, pods, allPodsCPU int, per
pollingFunction := checkForMetrics(projectID, gcmService, time.Now(), metricsMap, allPodsCPU, perPodCPU)
err = wait.Poll(pollFrequency, pollTimeout, pollingFunction)
if err != nil {
framework.Logf("Missing metrics: %+v\n", metricsMap)
e2elog.Logf("Missing metrics: %+v\n", metricsMap)
}
framework.ExpectNoError(err)
}
@ -129,9 +130,9 @@ func checkForMetrics(projectID string, gcmService *gcm.Service, start time.Time,
if len(ts) > 0 {
counter = counter + 1
metricsMap[metric] = true
framework.Logf("Received %v timeseries for metric %v\n", len(ts), metric)
e2elog.Logf("Received %v timeseries for metric %v\n", len(ts), metric)
} else {
framework.Logf("No timeseries for metric %v\n", metric)
e2elog.Logf("No timeseries for metric %v\n", metric)
}

var sum float64
@ -148,10 +149,10 @@ func checkForMetrics(projectID string, gcmService *gcm.Service, start time.Time,
}
}
sum = sum + *max.Value.DoubleValue
framework.Logf("Received %v points for metric %v\n",
e2elog.Logf("Received %v points for metric %v\n",
len(t.Points), metric)
}
framework.Logf("Most recent cpu/utilization sum*cpu/limit: %v\n", sum*float64(cpuLimit))
e2elog.Logf("Most recent cpu/utilization sum*cpu/limit: %v\n", sum*float64(cpuLimit))
if math.Abs(sum*float64(cpuLimit)-float64(cpuUsed)) > tolerance*float64(cpuUsed) {
return false, nil
}