Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-27 13:37:30 +00:00
Merge pull request #77413 from johnSchnake/frameworkLogRefactoringnodeWindowsAutoscale
Move node, windows, and autoscaling tests to framework/log
This commit is contained in:
commit a9fc9754bb
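The refactor below is mechanical: each affected package adds a BUILD dependency on test/e2e/framework/log, imports it under the e2elog alias, and rewrites framework.Logf call sites to e2elog.Logf, while framework.Failf and the rest of the framework API are left as-is in this PR. A minimal sketch of the pattern, with a hypothetical helper standing in for the real test bodies:

package node

import (
	// Logging now comes from the extracted subpackage instead of the
	// monolithic test/e2e/framework package.
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// logNodeCount is a hypothetical helper illustrating the call-site rewrite.
func logNodeCount(n int) {
	// Before: framework.Logf("found %d nodes", n)
	e2elog.Logf("found %d nodes", n) // same printf-style signature, new home
}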
@@ -40,6 +40,7 @@ go_library(
 "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 "//test/e2e/common:go_default_library",
 "//test/e2e/framework:go_default_library",
+"//test/e2e/framework/log:go_default_library",
 "//test/e2e/instrumentation/monitoring:go_default_library",
 "//test/e2e/scheduling:go_default_library",
 "//test/utils:go_default_library",
@@ -43,6 +43,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 api "k8s.io/kubernetes/pkg/apis/core"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/scheduling"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
@@ -915,10 +916,10 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
 defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
 time.Sleep(scaleUpTimeout)
 currentNodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-framework.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
+e2elog.Logf("Currently available nodes: %v, nodes available at the start of test: %v, disabled nodes: %v", len(currentNodes.Items), len(nodes.Items), nodesToBreakCount)
 Expect(len(currentNodes.Items)).Should(Equal(len(nodes.Items) - nodesToBreakCount))
 status, err := getClusterwideStatus(c)
-framework.Logf("Clusterwide status: %v", status)
+e2elog.Logf("Clusterwide status: %v", status)
 framework.ExpectNoError(err)
 Expect(status).Should(Equal("Unhealthy"))
 }
@@ -30,6 +30,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/instrumentation/monitoring"

 . "github.com/onsi/ginkgo"
@@ -237,7 +238,7 @@ func (tc *CustomMetricTestCase) Run() {
 // and uncomment following lines:
 /*
 ts, err := google.DefaultTokenSource(oauth2.NoContext)
-framework.Logf("Couldn't get application default credentials, %v", err)
+e2elog.Logf("Couldn't get application default credentials, %v", err)
 if err != nil {
 framework.Failf("Error accessing application default credentials, %v", err)
 }
@@ -442,7 +443,7 @@ func waitForReplicas(deploymentName, namespace string, cs clientset.Interface, t
 framework.Failf("Failed to get replication controller %s: %v", deployment, err)
 }
 replicas := int(deployment.Status.ReadyReplicas)
-framework.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
+e2elog.Logf("waiting for %d replicas (current: %d)", desiredReplicas, replicas)
 return replicas == desiredReplicas, nil // Expected number of replicas found. Exit.
 })
 if err != nil {
@@ -22,13 +22,14 @@ import (
 "strings"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
@@ -240,7 +241,7 @@ func getScheduableCores(nodes []v1.Node) int64 {

 scInt64, scOk := sc.AsInt64()
 if !scOk {
-framework.Logf("Unable to compute integer values of schedulable cores in the cluster")
+e2elog.Logf("Unable to compute integer values of schedulable cores in the cluster")
 return 0
 }
 return scInt64
@@ -258,7 +259,7 @@ func deleteDNSScalingConfigMap(c clientset.Interface) error {
 if err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil {
 return err
 }
-framework.Logf("DNS autoscaling ConfigMap deleted.")
+e2elog.Logf("DNS autoscaling ConfigMap deleted.")
 return nil
 }

@@ -285,7 +286,7 @@ func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) e
 if err != nil {
 return err
 }
-framework.Logf("DNS autoscaling ConfigMap updated.")
+e2elog.Logf("DNS autoscaling ConfigMap updated.")
 return nil
 }

@@ -319,14 +320,14 @@ func deleteDNSAutoscalerPod(c clientset.Interface) error {
 if err := c.CoreV1().Pods(metav1.NamespaceSystem).Delete(podName, nil); err != nil {
 return err
 }
-framework.Logf("DNS autoscaling pod %v deleted.", podName)
+e2elog.Logf("DNS autoscaling pod %v deleted.", podName)
 return nil
 }

 func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectReplicasFunc, timeout time.Duration) (err error) {
 var current int
 var expected int
-framework.Logf("Waiting up to %v for kube-dns to reach expected replicas", timeout)
+e2elog.Logf("Waiting up to %v for kube-dns to reach expected replicas", timeout)
 condition := func() (bool, error) {
 current, err = getDNSReplicas(c)
 if err != nil {
@@ -334,7 +335,7 @@ func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectRep
 }
 expected = getExpected(c)
 if current != expected {
-framework.Logf("Replicas not as expected: got %v, expected %v", current, expected)
+e2elog.Logf("Replicas not as expected: got %v, expected %v", current, expected)
 return false, nil
 }
 return true, nil
@@ -343,12 +344,12 @@ func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectRep
 if err = wait.Poll(2*time.Second, timeout, condition); err != nil {
 return fmt.Errorf("err waiting for DNS replicas to satisfy %v, got %v: %v", expected, current, err)
 }
-framework.Logf("kube-dns reaches expected replicas: %v", expected)
+e2elog.Logf("kube-dns reaches expected replicas: %v", expected)
 return nil
 }

 func waitForDNSConfigMapCreated(c clientset.Interface, timeout time.Duration) (configMap *v1.ConfigMap, err error) {
-framework.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout)
+e2elog.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout)
 condition := func() (bool, error) {
 configMap, err = fetchDNSScalingConfigMap(c)
 if err != nil {
@@ -37,6 +37,7 @@ go_library(
 "//test/e2e/common:go_default_library",
 "//test/e2e/framework:go_default_library",
 "//test/e2e/framework/job:go_default_library",
+"//test/e2e/framework/log:go_default_library",
 "//test/e2e/framework/volume:go_default_library",
 "//test/utils:go_default_library",
 "//test/utils/image:go_default_library",
@@ -19,6 +19,7 @@ package node
 import (
 "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 . "github.com/onsi/ginkgo"
 )
@@ -35,7 +36,7 @@ var _ = SIGDescribe("AppArmor", func() {
 if !CurrentGinkgoTestDescription().Failed {
 return
 }
-framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
+framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, e2elog.Logf)
 })

 It("should enforce an AppArmor profile", func() {
@@ -21,6 +21,7 @@ import (
 "strings"

 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 . "github.com/onsi/ginkgo"
 )
@@ -63,10 +64,10 @@ var _ = SIGDescribe("crictl", func() {
 // Log the stdout/stderr output.
 // TODO: Verify the output.
 if len(stdout) > 0 {
-framework.Logf("Got stdout from %q:\n %s\n", host, strings.TrimSpace(stdout))
+e2elog.Logf("Got stdout from %q:\n %s\n", host, strings.TrimSpace(stdout))
 }
 if len(stderr) > 0 {
-framework.Logf("Got stderr from %q:\n %s\n", host, strings.TrimSpace(stderr))
+e2elog.Logf("Got stderr from %q:\n %s\n", host, strings.TrimSpace(stderr))
 }
 }
 })
@@ -20,13 +20,14 @@ import (
 "strconv"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/fields"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
@@ -88,7 +89,7 @@ var _ = SIGDescribe("Events", func() {
 if err != nil {
 framework.Failf("Failed to get pod: %v", err)
 }
-framework.Logf("%+v\n", podWithUid)
+e2elog.Logf("%+v\n", podWithUid)
 var events *v1.EventList
 // Check for scheduler event about the pod.
 By("checking for scheduler event about the pod")
@@ -105,7 +106,7 @@ var _ = SIGDescribe("Events", func() {
 return false, err
 }
 if len(events.Items) > 0 {
-framework.Logf("Saw scheduler event for our pod.")
+e2elog.Logf("Saw scheduler event for our pod.")
 return true, nil
 }
 return false, nil
@@ -125,7 +126,7 @@ var _ = SIGDescribe("Events", func() {
 return false, err
 }
 if len(events.Items) > 0 {
-framework.Logf("Saw kubelet event for our pod.")
+e2elog.Logf("Saw kubelet event for our pod.")
 return true, nil
 }
 return false, nil
@@ -22,13 +22,14 @@ import (
 "strings"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 "k8s.io/kubernetes/test/e2e/framework/volume"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"
@@ -50,10 +51,10 @@ const (
 // podNamePrefix and namespace.
 func getPodMatches(c clientset.Interface, nodeName string, podNamePrefix string, namespace string) sets.String {
 matches := sets.NewString()
-framework.Logf("Checking pods on node %v via /runningpods endpoint", nodeName)
+e2elog.Logf("Checking pods on node %v via /runningpods endpoint", nodeName)
 runningPods, err := framework.GetKubeletPods(c, nodeName)
 if err != nil {
-framework.Logf("Error checking running pods on %v: %v", nodeName, err)
+e2elog.Logf("Error checking running pods on %v: %v", nodeName, err)
 return matches
 }
 for _, pod := range runningPods.Items {
@@ -90,7 +91,7 @@ func waitTillNPodsRunningOnNodes(c clientset.Interface, nodeNames sets.String, p
 if seen.Len() == targetNumPods {
 return true, nil
 }
-framework.Logf("Waiting for %d pods to be running on the node; %d are currently running;", targetNumPods, seen.Len())
+e2elog.Logf("Waiting for %d pods to be running on the node; %d are currently running;", targetNumPods, seen.Len())
 return false, nil
 })
 }
@@ -211,7 +212,7 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
 }

 for _, test := range tests {
-framework.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
+e2elog.Logf("Wait up to %v for host's (%v) %q to be %v", timeout, nodeIP, test.feature, condMsg)
 err = wait.Poll(poll, timeout, func() (bool, error) {
 result, err := framework.NodeExec(nodeIP, test.cmd)
 Expect(err).NotTo(HaveOccurred())
@@ -229,9 +230,9 @@ func checkPodCleanup(c clientset.Interface, pod *v1.Pod, expectClean bool) {
 }

 if expectClean {
-framework.Logf("Pod's host has been cleaned up")
+e2elog.Logf("Pod's host has been cleaned up")
 } else {
-framework.Logf("Pod's host has not been cleaned up (per expectation)")
+e2elog.Logf("Pod's host has not been cleaned up (per expectation)")
 }
 }

@@ -345,7 +346,7 @@ var _ = SIGDescribe("kubelet", func() {
 start := time.Now()
 Expect(waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, 0,
 itArg.timeout)).NotTo(HaveOccurred())
-framework.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
+e2elog.Logf("Deleting %d pods on %d nodes completed in %v after the RC was deleted", totalPods, len(nodeNames),
 time.Since(start))
 if resourceMonitor != nil {
 resourceMonitor.LogCPUSummary()
@@ -26,6 +26,7 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 testutils "k8s.io/kubernetes/test/utils"
 imageutils "k8s.io/kubernetes/test/utils/image"

@@ -54,10 +55,10 @@ func logPodsOnNodes(c clientset.Interface, nodeNames []string) {
 for _, n := range nodeNames {
 podList, err := framework.GetKubeletRunningPods(c, n)
 if err != nil {
-framework.Logf("Unable to retrieve kubelet pods for node %v", n)
+e2elog.Logf("Unable to retrieve kubelet pods for node %v", n)
 continue
 }
-framework.Logf("%d pods are running on node %v", len(podList.Items), n)
+e2elog.Logf("%d pods are running on node %v", len(podList.Items), n)
 }
 }

@@ -90,7 +91,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 deadline := time.Now().Add(monitoringTime)
 for time.Now().Before(deadline) {
 timeLeft := deadline.Sub(time.Now())
-framework.Logf("Still running...%v left", timeLeft)
+e2elog.Logf("Still running...%v left", timeLeft)
 if timeLeft < reportingPeriod {
 time.Sleep(timeLeft)
 } else {
@@ -104,13 +105,13 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 usageSummary, err := rm.GetLatest()
 Expect(err).NotTo(HaveOccurred())
 // TODO(random-liu): Remove the original log when we migrate to new perfdash
-framework.Logf("%s", rm.FormatResourceUsage(usageSummary))
+e2elog.Logf("%s", rm.FormatResourceUsage(usageSummary))
 // Log perf result
 framework.PrintPerfData(framework.ResourceUsageToPerfData(rm.GetMasterNodeLatest(usageSummary)))
 verifyMemoryLimits(f.ClientSet, expectedMemory, usageSummary)

 cpuSummary := rm.GetCPUSummary()
-framework.Logf("%s", rm.FormatCPUSummary(cpuSummary))
+e2elog.Logf("%s", rm.FormatCPUSummary(cpuSummary))
 // Log perf result
 framework.PrintPerfData(framework.CPUUsageToPerfData(rm.GetMasterNodeCPUSummary(cpuSummary)))
 verifyCPULimits(expectedCPU, cpuSummary)
@@ -144,9 +145,9 @@ func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsageP
 errList = append(errList, fmt.Sprintf("node %v:\n %s", nodeName, strings.Join(nodeErrs, ", ")))
 heapStats, err := framework.GetKubeletHeapStats(c, nodeName)
 if err != nil {
-framework.Logf("Unable to get heap stats from %q", nodeName)
+e2elog.Logf("Unable to get heap stats from %q", nodeName)
 } else {
-framework.Logf("Heap stats on %q\n:%v", nodeName, heapStats)
+e2elog.Logf("Heap stats on %q\n:%v", nodeName, heapStats)
 }
 }
 }
@@ -210,7 +211,7 @@ var _ = SIGDescribe("Kubelet [Serial] [Slow]", func() {
 AfterEach(func() {
 rm.Stop()
 result := om.GetLatestRuntimeOperationErrorRate()
-framework.Logf("runtime operation error metrics:\n%s", framework.FormatRuntimeOperationErrorRate(result))
+e2elog.Logf("runtime operation error metrics:\n%s", framework.FormatRuntimeOperationErrorRate(result))
 })
 SIGDescribe("regular resource usage tracking", func() {
 // We assume that the scheduler will make reasonable scheduling choices
@@ -19,10 +19,11 @@ package node
 import (
 "fmt"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/sets"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 imageutils "k8s.io/kubernetes/test/utils/image"

 . "github.com/onsi/ginkgo"
@@ -165,7 +166,7 @@ var _ = SIGDescribe("Mount propagation", func() {
 for _, mountName := range dirNames {
 cmd := fmt.Sprintf("cat /mnt/test/%s/file", mountName)
 stdout, stderr, err := f.ExecShellInPodWithFullOutput(podName, cmd)
-framework.Logf("pod %s mount %s: stdout: %q, stderr: %q error: %v", podName, mountName, stdout, stderr, err)
+e2elog.Logf("pod %s mount %s: stdout: %q, stderr: %q error: %v", podName, mountName, stdout, stderr, err)
 msg := fmt.Sprintf("When checking pod %s and directory %s", podName, mountName)
 shouldBeVisible := mounts.Has(mountName)
 if shouldBeVisible {
@@ -24,10 +24,11 @@ import (
 "strings"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/fields"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 testutils "k8s.io/kubernetes/test/utils"

 . "github.com/onsi/ginkgo"
@@ -174,7 +175,7 @@ var _ = SIGDescribe("NodeProblemDetector [DisabledForLargeClusters]", func() {
 workingSetStatsMsg += fmt.Sprintf(" %s[%.1f|%.1f|%.1f];", nodes.Items[i].Name,
 workingSetStats[host][0], workingSetStats[host][len(workingSetStats[host])/2], workingSetStats[host][len(workingSetStats[host])-1])
 }
-framework.Logf("Node-Problem-Detector CPU and Memory Stats:\n\t%s\n\t%s\n\t%s", cpuStatsMsg, rssStatsMsg, workingSetStatsMsg)
+e2elog.Logf("Node-Problem-Detector CPU and Memory Stats:\n\t%s\n\t%s\n\t%s", cpuStatsMsg, rssStatsMsg, workingSetStatsMsg)
 })
 })

@@ -22,11 +22,12 @@ import (

 . "github.com/onsi/ginkgo"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )

@@ -48,11 +49,11 @@ var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]"

 count++
 if count%50 == 0 {
-framework.Logf("count: %v", count)
+e2elog.Logf("count: %v", count)
 }
 }

-framework.Logf("created: %v", count)
+e2elog.Logf("created: %v", count)

 // The gc controller polls every 30s and fires off a goroutine per
 // pod to terminate.
@@ -65,11 +66,11 @@ var _ = SIGDescribe("Pod garbage collector [Feature:PodGarbageCollector] [Slow]"
 pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) {
 pods, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(metav1.ListOptions{})
 if err != nil {
-framework.Logf("Failed to list pod %v", err)
+e2elog.Logf("Failed to list pod %v", err)
 return false, nil
 }
 if len(pods.Items) != gcThreshold {
-framework.Logf("Number of observed pods %v, waiting for %v", len(pods.Items), gcThreshold)
+e2elog.Logf("Number of observed pods %v, waiting for %v", len(pods.Items), gcThreshold)
 return false, nil
 }
 return true, nil
@@ -25,13 +25,14 @@ import (
 "strconv"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
@@ -143,7 +144,7 @@ var _ = SIGDescribe("Pods Extended", func() {
 Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
 podList, err := framework.GetKubeletPods(f.ClientSet, pod.Spec.NodeName)
 if err != nil {
-framework.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
+e2elog.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
 return false, nil
 }
 for _, kubeletPod := range podList.Items {
@@ -151,12 +152,12 @@ var _ = SIGDescribe("Pods Extended", func() {
 continue
 }
 if kubeletPod.ObjectMeta.DeletionTimestamp == nil {
-framework.Logf("deletion has not yet been observed")
+e2elog.Logf("deletion has not yet been observed")
 return false, nil
 }
 return false, nil
 }
-framework.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
+e2elog.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
 return true, nil
 })).NotTo(HaveOccurred(), "kubelet never observed the termination notice")

@@ -22,13 +22,14 @@ import (
 "fmt"
 "time"

-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/util/uuid"
 "k8s.io/apimachinery/pkg/util/wait"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/pkg/master/ports"
 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"
 imageutils "k8s.io/kubernetes/test/utils/image"

 . "github.com/onsi/ginkgo"
@@ -145,11 +146,11 @@ func testPreStop(c clientset.Interface, ns string) {
 }
 By(fmt.Sprintf("Error validating prestop: %v", err))
 } else {
-framework.Logf("Saw: %s", string(body))
+e2elog.Logf("Saw: %s", string(body))
 state := State{}
 err := json.Unmarshal(body, &state)
 if err != nil {
-framework.Logf("Error parsing: %v", err)
+e2elog.Logf("Error parsing: %v", err)
 return false, nil
 }
 if state.Received["prestop"] != 0 {
@@ -212,7 +213,7 @@ var _ = SIGDescribe("PreStop", func() {
 if pod.Name != kubeletPod.Name {
 continue
 } else if kubeletPod.Status.Phase == v1.PodRunning {
-framework.Logf("pod is running")
+e2elog.Logf("pod is running")
 return true, err
 }
 }
@@ -21,6 +21,7 @@ import (
 "strings"

 "k8s.io/kubernetes/test/e2e/framework"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 . "github.com/onsi/ginkgo"
 )
@@ -93,10 +94,10 @@ var _ = SIGDescribe("SSH", func() {
 }
 // Show stdout, stderr for logging purposes.
 if len(stdout) > 0 {
-framework.Logf("Got stdout from %s: %s", host, strings.TrimSpace(stdout))
+e2elog.Logf("Got stdout from %s: %s", host, strings.TrimSpace(stdout))
 }
 if len(stderr) > 0 {
-framework.Logf("Got stderr from %s: %s", host, strings.TrimSpace(stderr))
+e2elog.Logf("Got stderr from %s: %s", host, strings.TrimSpace(stderr))
 }
 }
 }
@@ -20,11 +20,12 @@ import (
 "time"

 batch "k8s.io/api/batch/v1"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/kubernetes/pkg/util/slice"
 "k8s.io/kubernetes/test/e2e/framework"
 jobutil "k8s.io/kubernetes/test/e2e/framework/job"
+e2elog "k8s.io/kubernetes/test/e2e/framework/log"

 . "github.com/onsi/ginkgo"
 . "github.com/onsi/gomega"
||||||
@ -44,7 +45,7 @@ func cleanupJob(f *framework.Framework, job *batch.Job) {
|
|||||||
ns := f.Namespace.Name
|
ns := f.Namespace.Name
|
||||||
c := f.ClientSet
|
c := f.ClientSet
|
||||||
|
|
||||||
framework.Logf("Remove the Job's dummy finalizer; the Job should be deleted cascadingly")
|
e2elog.Logf("Remove the Job's dummy finalizer; the Job should be deleted cascadingly")
|
||||||
removeFinalizerFunc := func(j *batch.Job) {
|
removeFinalizerFunc := func(j *batch.Job) {
|
||||||
j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil)
|
j.ObjectMeta.Finalizers = slice.RemoveString(j.ObjectMeta.Finalizers, dummyFinalizer, nil)
|
||||||
}
|
}
|
||||||
@@ -70,19 +71,19 @@ func testFinishedJob(f *framework.Framework) {
 job.ObjectMeta.Finalizers = []string{dummyFinalizer}
 defer cleanupJob(f, job)

-framework.Logf("Create a Job %s/%s with TTL", ns, job.Name)
+e2elog.Logf("Create a Job %s/%s with TTL", ns, job.Name)
 job, err := jobutil.CreateJob(c, ns, job)
 Expect(err).NotTo(HaveOccurred())

-framework.Logf("Wait for the Job to finish")
+e2elog.Logf("Wait for the Job to finish")
 err = jobutil.WaitForJobFinish(c, ns, job.Name)
 Expect(err).NotTo(HaveOccurred())

-framework.Logf("Wait for TTL after finished controller to delete the Job")
+e2elog.Logf("Wait for TTL after finished controller to delete the Job")
 err = jobutil.WaitForJobDeleting(c, ns, job.Name)
 Expect(err).NotTo(HaveOccurred())

-framework.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished")
+e2elog.Logf("Check Job's deletionTimestamp and compare with the time when the Job finished")
 job, err = jobutil.GetJob(c, ns, job.Name)
 Expect(err).NotTo(HaveOccurred())
 finishTime := jobutil.FinishTime(job)
@@ -31,6 +31,7 @@ go_library(
 "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
 "//staging/src/k8s.io/kubelet/config/v1beta1:go_default_library",
 "//test/e2e/framework:go_default_library",
+"//test/e2e/framework/log:go_default_library",
 "//test/utils/image:go_default_library",
 "//vendor/github.com/onsi/ginkgo:go_default_library",
 "//vendor/github.com/onsi/gomega:go_default_library",
|
@ -35,6 +35,7 @@ import (
|
|||||||
kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
|
kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
|
||||||
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
|
||||||
"k8s.io/kubernetes/test/e2e/framework"
|
"k8s.io/kubernetes/test/e2e/framework"
|
||||||
|
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
|
||||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||||
|
|
||||||
"github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
@@ -84,7 +85,7 @@ type nodeMemory struct {
 func checkNodeAllocatableTest(f *framework.Framework) {

 nodeMem := getNodeMemory(f)
-framework.Logf("nodeMem says: %+v", nodeMem)
+e2elog.Logf("nodeMem says: %+v", nodeMem)

 // calculate the allocatable mem based on capacity - reserved amounts
 calculatedNodeAlloc := nodeMem.capacity.Copy()
|
|||||||
for _, e := range eventList.Items {
|
for _, e := range eventList.Items {
|
||||||
// Look for an event that shows FailedScheduling
|
// Look for an event that shows FailedScheduling
|
||||||
if e.Type == "Warning" && e.Reason == "FailedScheduling" && e.InvolvedObject.Name == failurePods[0].ObjectMeta.Name {
|
if e.Type == "Warning" && e.Reason == "FailedScheduling" && e.InvolvedObject.Name == failurePods[0].ObjectMeta.Name {
|
||||||
framework.Logf("Found %+v event with message %+v", e.Reason, e.Message)
|
e2elog.Logf("Found %+v event with message %+v", e.Reason, e.Message)
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -293,11 +294,11 @@ func pollConfigz(timeout time.Duration, pollInterval time.Duration, nodeName str
 gomega.Eventually(func() bool {
 resp, err = client.Do(req)
 if err != nil {
-framework.Logf("Failed to get /configz, retrying. Error: %v", err)
+e2elog.Logf("Failed to get /configz, retrying. Error: %v", err)
 return false
 }
 if resp.StatusCode != 200 {
-framework.Logf("/configz response status not 200, retrying. Response was: %+v", resp)
+e2elog.Logf("/configz response status not 200, retrying. Response was: %+v", resp)
 return false
 }
