e2e: move LogFailedContainers out of e2e test framework util.go

Author: SataQiu, 2019-11-13 18:34:33 +08:00 (committed by 邱世达)
commit 50bc528a7e, parent 97d45fe3c8
43 changed files with 248 additions and 115 deletions
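
For readers skimming the diff: the change is mechanical at every call site. Helpers that previously hung off the framework package now live in dedicated subpackages, imported below as e2ekubectl and e2erc, and RcByNameContainer is renamed to ByNameContainer since the package name already says "rc". A minimal before/after sketch assembled from the call sites in this commit; the wrapper function, RC name, and image are illustrative, not part of the commit:

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
	testutils "k8s.io/kubernetes/test/utils"
)

// runAndDump is a hypothetical caller showing the new package-qualified names.
func runAndDump(f *framework.Framework) {
	cfg := testutils.RCConfig{
		Client:    f.ClientSet,
		Name:      "baz", // illustrative RC name
		Namespace: f.Namespace.Name,
		Image:     "k8s.gcr.io/pause:3.1", // illustrative image
		Replicas:  1,
	}
	// Was: framework.RunRC(cfg)
	framework.ExpectNoError(e2erc.RunRC(cfg))
	// Was: framework.LogFailedContainers(f.ClientSet, ns, framework.Logf)
	e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
	// Was: framework.DeleteRCAndWaitForGC(f.ClientSet, ns, name)
	framework.ExpectNoError(e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, cfg.Name))
}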

View File

@@ -66,6 +66,7 @@ go_library(
         "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/auth:go_default_library",
+        "//test/e2e/framework/kubectl:go_default_library",
         "//test/e2e/framework/log:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",

View File

@@ -87,6 +87,7 @@ go_library(
         "//test/e2e/framework/metrics:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
+        "//test/e2e/framework/rc:go_default_library",
         "//test/e2e/framework/ssh:go_default_library",
         "//test/utils:go_default_library",
         "//test/utils/crd:go_default_library",

View File

@@ -25,6 +25,7 @@ import (
     podutil "k8s.io/kubernetes/pkg/api/v1/pod"
     "k8s.io/kubernetes/test/e2e/apps"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
     e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
     testutils "k8s.io/kubernetes/test/utils"
     imageutils "k8s.io/kubernetes/test/utils/image"
@@ -45,7 +46,7 @@ var _ = SIGDescribe("Etcd failure [Disruptive]", func() {
         framework.SkipUnlessProviderIs("gce")
         framework.SkipUnlessSSHKeyPresent()

-        err := framework.RunRC(testutils.RCConfig{
+        err := e2erc.RunRC(testutils.RCConfig{
             Client:    f.ClientSet,
             Name:      "baz",
             Namespace: f.Namespace.Name,

View File

@@ -68,6 +68,7 @@ go_library(
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/pv:go_default_library",
+        "//test/e2e/framework/rc:go_default_library",
         "//test/e2e/framework/replicaset:go_default_library",
         "//test/e2e/framework/service:go_default_library",
         "//test/e2e/framework/ssh:go_default_library",

View File

@@ -34,6 +34,7 @@ import (
     "k8s.io/kubernetes/pkg/master/ports"
     "k8s.io/kubernetes/test/e2e/framework"
     e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+    e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
     e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
     testutils "k8s.io/kubernetes/test/utils"
     imageutils "k8s.io/kubernetes/test/utils/image"
@@ -209,7 +210,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
             Replicas:    numPods,
             CreatedPods: &[]*v1.Pod{},
         }
-        framework.ExpectNoError(framework.RunRC(config))
+        framework.ExpectNoError(e2erc.RunRC(config))
         replacePods(*config.CreatedPods, existingPods)

         stopCh = make(chan struct{})
@@ -260,7 +261,7 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
         // that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC
         // to the same size achieves this, because the scale operation advances the RC's sequence number
         // and awaits it to be observed and reported back in the RC's status.
-        framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods, true)
+        e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods, true)

         // Only check the keys, the pods can be different if the kubelet updated it.
         // TODO: Can it really?
@@ -291,9 +292,9 @@ var _ = SIGDescribe("DaemonRestart [Disruptive]", func() {
         restarter.kill()
         // This is best effort to try and create pods while the scheduler is down,
         // since we don't know exactly when it is restarted after the kill signal.
-        framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, false))
+        framework.ExpectNoError(e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, false))
         restarter.waitUp()
-        framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true))
+        framework.ExpectNoError(e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rcName, numPods+5, true))
     })

     ginkgo.It("Kubelet should not restart containers across restart", func() {

View File

@@ -44,6 +44,7 @@ go_library(
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/pv:go_default_library",
+        "//test/e2e/framework/rc:go_default_library",
         "//test/e2e/instrumentation/monitoring:go_default_library",
         "//test/e2e/scheduling:go_default_library",
         "//test/utils:go_default_library",

View File

@@ -32,6 +32,7 @@ import (
     "k8s.io/klog"
     "k8s.io/kubernetes/test/e2e/framework"
     e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+    e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
     testutils "k8s.io/kubernetes/test/utils"
     imageutils "k8s.io/kubernetes/test/utils/image"
@@ -345,8 +346,8 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
         totalMemReservation := unschedulableMemReservation * unschedulablePodReplicas
         timeToWait := 5 * time.Minute
         podsConfig := reserveMemoryRCConfig(f, "unschedulable-pod", unschedulablePodReplicas, totalMemReservation, timeToWait)
-        framework.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable)
-        defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, podsConfig.Name)
+        e2erc.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable)
+        defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, podsConfig.Name)

         // Ensure that no new nodes have been added so far.
         readyNodeCount, _ := e2enode.TotalReady(f.ClientSet)
@@ -379,7 +380,7 @@ func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestC
     // run rc based on config
     ginkgo.By(fmt.Sprintf("Running RC %v from config", config.extraPods.Name))
     start := time.Now()
-    framework.ExpectNoError(framework.RunRC(*config.extraPods))
+    framework.ExpectNoError(e2erc.RunRC(*config.extraPods))
     // check results
     if tolerateMissingNodeCount > 0 {
         // Tolerate some number of nodes not to be created.
@@ -397,7 +398,7 @@ func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestC
     }
     timeTrack(start, fmt.Sprintf("Scale up to %v", config.expectedResult.nodes))
     return func() error {
-        return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, config.extraPods.Name)
+        return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, config.extraPods.Name)
     }
 }
@@ -475,10 +476,10 @@ func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, p
         HostPorts:  map[string]int{"port1": port},
         MemRequest: request,
     }
-    err := framework.RunRC(*config)
+    err := e2erc.RunRC(*config)
     framework.ExpectNoError(err)
     return func() error {
-        return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
+        return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
     }
 }
@@ -515,10 +516,10 @@ func distributeLoad(f *framework.Framework, namespace string, id string, podDist
     framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet))
     // Create the target RC
     rcConfig := reserveMemoryRCConfig(f, id, totalPods, totalPods*podMemRequestMegabytes, timeout)
-    framework.ExpectNoError(framework.RunRC(*rcConfig))
+    framework.ExpectNoError(e2erc.RunRC(*rcConfig))
     framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet))
     return func() error {
-        return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
+        return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
     }
 }

View File

@@ -47,6 +47,7 @@ import (
     e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
     e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
+    e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
     "k8s.io/kubernetes/test/e2e/scheduling"
     testutils "k8s.io/kubernetes/test/utils"
     imageutils "k8s.io/kubernetes/test/utils/image"
@@ -169,7 +170,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
     ginkgo.It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
         ginkgo.By("Creating unschedulable pod")
         ReserveMemory(f, "memory-reservation", 1, int(1.1*float64(memAllocatableMb)), false, defaultTimeout)
-        defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
+        defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")

         ginkgo.By("Waiting for scale up hoping it won't happen")
         // Verify that the appropriate event was generated
@@ -196,7 +197,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
     simpleScaleUpTest := func(unready int) {
         ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
-        defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
+        defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")

         // Verify that cluster size is increased
         framework.ExpectNoError(WaitForClusterSizeFuncWithUnready(f.ClientSet,
@@ -229,7 +230,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
         ginkgo.By("Schedule a pod which requires GPU")
         framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
-        defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
+        defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")

         framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
             func(size int) bool { return size == nodeCount+1 }, scaleUpTimeout))
@@ -251,7 +252,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
         ginkgo.By("Schedule a single pod which requires GPU")
         framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
-        defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
+        defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")

         ginkgo.By("Enable autoscaler")
         framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 2))
@@ -259,7 +260,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
         framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 1)

         ginkgo.By("Scale GPU deployment")
-        framework.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true)
+        e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "gpu-pod-rc", 2, true)

         framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
             func(size int) bool { return size == nodeCount+2 }, scaleUpTimeout))
@@ -286,7 +287,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
         ginkgo.By("Schedule bunch of pods beyond point of filling default pool but do not request any GPUs")
         ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
-        defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
+        defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
         // Verify that cluster size is increased
         framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
             func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
@@ -310,7 +311,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
         ginkgo.By("Schedule a single pod which requires GPU")
         framework.ExpectNoError(ScheduleAnySingleGpuPod(f, "gpu-pod-rc"))
-        defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
+        defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")

         ginkgo.By("Enable autoscaler")
         framework.ExpectNoError(enableAutoscaler(gpuPoolName, 0, 1))
@@ -318,7 +319,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
         framework.ExpectEqual(len(getPoolNodes(f, gpuPoolName)), 1)

         ginkgo.By("Remove the only POD requiring GPU")
-        framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")
+        e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "gpu-pod-rc")

         framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
             func(size int) bool { return size == nodeCount }, scaleDownTimeout))
@@ -341,7 +342,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
         ginkgo.By("Schedule more pods than can fit and wait for cluster to scale-up")
         ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, 1*time.Second)
-        defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
+        defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")

         status, err = waitForScaleUpStatus(c, func(s *scaleUpStatus) bool {
             return s.status == caOngoingScaleUpStatus
@@ -395,7 +396,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
         ginkgo.By("Reserving 0.1x more memory than the cluster holds to trigger scale up")
         totalMemoryReservation := int(1.1 * float64(nodeCount*memAllocatableMb+extraMemMb))
-        defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
+        defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
         ReserveMemory(f, "memory-reservation", 100, totalMemoryReservation, false, defaultTimeout)

         // Verify, that cluster size is increased
@@ -419,7 +420,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
     ginkgo.It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
         scheduling.CreateHostPortPods(f, "host-port", nodeCount+2, false)
-        defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "host-port")
+        defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "host-port")

         framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
             func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))
@@ -434,12 +435,12 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
         }
         ginkgo.By("starting a pod with anti-affinity on each node")
         framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
-        defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
+        defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
         framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))

         ginkgo.By("scheduling extra pods with anti-affinity to existing ones")
         framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels))
-        defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
+        defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")

         framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
         framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
@@ -453,14 +454,14 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
             "anti-affinity": "yes",
         }
         framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
-        defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
+        defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")

         ginkgo.By("waiting for all pods before triggering scale up")
         framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))

         ginkgo.By("creating a pod requesting EmptyDir")
         framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, "extra-pod", labels, labels, emptyDirVolumes))
-        defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")
+        defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "extra-pod")

         framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
         framework.ExpectNoError(e2enode.WaitForReadyNodes(c, nodeCount+newPods, scaleUpTimeout))
@@ -517,7 +518,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
         }
         framework.ExpectNoError(runAntiAffinityPods(f, f.Namespace.Name, pods, "some-pod", labels, labels))
         defer func() {
-            framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
+            e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "some-pod")
             klog.Infof("RC and pods not using volume deleted")
         }()
@@ -530,7 +531,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
         volumes := buildVolumes(pv, pvc)
         framework.ExpectNoError(runVolumeAntiAffinityPods(f, f.Namespace.Name, newPods, pvcPodName, labels, labels, volumes))
         defer func() {
-            framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, pvcPodName)
+            e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, pvcPodName)
             framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
         }()
@@ -635,7 +636,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
         defer removeLabels(registeredNodes)

         framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
-        framework.ExpectNoError(framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "node-selector"))
+        framework.ExpectNoError(e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "node-selector"))
     })

     ginkgo.It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -653,7 +654,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
         extraPods := extraNodes + 1
         totalMemoryReservation := int(float64(extraPods) * 1.5 * float64(memAllocatableMb))
         ginkgo.By(fmt.Sprintf("Creating rc with %v pods too big to fit default-pool but fitting extra-pool", extraPods))
-        defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
+        defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
         ReserveMemory(f, "memory-reservation", extraPods, totalMemoryReservation, false, defaultTimeout)

         // Apparently GKE master is restarted couple minutes after the node pool is added
@@ -794,7 +795,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
         ginkgo.By("Run a scale-up test")
         ReserveMemory(f, "memory-reservation", 1, 100, false, 1*time.Second)
-        defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
+        defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")

         // Verify that cluster size is increased
         framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
@@ -922,7 +923,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
             e2enetwork.TestUnderTemporaryNetworkFailure(c, "default", ntb, testFunction)
         } else {
             ReserveMemory(f, "memory-reservation", 100, nodeCount*memAllocatableMb, false, defaultTimeout)
-            defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
+            defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "memory-reservation")
             time.Sleep(scaleUpTimeout)
             currentNodes, err := e2enode.GetReadySchedulableNodes(f.ClientSet)
             framework.ExpectNoError(err)
@@ -1020,7 +1021,7 @@ func runDrainTest(f *framework.Framework, migSizes map[string]int, namespace str
     labelMap := map[string]string{"test_id": testID}
     framework.ExpectNoError(runReplicatedPodOnEachNode(f, nodes.Items, namespace, podsPerNode, "reschedulable-pods", labelMap, 0))
-    defer framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, "reschedulable-pods")
+    defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, namespace, "reschedulable-pods")

     ginkgo.By("Create a PodDisruptionBudget")
     minAvailable := intstr.FromInt(numPods - pdbSize)
@@ -1303,7 +1304,7 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
         PriorityClassName: priorityClassName,
     }
     for start := time.Now(); time.Since(start) < rcCreationRetryTimeout; time.Sleep(rcCreationRetryDelay) {
-        err := framework.RunRC(*config)
+        err := e2erc.RunRC(*config)
         if err != nil && strings.Contains(err.Error(), "Error creating replication controller") {
             klog.Warningf("Failed to create memory reservation: %v", err)
             continue
@@ -1312,7 +1313,7 @@ func reserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
             framework.ExpectNoError(err)
         }
         return func() error {
-            return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
+            return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
         }
     }
     framework.Failf("Failed to reserve memory within timeout")
@@ -1547,7 +1548,7 @@ func ScheduleGpuPod(f *framework.Framework, id string, gpuType string, gpuLimit
         config.NodeSelector = map[string]string{gpuLabel: gpuType}
     }

-    err := framework.RunRC(*config)
+    err := e2erc.RunRC(*config)
     if err != nil {
         return err
     }
@@ -1566,7 +1567,7 @@ func runAntiAffinityPods(f *framework.Framework, namespace string, pods int, id
         Replicas: pods,
         Labels:   podLabels,
     }
-    err := framework.RunRC(*config)
+    err := e2erc.RunRC(*config)
     if err != nil {
         return err
     }
@@ -1589,7 +1590,7 @@ func runVolumeAntiAffinityPods(f *framework.Framework, namespace string, pods in
         Replicas: pods,
         Labels:   podLabels,
     }
-    err := framework.RunRC(*config)
+    err := e2erc.RunRC(*config)
     if err != nil {
         return err
     }
@@ -1670,7 +1671,7 @@ func runReplicatedPodOnEachNode(f *framework.Framework, nodes []v1.Node, namespa
         Labels:     labels,
         MemRequest: memRequest,
     }
-    err := framework.RunRC(*config)
+    err := e2erc.RunRC(*config)
     if err != nil {
         return err
     }

View File

@@ -75,6 +75,7 @@ go_library(
         "//test/e2e/framework/network:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
+        "//test/e2e/framework/rc:go_default_library",
         "//test/e2e/framework/volume:go_default_library",
        "//test/utils:go_default_library",
         "//test/utils/image:go_default_library",

View File

@@ -29,6 +29,7 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
     imageutils "k8s.io/kubernetes/test/utils/image"

     "github.com/onsi/ginkgo"
@@ -204,7 +205,7 @@ func RestartNodes(c clientset.Interface, nodes []v1.Node) error {
 func rcByNamePort(name string, replicas int32, image string, containerArgs []string, port int, protocol v1.Protocol,
     labels map[string]string, gracePeriod *int64) *v1.ReplicationController {

-    return framework.RcByNameContainer(name, replicas, image, labels, v1.Container{
+    return e2erc.ByNameContainer(name, replicas, image, labels, v1.Container{
         Name:  name,
         Image: image,
         Args:  containerArgs,

View File

@@ -38,6 +38,7 @@ import (
     "k8s.io/component-base/version"
     commontest "k8s.io/kubernetes/test/e2e/common"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
     e2elog "k8s.io/kubernetes/test/e2e/framework/log"
     e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -259,7 +260,7 @@ func setupSuite() {
     // number equal to the number of allowed not-ready nodes).
     if err := e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), int32(framework.TestContext.AllowedNotReadyNodes), podStartupTimeout, map[string]string{}); err != nil {
         framework.DumpAllNamespaceInfo(c, metav1.NamespaceSystem)
-        framework.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf)
+        e2ekubectl.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf)
         runKubernetesServiceTestContainer(c, metav1.NamespaceDefault)
         framework.Failf("Error waiting for all pods to be running and ready: %v", err)
     }

View File

@@ -18,7 +18,6 @@ go_library(
         "profile_gatherer.go",
         "provider.go",
         "psp.go",
-        "rc_util.go",
         "resource_usage_gatherer.go",
         "size.go",
         "skip.go",
@@ -30,7 +29,6 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//pkg/api/v1/pod:go_default_library",
-        "//pkg/apis/core:go_default_library",
         "//pkg/client/conditions:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/features:go_default_library",
@@ -116,6 +114,7 @@ filegroup(
         "//test/e2e/framework/gpu:all-srcs",
         "//test/e2e/framework/ingress:all-srcs",
         "//test/e2e/framework/job:all-srcs",
+        "//test/e2e/framework/kubectl:all-srcs",
         "//test/e2e/framework/kubelet:all-srcs",
         "//test/e2e/framework/lifecycle:all-srcs",
         "//test/e2e/framework/log:all-srcs",
@@ -132,6 +131,7 @@ filegroup(
         "//test/e2e/framework/providers/openstack:all-srcs",
         "//test/e2e/framework/providers/vsphere:all-srcs",
         "//test/e2e/framework/pv:all-srcs",
+        "//test/e2e/framework/rc:all-srcs",
         "//test/e2e/framework/replicaset:all-srcs",
         "//test/e2e/framework/resource:all-srcs",
         "//test/e2e/framework/security:all-srcs",

View File

@@ -16,6 +16,8 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/scale:go_default_library",
         "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/kubectl:go_default_library",
+        "//test/e2e/framework/rc:go_default_library",
         "//test/e2e/framework/replicaset:go_default_library",
         "//test/e2e/framework/service:go_default_library",
         "//test/utils:go_default_library",

View File

@@ -32,6 +32,8 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     api "k8s.io/kubernetes/pkg/apis/core"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
+    e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
     "k8s.io/kubernetes/test/e2e/framework/replicaset"
     e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
     testutils "k8s.io/kubernetes/test/utils"
@@ -483,14 +485,14 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
     switch kind {
     case KindRC:
-        framework.ExpectNoError(framework.RunRC(rcConfig))
+        framework.ExpectNoError(e2erc.RunRC(rcConfig))
     case KindDeployment:
         dpConfig := testutils.DeploymentConfig{
             RCConfig: rcConfig,
         }
         ginkgo.By(fmt.Sprintf("creating deployment %s in namespace %s", dpConfig.Name, dpConfig.Namespace))
         dpConfig.NodeDumpFunc = framework.DumpNodeDebugInfo
-        dpConfig.ContainerDumpFunc = framework.LogFailedContainers
+        dpConfig.ContainerDumpFunc = e2ekubectl.LogFailedContainers
         framework.ExpectNoError(testutils.RunDeployment(dpConfig))
     case KindReplicaSet:
         rsConfig := testutils.ReplicaSetConfig{
@@ -532,7 +534,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name st
         Command:   []string{"/controller", "--consumer-service-name=" + name, "--consumer-service-namespace=" + ns, "--consumer-port=80"},
         DNSPolicy: &dnsClusterFirst,
     }
-    framework.ExpectNoError(framework.RunRC(controllerRcConfig))
+    framework.ExpectNoError(e2erc.RunRC(controllerRcConfig))

     // Wait for endpoints to propagate for the controller service.
     framework.ExpectNoError(framework.WaitForServiceEndpointsNum(

View File

@@ -0,0 +1,30 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["kubectl_utils.go"],
+    importpath = "k8s.io/kubernetes/test/e2e/framework/kubectl",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/pod:go_default_library",
+        "//test/utils:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)

View File

@@ -0,0 +1,60 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubectl
+
+import (
+    "strings"
+
+    v1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    clientset "k8s.io/client-go/kubernetes"
+
+    "k8s.io/kubernetes/test/e2e/framework"
+    e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+    testutils "k8s.io/kubernetes/test/utils"
+)
+
+// LogFailedContainers runs `kubectl logs` on failed containers.
+func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) {
+    podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
+    if err != nil {
+        logFunc("Error getting pods in namespace '%s': %v", ns, err)
+        return
+    }
+    logFunc("Running kubectl logs on non-ready containers in %v", ns)
+    for _, pod := range podList.Items {
+        if res, err := testutils.PodRunningReady(&pod); !res || err != nil {
+            kubectlLogPod(c, pod, "", framework.Logf)
+        }
+    }
+}
+
+func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) {
+    for _, container := range pod.Spec.Containers {
+        if strings.Contains(container.Name, containerNameSubstr) {
+            // Contains() matches all strings if substr is empty
+            logs, err := e2epod.GetPodLogs(c, pod.Namespace, pod.Name, container.Name)
+            if err != nil {
+                logs, err = e2epod.GetPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name)
+                if err != nil {
+                    logFunc("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err)
+                }
+            }
+            logFunc("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, container.Name, pod.Spec.NodeName)
+            logFunc("%s : STARTLOG\n%s\nENDLOG for container %v:%v:%v", containerNameSubstr, logs, pod.Namespace, pod.Name, container.Name)
+        }
+    }
+}
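
For orientation, a minimal sketch of calling the relocated helper, mirroring the setupSuite() call site earlier in this commit; the wrapper function name is illustrative:

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
)

// dumpSystemPodLogs logs every non-ready container in kube-system,
// as setupSuite() now does via the new package.
func dumpSystemPodLogs(c clientset.Interface) {
	e2ekubectl.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf)
}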

View File

@@ -0,0 +1,33 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["rc_utils.go"],
+    importpath = "k8s.io/kubernetes/test/e2e/framework/rc",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//pkg/apis/core:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
+        "//staging/src/k8s.io/client-go/scale:go_default_library",
+        "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/kubectl:go_default_library",
+        "//test/utils:go_default_library",
+        "//vendor/github.com/onsi/ginkgo:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)

View File

@@ -1,5 +1,5 @@
 /*
-Copyright 2017 The Kubernetes Authors.
+Copyright 2019 The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-package framework
+package rc

 import (
     "fmt"
@@ -26,11 +26,13 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     scaleclient "k8s.io/client-go/scale"
     api "k8s.io/kubernetes/pkg/apis/core"
+    "k8s.io/kubernetes/test/e2e/framework"
+    e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
     testutils "k8s.io/kubernetes/test/utils"
 )

-// RcByNameContainer returns a ReplicationController with specified name and container
-func RcByNameContainer(name string, replicas int32, image string, labels map[string]string, c v1.Container,
+// ByNameContainer returns a ReplicationController with specified name and container
+func ByNameContainer(name string, replicas int32, image string, labels map[string]string, c v1.Container,
     gracePeriod *int64) *v1.ReplicationController {

     zeroGracePeriod := int64(0)
@@ -68,19 +70,19 @@ func RcByNameContainer(name string, replicas int32, image string, labels map[str
 // DeleteRCAndWaitForGC deletes only the Replication Controller and waits for GC to delete the pods.
 func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error {
-    return DeleteResourceAndWaitForGC(c, api.Kind("ReplicationController"), ns, name)
+    return framework.DeleteResourceAndWaitForGC(c, api.Kind("ReplicationController"), ns, name)
 }

 // ScaleRC scales Replication Controller to be desired size.
 func ScaleRC(clientset clientset.Interface, scalesGetter scaleclient.ScalesGetter, ns, name string, size uint, wait bool) error {
-    return ScaleResource(clientset, scalesGetter, ns, name, size, wait, api.Kind("ReplicationController"), api.SchemeGroupVersion.WithResource("replicationcontrollers"))
+    return framework.ScaleResource(clientset, scalesGetter, ns, name, size, wait, api.Kind("ReplicationController"), api.SchemeGroupVersion.WithResource("replicationcontrollers"))
 }

 // RunRC Launches (and verifies correctness) of a Replication Controller
 // and will wait for all pods it spawns to become "Running".
 func RunRC(config testutils.RCConfig) error {
     ginkgo.By(fmt.Sprintf("creating replication controller %s in namespace %s", config.Name, config.Namespace))
-    config.NodeDumpFunc = DumpNodeDebugInfo
-    config.ContainerDumpFunc = LogFailedContainers
+    config.NodeDumpFunc = framework.DumpNodeDebugInfo
+    config.ContainerDumpFunc = e2ekubectl.LogFailedContainers
     return testutils.RunRC(config)
 }
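
Taken together, the relocated helpers cover the whole RC lifecycle: RunRC creates and verifies, ScaleRC resizes, and DeleteRCAndWaitForGC tears down. A minimal sketch under the new import path; the wrapper function, RC name, and image are illustrative, not part of the commit:

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
	testutils "k8s.io/kubernetes/test/utils"
)

// rcLifecycle runs an RC, scales it, then deletes it and waits for GC.
func rcLifecycle(f *framework.Framework) error {
	if err := e2erc.RunRC(testutils.RCConfig{
		Client:    f.ClientSet,
		Name:      "baz", // illustrative RC name
		Namespace: f.Namespace.Name,
		Image:     "k8s.gcr.io/pause:3.1", // illustrative image
		Replicas:  1,
	}); err != nil {
		return err
	}
	if err := e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, "baz", 2, true); err != nil {
		return err
	}
	return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, "baz")
}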

View File

@@ -17,6 +17,7 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//staging/src/k8s.io/client-go/kubernetes/typed/apps/v1:go_default_library",
         "//test/e2e/framework:go_default_library",
+        "//test/e2e/framework/kubectl:go_default_library",
         "//test/utils:go_default_library",
         "//vendor/github.com/onsi/ginkgo:go_default_library",
     ],

View File

@@ -26,6 +26,7 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     appsclient "k8s.io/client-go/kubernetes/typed/apps/v1"
     "k8s.io/kubernetes/test/e2e/framework"
+    e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
     testutils "k8s.io/kubernetes/test/utils"
 )
@@ -33,7 +34,7 @@ import (
 func RunReplicaSet(config testutils.ReplicaSetConfig) error {
     ginkgo.By(fmt.Sprintf("creating replicaset %s in namespace %s", config.Name, config.Namespace))
     config.NodeDumpFunc = framework.DumpNodeDebugInfo
-    config.ContainerDumpFunc = framework.LogFailedContainers
+    config.ContainerDumpFunc = e2ekubectl.LogFailedContainers
     return testutils.RunReplicaSet(config)
 }

View File

@@ -38,6 +38,7 @@ go_library(
         "//test/e2e/framework/network:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
+        "//test/e2e/framework/rc:go_default_library",
         "//test/e2e/framework/ssh:go_default_library",
         "//test/utils:go_default_library",
         "//test/utils/image:go_default_library",

View File

@@ -31,6 +31,7 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     "k8s.io/kubernetes/test/e2e/framework"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+    e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
     e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
     testutils "k8s.io/kubernetes/test/utils"
 )
@@ -60,7 +61,7 @@ func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string
         CreatedPods:          &createdPods,
         MaxContainerFailures: &maxContainerFailures,
     }
-    err = framework.RunRC(config)
+    err = e2erc.RunRC(config)
     if err != nil {
         return podNames, "", err
     }
@@ -87,7 +88,7 @@ func StartServeHostnameService(c clientset.Interface, svc *v1.Service, ns string
 // StopServeHostnameService stops the given service.
 func StopServeHostnameService(clientset clientset.Interface, ns, name string) error {
-    if err := framework.DeleteRCAndWaitForGC(clientset, ns, name); err != nil {
+    if err := e2erc.DeleteRCAndWaitForGC(clientset, ns, name); err != nil {
         return err
     }
     if err := clientset.CoreV1().Services(ns).Delete(name, nil); err != nil {

View File

@@ -45,6 +45,7 @@ import (
     e2enetwork "k8s.io/kubernetes/test/e2e/framework/network"
     e2enode "k8s.io/kubernetes/test/e2e/framework/node"
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+    e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
     testutils "k8s.io/kubernetes/test/utils"
     imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -931,7 +932,7 @@ func (j *TestJig) CreateServicePods(replica int) error {
         Timeout:  framework.PodReadyBeforeTimeout,
         Replicas: replica,
     }
-    return framework.RunRC(config)
+    return e2erc.RunRC(config)
 }

 // CreateTCPUDPServicePods creates a replication controller with the label same as service. Service listens to TCP and UDP.
@@ -947,5 +948,5 @@ func (j *TestJig) CreateTCPUDPServicePods(replica int) error {
         Timeout:  framework.PodReadyBeforeTimeout,
         Replicas: replica,
     }
-    return framework.RunRC(config)
+    return e2erc.RunRC(config)
 }

View File

@@ -253,38 +253,6 @@ func NodeOSDistroIs(supportedNodeOsDistros ...string) bool {
     return false
 }

-func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) {
-    for _, container := range pod.Spec.Containers {
-        if strings.Contains(container.Name, containerNameSubstr) {
-            // Contains() matches all strings if substr is empty
-            logs, err := e2epod.GetPodLogs(c, pod.Namespace, pod.Name, container.Name)
-            if err != nil {
-                logs, err = e2epod.GetPreviousPodLogs(c, pod.Namespace, pod.Name, container.Name)
-                if err != nil {
-                    logFunc("Failed to get logs of pod %v, container %v, err: %v", pod.Name, container.Name, err)
-                }
-            }
-            logFunc("Logs of %v/%v:%v on node %v", pod.Namespace, pod.Name, container.Name, pod.Spec.NodeName)
-            logFunc("%s : STARTLOG\n%s\nENDLOG for container %v:%v:%v", containerNameSubstr, logs, pod.Namespace, pod.Name, container.Name)
-        }
-    }
-}
-
-// LogFailedContainers runs `kubectl logs` on a failed containers.
-func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) {
-    podList, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{})
-    if err != nil {
-        logFunc("Error getting pods in namespace '%s': %v", ns, err)
-        return
-    }
-    logFunc("Running kubectl logs on non-ready containers in %v", ns)
-    for _, pod := range podList.Items {
-        if res, err := testutils.PodRunningReady(&pod); !res || err != nil {
-            kubectlLogPod(c, pod, "", Logf)
-        }
-    }
-}
-
 // DeleteNamespaces deletes all namespaces that match the given delete and skip filters.
 // Filter is by simple strings.Contains; first skip filter, then delete filter.
 // Returns the list of deleted namespaces or an error.

View File

@@ -69,6 +69,7 @@ go_library(
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/providers/gce:go_default_library",
+        "//test/e2e/framework/rc:go_default_library",
         "//test/e2e/framework/service:go_default_library",
         "//test/e2e/framework/ssh:go_default_library",
         "//test/e2e/network/scale:go_default_library",

View File

@@ -34,6 +34,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -160,9 +161,9 @@ var _ = SIGDescribe("Proxy", func() {
 		Labels:      labels,
 		CreatedPods: &pods,
 	}
-	err = framework.RunRC(cfg)
+	err = e2erc.RunRC(cfg)
 	framework.ExpectNoError(err)
-	defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, cfg.Name)
+	defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, cfg.Name)
 	err = waitForEndpoint(f.ClientSet, f.Namespace.Name, service.Name)
 	framework.ExpectNoError(err)
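Each call site above follows the same lifecycle: populate a testutils.RCConfig, start it with e2erc.RunRC (which waits until every replica is running, as a later comment in this commit notes), and clean up with e2erc.DeleteRCAndWaitForGC. A condensed sketch of that pattern; the package name, helper name, image, and replica count are placeholders, not from this diff:

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
	testutils "k8s.io/kubernetes/test/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// runPauseRC is a hypothetical helper showing the RunRC lifecycle.
func runPauseRC(f *framework.Framework) {
	cfg := testutils.RCConfig{
		Client:    f.ClientSet,
		Name:      "example-rc", // placeholder name
		Namespace: f.Namespace.Name,
		Image:     imageutils.GetPauseImageName(),
		Replicas:  2, // placeholder size
	}
	// RunRC creates the RC and blocks until all replicas are running.
	framework.ExpectNoError(e2erc.RunRC(cfg))
	// DeleteRCAndWaitForGC removes the RC and waits for the garbage
	// collector to delete its pods, leaving the namespace clean.
	defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, cfg.Name)
	// ... exercise the pods here ...
}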


@@ -43,6 +43,7 @@ import (
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/framework/providers/gce"
+	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 	e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -1417,7 +1418,7 @@ var _ = SIGDescribe("Services", func() {
 			PublishNotReadyAddresses: true,
 		},
 	}
-	rcSpec := framework.RcByNameContainer(t.Name, 1, t.Image, t.Labels, v1.Container{
+	rcSpec := e2erc.ByNameContainer(t.Name, 1, t.Image, t.Labels, v1.Container{
 		Args:  []string{"netexec", fmt.Sprintf("--http-port=%d", port)},
 		Name:  t.Name,
 		Image: t.Image,
@@ -1470,7 +1471,7 @@ var _ = SIGDescribe("Services", func() {
 	}
 	ginkgo.By("Scaling down replication controller to zero")
-	framework.ScaleRC(f.ClientSet, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false)
+	e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, t.Namespace, rcSpec.Name, 0, false)
 	ginkgo.By("Update service to not tolerate unready services")
 	_, err = e2eservice.UpdateService(f.ClientSet, t.Namespace, t.ServiceName, func(s *v1.Service) {
@@ -2254,7 +2255,7 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
 	err := e2eservice.TestHTTPHealthCheckNodePort(publicIP, healthCheckNodePort, path, e2eservice.KubeProxyEndpointLagTimeout, expectedSuccess, threshold)
 	framework.ExpectNoError(err)
 	}
-	framework.ExpectNoError(framework.DeleteRCAndWaitForGC(f.ClientSet, namespace, serviceName))
+	framework.ExpectNoError(e2erc.DeleteRCAndWaitForGC(f.ClientSet, namespace, serviceName))
 	}
 })
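RcByNameContainer also moved, dropping the redundant prefix to become e2erc.ByNameContainer: it builds a ReplicationController spec wrapping a single container, which e2erc.ScaleRC can then resize. A sketch inferred from the call sites above; the names, labels, and image choice are placeholders, and the trailing ScaleRC argument is read as a wait flag from how it is used here:

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

func scaleDownExample(f *framework.Framework) {
	labels := map[string]string{"app": "example"} // placeholder labels
	image := imageutils.GetE2EImage(imageutils.Agnhost)
	// One replica of a single-container RC named "example".
	rcSpec := e2erc.ByNameContainer("example", 1, image, labels, v1.Container{
		Name:  "example",
		Image: image,
	})
	_, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(rcSpec)
	framework.ExpectNoError(err)
	// Scale to zero; the final bool asks ScaleRC to wait for the resize,
	// matching the usage in the scheduling tests later in this commit.
	e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, f.Namespace.Name, rcSpec.Name, 0, true)
}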


@@ -31,6 +31,7 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/flowcontrol"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -138,7 +139,7 @@ func runServiceLatencies(f *framework.Framework, inParallel, total int, acceptab
 		Replicas:     1,
 		PollInterval: time.Second,
 	}
-	if err := framework.RunRC(cfg); err != nil {
+	if err := e2erc.RunRC(cfg); err != nil {
 		return nil, err
 	}


@@ -40,10 +40,12 @@ go_library(
         "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/job:go_default_library",
+        "//test/e2e/framework/kubectl:go_default_library",
         "//test/e2e/framework/kubelet:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/perf:go_default_library",
         "//test/e2e/framework/pod:go_default_library",
+        "//test/e2e/framework/rc:go_default_library",
         "//test/e2e/framework/security:go_default_library",
         "//test/e2e/framework/ssh:go_default_library",
         "//test/e2e/framework/volume:go_default_library",


@@ -18,6 +18,7 @@ package node
 import (
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 	e2esecurity "k8s.io/kubernetes/test/e2e/framework/security"
 	"github.com/onsi/ginkgo"
@@ -35,7 +36,7 @@ var _ = SIGDescribe("AppArmor", func() {
 		if !ginkgo.CurrentGinkgoTestDescription().Failed {
 			return
 		}
-		framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
+		e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
 	})
 	ginkgo.It("should enforce an AppArmor profile", func() {
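This AfterEach shape, logging failed containers only when the spec failed and optionally gated on TestContext.DumpLogsOnFailure, recurs in the upgrade and node tests later in this commit. Consolidated, the post-move wiring looks like the sketch below; the Describe block and framework name are illustrative scaffolding, not from this diff:

package example

import (
	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"

	"github.com/onsi/ginkgo"
)

var _ = framework.KubeDescribe("Example", func() {
	f := framework.NewDefaultFramework("example")

	ginkgo.AfterEach(func() {
		if !ginkgo.CurrentGinkgoTestDescription().Failed {
			return
		}
		if framework.TestContext.DumpLogsOnFailure {
			// Relocated helper: was framework.LogFailedContainers.
			e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
		}
	})
})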


@@ -32,6 +32,7 @@ import (
 	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 	e2essh "k8s.io/kubernetes/test/e2e/framework/ssh"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
 	testutils "k8s.io/kubernetes/test/utils"
@@ -340,7 +341,7 @@ var _ = SIGDescribe("kubelet", func() {
 			ginkgo.By(fmt.Sprintf("Creating a RC of %d pods and wait until all pods of this RC are running", totalPods))
 			rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID()))
-			err := framework.RunRC(testutils.RCConfig{
+			err := e2erc.RunRC(testutils.RCConfig{
 				Client:    f.ClientSet,
 				Name:      rcName,
 				Namespace: f.Namespace.Name,
@@ -351,7 +352,7 @@ var _ = SIGDescribe("kubelet", func() {
 			framework.ExpectNoError(err)
 			// Perform a sanity check so that we know all desired pods are
 			// running on the nodes according to kubelet. The timeout is set to
-			// only 30 seconds here because framework.RunRC already waited for all pods to
+			// only 30 seconds here because e2erc.RunRC already waited for all pods to
 			// transition to the running status.
 			err = waitTillNPodsRunningOnNodes(f.ClientSet, nodeNames, rcName, ns, totalPods, time.Second*30)
 			framework.ExpectNoError(err)
@@ -360,7 +361,7 @@ var _ = SIGDescribe("kubelet", func() {
 			}
 			ginkgo.By("Deleting the RC")
-			framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
+			e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
 			// Check that the pods really are gone by querying /runningpods on the
 			// node. The /runningpods handler checks the container runtime (or its
 			// cache) and returns a list of running pods. Some possible causes of


@@ -29,6 +29,7 @@ import (
 	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2eperf "k8s.io/kubernetes/test/e2e/framework/perf"
+	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 	"k8s.io/kubernetes/test/e2e/perftype"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -70,7 +71,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 	rcName := fmt.Sprintf("resource%d-%s", totalPods, string(uuid.NewUUID()))
 	// TODO: Use a more realistic workload
-	err := framework.RunRC(testutils.RCConfig{
+	err := e2erc.RunRC(testutils.RCConfig{
 		Client:    f.ClientSet,
 		Name:      rcName,
 		Namespace: f.Namespace.Name,
@@ -118,7 +119,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
 	verifyCPULimits(expectedCPU, cpuSummary)
 	ginkgo.By("Deleting the RC")
-	framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
+	e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
 }
 func verifyMemoryLimits(c clientset.Interface, expected e2ekubelet.ResourceUsagePerContainer, actual e2ekubelet.ResourceUsagePerNode) {


@@ -47,6 +47,7 @@ go_library(
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/providers/gce:go_default_library",
         "//test/e2e/framework/pv:go_default_library",
+        "//test/e2e/framework/rc:go_default_library",
         "//test/e2e/framework/replicaset:go_default_library",
         "//test/e2e/framework/service:go_default_library",
         "//test/utils:go_default_library",


@@ -32,6 +32,7 @@ import (
 	e2ekubelet "k8s.io/kubernetes/test/e2e/framework/kubelet"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	k8utilnet "k8s.io/utils/net"
@@ -78,7 +79,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		rc, err := cs.CoreV1().ReplicationControllers(ns).Get(RCName, metav1.GetOptions{})
 		if err == nil && *(rc.Spec.Replicas) != 0 {
 			ginkgo.By("Cleaning up the replication controller")
-			err := framework.DeleteRCAndWaitForGC(f.ClientSet, ns, RCName)
+			err := e2erc.DeleteRCAndWaitForGC(f.ClientSet, ns, RCName)
 			framework.ExpectNoError(err)
 		}
 	})
@@ -783,7 +784,7 @@ func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectR
 		Replicas:  replicas,
 		HostPorts: map[string]int{"port1": 4321},
 	}
-	err := framework.RunRC(*config)
+	err := e2erc.RunRC(*config)
 	if expectRunning {
 		framework.ExpectNoError(err)
 	}
@@ -803,7 +804,7 @@ func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nod
 		HostPorts:    map[string]int{"port1": 4321},
 		NodeSelector: nodeSelector,
 	}
-	err := framework.RunRC(*config)
+	err := e2erc.RunRC(*config)
 	if expectRunning {
 		return err
 	}


@@ -40,6 +40,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -248,7 +249,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 		// Cleanup the replication controller when we are done.
 		defer func() {
 			// Resize the replication controller to zero to get rid of pods.
-			if err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rc.Name); err != nil {
+			if err := e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rc.Name); err != nil {
 				framework.Logf("Failed to cleanup replication controller %v: %v.", rc.Name, err)
 			}
 		}()
@@ -290,7 +291,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 		ginkgo.By(fmt.Sprintf("Scale the RC: %s to len(nodeList.Item)-1 : %v.", rc.Name, len(nodeList.Items)-1))
-		framework.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rc.Name, uint(len(nodeList.Items)-1), true)
+		e2erc.ScaleRC(f.ClientSet, f.ScalesGetter, ns, rc.Name, uint(len(nodeList.Items)-1), true)
 		testPods, err := cs.CoreV1().Pods(ns).List(metav1.ListOptions{
 			LabelSelector: "name=scheduler-priority-avoid-pod",
 		})


@@ -30,6 +30,7 @@ import (
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 )
@@ -209,7 +210,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string, ar
 	// Cleanup the replication controller when we are done.
 	defer func() {
 		// Resize the replication controller to zero to get rid of pods.
-		if err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, controller.Name); err != nil {
+		if err := e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, controller.Name); err != nil {
 			framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
 		}
 	}()


@@ -71,6 +71,7 @@ go_library(
         "//test/e2e/framework/pod:go_default_library",
         "//test/e2e/framework/providers/gce:go_default_library",
         "//test/e2e/framework/pv:go_default_library",
+        "//test/e2e/framework/rc:go_default_library",
         "//test/e2e/framework/ssh:go_default_library",
         "//test/e2e/framework/statefulset:go_default_library",
         "//test/e2e/framework/testfiles:go_default_library",


@@ -27,6 +27,7 @@ import (
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -400,7 +401,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volume
 	framework.ExpectNoError(err, "error creating replication controller")
 	defer func() {
-		err := framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
+		err := e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, rcName)
 		framework.ExpectNoError(err)
 	}()


@@ -37,6 +37,7 @@ go_library(
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/autoscaling:go_default_library",
         "//test/e2e/framework/job:go_default_library",
+        "//test/e2e/framework/kubectl:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/security:go_default_library",
         "//test/e2e/framework/service:go_default_library",


@@ -20,6 +20,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 	e2esecurity "k8s.io/kubernetes/test/e2e/framework/security"
 	"github.com/onsi/ginkgo"
@@ -79,7 +80,7 @@ func (t *AppArmorUpgradeTest) Test(f *framework.Framework, done <-chan struct{},
 func (t *AppArmorUpgradeTest) Teardown(f *framework.Framework) {
 	// rely on the namespace deletion to clean up everything
 	ginkgo.By("Logging container failures")
-	framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
+	e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
 }
 func (t *AppArmorUpgradeTest) verifyPodStillUp(f *framework.Framework) {


@@ -189,6 +189,7 @@ go_test(
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/deviceplugin:go_default_library",
         "//test/e2e/framework/gpu:go_default_library",
+        "//test/e2e/framework/kubectl:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
         "//test/e2e/framework/node:go_default_library",
         "//test/e2e/framework/pod:go_default_library",


@@ -23,6 +23,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kubeletresourcemetricsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/resourcemetrics/v1alpha1"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 	"k8s.io/kubernetes/test/e2e/framework/metrics"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
@@ -101,7 +102,7 @@ var _ = framework.KubeDescribe("ResourceMetricsAPI", func() {
 			return
 		}
 		if framework.TestContext.DumpLogsOnFailure {
-			framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
+			e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
 		}
 		ginkgo.By("Recording processes in system cgroups")
 		recordSystemCgroupProcesses()


@@ -28,6 +28,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	kubeletstatsv1alpha1 "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 	"k8s.io/kubernetes/test/e2e/framework/volume"
 	systemdutil "github.com/coreos/go-systemd/util"
@@ -45,7 +46,7 @@ var _ = framework.KubeDescribe("Summary API [NodeConformance]", func() {
 			return
 		}
 		if framework.TestContext.DumpLogsOnFailure {
-			framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
+			e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
 		}
 		ginkgo.By("Recording processes in system cgroups")
 		recordSystemCgroupProcesses()