Initial breakout of scheduling e2es to assist in both assignment and refactoring.
Timothy St. Clair 2017-03-12 16:49:33 -05:00
parent 5e29e1ee05
commit 6cc40678b6
13 changed files with 150 additions and 80 deletions

View File

@@ -1,4 +1,14 @@
aliases:
+ sig-scheduling-maintainers:
+ - davidopp
+ - timothysc
+ - wojtek-t
+ sig-scheduling:
+ - davidopp
+ - timothysc
+ - wojtek-t
+ - k82cn
+ - jayunit100
sig-cli-maintainers:
- adohe
- brendandburns

View File

@@ -6,3 +6,5 @@ reviewers:
- davidopp
- timothysc
- wojtek-t
+ - k82cn
+ - jayunit100

View File

@@ -66,7 +66,6 @@ go_library(
"networking.go",
"networking_perf.go",
"nodeoutofdisk.go",
"opaque_resource.go",
"pd.go",
"persistent_volumes.go",
"persistent_volumes-disruptive.go",
@@ -82,11 +81,9 @@ go_library(
"rc.go",
"reboot.go",
"replica_set.go",
"rescheduler.go",
"resize_nodes.go",
"resource_quota.go",
"restart.go",
"scheduler_predicates.go",
"security_context.go",
"service.go",
"service_accounts.go",
@@ -147,7 +144,6 @@ go_library(
"//pkg/util:go_default_library",
"//pkg/util/exec:go_default_library",
"//pkg/util/logs:go_default_library",
"//pkg/util/system:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//plugin/pkg/admission/serviceaccount:go_default_library",
@@ -156,6 +152,7 @@ go_library(
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/generated:go_default_library",
"//test/e2e/scheduling:go_default_library",
"//test/e2e/upgrades:go_default_library",
"//test/e2e_federation:go_default_library",
"//test/images/net/nat:go_default_library",
@@ -173,7 +170,6 @@ go_library(
"//vendor:github.com/onsi/ginkgo/config",
"//vendor:github.com/onsi/ginkgo/reporters",
"//vendor:github.com/onsi/gomega",
"//vendor:github.com/stretchr/testify/assert",
"//vendor:github.com/vmware/govmomi/find",
"//vendor:github.com/vmware/govmomi/vim25/types",
"//vendor:golang.org/x/crypto/ssh",
@@ -230,6 +226,7 @@ go_test(
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/metrics:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/scheduling:go_default_library",
"//test/utils:go_default_library",
"//vendor:github.com/onsi/ginkgo",
"//vendor:github.com/onsi/gomega",
@@ -267,6 +264,7 @@ filegroup(
"//test/e2e/framework:all-srcs",
"//test/e2e/generated:all-srcs",
"//test/e2e/perftype:all-srcs",
"//test/e2e/scheduling:all-srcs",
"//test/e2e/testing-manifests:all-srcs",
"//test/e2e/upgrades:all-srcs",
],

View File

@@ -32,6 +32,7 @@ import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/scheduling"
testutils "k8s.io/kubernetes/test/utils"
"github.com/golang/glog"
@@ -166,7 +167,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
})
It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
- CreateHostPortPods(f, "host-port", nodeCount+2, false)
+ scheduling.CreateHostPortPods(f, "host-port", nodeCount+2, false)
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "host-port")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
@@ -509,40 +510,6 @@ func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nod
}
}
func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
By(fmt.Sprintf("Running RC which reserves host port"))
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: defaultTimeout,
Image: framework.GetPauseImageName(f.ClientSet),
Replicas: replicas,
HostPorts: map[string]int{"port1": 4321},
}
err := framework.RunRC(*config)
if expectRunning {
framework.ExpectNoError(err)
}
}
func ReserveCpu(f *framework.Framework, id string, replicas, millicores int) {
By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
request := int64(millicores / replicas)
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: defaultTimeout,
Image: framework.GetPauseImageName(f.ClientSet),
Replicas: replicas,
CpuRequest: request,
}
framework.ExpectNoError(framework.RunRC(*config))
}
func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool) {
By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
request := int64(1024 * 1024 * megabytes / replicas)

View File

@@ -20,6 +20,7 @@ import (
"testing"
"k8s.io/kubernetes/test/e2e/framework"
_ "k8s.io/kubernetes/test/e2e/scheduling"
)
func init() {

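The blank import above is load-bearing: importing test/e2e/scheduling for side effects runs that package's top-level var initializers, and it is those var _ = framework.KubeDescribe(...) expressions that register the scheduling specs with Ginkgo. A toy sketch of the pattern, with hypothetical names, not suite code:

package scheduling

import . "github.com/onsi/ginkgo"

// Evaluated at package init time. Describe registers the spec tree with
// Ginkgo, so a blank import of this package is enough to add the specs
// to any suite that calls RunSpecs.
var _ = Describe("example spec", func() {
	It("runs when the suite runs", func() {})
})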
View File

@@ -5426,3 +5426,24 @@ func DescribeIng(ns string) {
"describe", "ing", fmt.Sprintf("--namespace=%v", ns))
Logf(desc)
}
// NewTestPod returns a pod that has the specified requests and limits
func (f *Framework) NewTestPod(name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pause",
Image: GetPauseImageName(f.ClientSet),
Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,
},
},
},
},
}
}
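For reference, a minimal usage sketch of the newly shared helper, mirroring the call sites in limit_range.go and opaque_resource.go below; the pod name and quantities here are illustrative:

// Inside a test body; assumes the usual framework, v1, and resource imports.
requests := v1.ResourceList{v1.ResourceCPU: resource.MustParse("100m")}
limits := v1.ResourceList{v1.ResourceCPU: resource.MustParse("200m")}
pod := f.NewTestPod("example-pod", requests, limits)
pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err)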

View File

@@ -62,6 +62,7 @@ import (
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/generated"
"k8s.io/kubernetes/test/e2e/scheduling"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
@@ -1416,7 +1417,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
Effect: v1.TaintEffectNoSchedule,
}
- nodeName := getNodeThatCanRunPod(f)
+ nodeName := scheduling.GetNodeThatCanRunPod(f)
By("adding the taint " + testTaint.ToString() + " to a node")
runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.ToString())
@@ -1447,7 +1448,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
Effect: v1.TaintEffectNoSchedule,
}
- nodeName := getNodeThatCanRunPod(f)
+ nodeName := scheduling.GetNodeThatCanRunPod(f)
By("adding the taint " + testTaint.ToString() + " to a node")
runKubectlRetryOrDie("taint", "nodes", nodeName, testTaint.ToString())
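The change from getNodeThatCanRunPod to scheduling.GetNodeThatCanRunPod is more than a move: Go only exports capitalized identifiers, so the helper had to be renamed once callers in package e2e started reaching into package scheduling. A toy sketch of the rule, with hypothetical names:

package scheduling

// Exported: callable from other packages as scheduling.GetNode.
func GetNode() string { return "node-1" }

// Unexported: scheduling.getNode would not compile from package e2e.
func getNode() string { return "node-1" }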

View File

@@ -54,7 +54,7 @@ var _ = framework.KubeDescribe("LimitRange", func() {
Expect(err).NotTo(HaveOccurred())
By("Creating a Pod with no resource requirements")
- pod := newTestPod(f, "pod-no-resources", v1.ResourceList{}, v1.ResourceList{})
+ pod := f.NewTestPod("pod-no-resources", v1.ResourceList{}, v1.ResourceList{})
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
@@ -71,7 +71,7 @@ var _ = framework.KubeDescribe("LimitRange", func() {
}
By("Creating a Pod with partial resource requirements")
pod = newTestPod(f, "pod-partial-resources", getResourceList("", "150Mi"), getResourceList("300m", ""))
pod = f.NewTestPod("pod-partial-resources", getResourceList("", "150Mi"), getResourceList("300m", ""))
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
@@ -92,12 +92,12 @@ var _ = framework.KubeDescribe("LimitRange", func() {
}
By("Failing to create a Pod with less than min resources")
pod = newTestPod(f, podName, getResourceList("10m", "50Mi"), v1.ResourceList{})
pod = f.NewTestPod(podName, getResourceList("10m", "50Mi"), v1.ResourceList{})
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred())
By("Failing to create a Pod with more than max resources")
pod = newTestPod(f, podName, getResourceList("600m", "600Mi"), v1.ResourceList{})
pod = f.NewTestPod(podName, getResourceList("600m", "600Mi"), v1.ResourceList{})
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred())
})
@@ -166,24 +166,3 @@ func newLimitRange(name string, limitType v1.LimitType,
},
}
}
// newTestPod returns a pod that has the specified requests and limits
func newTestPod(f *framework.Framework, name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod {
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "pause",
Image: framework.GetPauseImageName(f.ClientSet),
Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,
},
},
},
},
}
}

test/e2e/scheduling/BUILD (new file, 49 lines)
View File

@@ -0,0 +1,49 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"opaque_resource.go",
"predicates.go",
"rescheduler.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/util/system:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//vendor:github.com/onsi/ginkgo",
"//vendor:github.com/onsi/gomega",
"//vendor:github.com/stretchr/testify/assert",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/api/resource",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/apimachinery/pkg/types",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apimachinery/pkg/util/uuid",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@@ -0,0 +1,4 @@
approvers:
- sig-scheduling-maintainers
reviewers:
- sig-scheduling

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
- package e2e
+ package scheduling
import (
"fmt"
@@ -69,7 +69,7 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
By("Creating a vanilla pod")
requests := v1.ResourceList{v1.ResourceCPU: resource.MustParse("0.1")}
limits := v1.ResourceList{v1.ResourceCPU: resource.MustParse("0.2")}
pod := newTestPod(f, "without-oir", requests, limits)
pod := f.NewTestPod("without-oir", requests, limits)
By("Observing an event that indicates the pod was scheduled")
action := func() error {
@@ -96,7 +96,7 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
v1.ResourceCPU: resource.MustParse("0.2"),
opaqueResName: resource.MustParse("2"),
}
pod := newTestPod(f, "min-oir", requests, limits)
pod := f.NewTestPod("min-oir", requests, limits)
By("Observing an event that indicates the pod was scheduled")
action := func() error {
@@ -118,7 +118,7 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
By("Observing an event that indicates the pod was not scheduled")
action := func() error {
- _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(newTestPod(f, "over-max-oir", requests, limits))
+ _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(f.NewTestPod("over-max-oir", requests, limits))
return err
}
predicate := scheduleFailure("over-max-oir")
@@ -224,8 +224,8 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
v1.ResourceCPU: resource.MustParse("0.2"),
opaqueResName: resource.MustParse("3"),
}
- pod1 := newTestPod(f, "oir-1", requests, limits)
- pod2 := newTestPod(f, "oir-2", requests, limits)
+ pod1 := f.NewTestPod("oir-1", requests, limits)
+ pod2 := f.NewTestPod("oir-2", requests, limits)
By("Observing an event that indicates one pod was scheduled")
action := func() error {

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
- package e2e
+ package scheduling
import (
"fmt"
@@ -262,7 +262,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
})
It("validates that NodeSelector is respected if matching [Conformance]", func() {
- nodeName := getNodeThatCanRunPod(f)
+ nodeName := GetNodeThatCanRunPod(f)
By("Trying to apply a random label on the found node.")
k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
@@ -336,7 +336,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// Keep the same steps with the test on NodeSelector,
// but specify Affinity in Pod.Annotations, instead of NodeSelector.
It("validates that required NodeAffinity setting is respected if matching", func() {
- nodeName := getNodeThatCanRunPod(f)
+ nodeName := GetNodeThatCanRunPod(f)
By("Trying to apply a random label on the found node.")
k := fmt.Sprintf("kubernetes.io/e2e-%s", string(uuid.NewUUID()))
@@ -760,7 +760,7 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
Affinity: conf.Affinity,
Containers: []v1.Container{
{
- Name: podName,
+ Name: conf.Name,
Image: framework.GetPauseImageName(f.ClientSet),
},
},
@@ -940,7 +940,7 @@ func runAndKeepPodWithLabelAndGetNodeName(f *framework.Framework) (string, strin
return pod.Spec.NodeName, pod.Name
}
- func getNodeThatCanRunPod(f *framework.Framework) string {
+ func GetNodeThatCanRunPod(f *framework.Framework) string {
By("Trying to launch a pod without a label to get a node which can launch it.")
return runPodAndGetNodeName(f, pausePodConfig{Name: "without-label"})
}
@@ -949,3 +949,21 @@ func getNodeThatCanRunPodWithoutToleration(f *framework.Framework) string {
By("Trying to launch a pod without a toleration to get a node which can launch it.")
return runPodAndGetNodeName(f, pausePodConfig{Name: "without-toleration"})
}
func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
By(fmt.Sprintf("Running RC which reserves host port"))
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: defaultTimeout,
Image: framework.GetPauseImageName(f.ClientSet),
Replicas: replicas,
HostPorts: map[string]int{"port1": 4321},
}
err := framework.RunRC(*config)
if expectRunning {
framework.ExpectNoError(err)
}
}
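Every replica created by this helper asks for hostPort 4321, so at most one replica can land on any given node; that is what makes it useful both here and for forcing pending pods in the cluster-autoscaling test above. Call-site sketch, mirroring cluster_size_autoscaling.go (the id and replica count are illustrative):

scheduling.CreateHostPortPods(f, "host-port", nodeCount+2, false)
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "host-port")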

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
- package e2e
+ package scheduling
import (
"fmt"
@@ -30,6 +30,10 @@ import (
. "github.com/onsi/gomega"
)
+ const (
+ defaultTimeout = 3 * time.Minute
+ )
// This test requires Rescheduler to be enabled.
var _ = framework.KubeDescribe("Rescheduler [Serial]", func() {
f := framework.NewDefaultFramework("rescheduler")
@@ -74,7 +78,7 @@ func reserveAllCpu(f *framework.Framework, id string, millicores int) error {
timeout := 5 * time.Minute
replicas := millicores / 100
- ReserveCpu(f, id, 1, 100)
+ reserveCpu(f, id, 1, 100)
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.Namespace.Name, id, uint(replicas), false))
for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
@@ -109,3 +113,19 @@ func podRunningOrUnschedulable(pod *v1.Pod) bool {
running, _ := testutils.PodRunningReady(pod)
return running
}
func reserveCpu(f *framework.Framework, id string, replicas, millicores int) {
By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
request := int64(millicores / replicas)
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: defaultTimeout,
Image: framework.GetPauseImageName(f.ClientSet),
Replicas: replicas,
CpuRequest: request,
}
framework.ExpectNoError(framework.RunRC(*config))
}
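As the reserveAllCpu hunk above shows, this helper is driven by creating a single 100-millicore replica and then scaling the RC out to cover the cluster, roughly:

// Pattern from reserveAllCpu, where replicas := millicores / 100.
reserveCpu(f, id, 1, 100)
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.Namespace.Name, id, uint(replicas), false))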