Merge pull request #95777 from alculquicondor/default-pod-spread-integration

Add integration test for Default PodTopologySpread
Kubernetes Prow Robot 2020-10-23 00:12:07 -07:00 committed by GitHub
commit 0c00f7b0f6
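
For context, the "Default PodTopologySpread" exercised here is the PodTopologySpread plugin's built-in system default constraints, which apply when a Pod declares no topologySpreadConstraints of its own and the cluster configures no defaultConstraints for the plugin. A rough sketch of those defaults, assuming the values documented for the plugin (the authoritative definition lives in pkg/scheduler/framework/plugins/podtopologyspread, not in this PR):

	var systemDefaultConstraints = []v1.TopologySpreadConstraint{
		{
			TopologyKey:       v1.LabelHostname, // "kubernetes.io/hostname"
			MaxSkew:           3,
			WhenUnsatisfiable: v1.ScheduleAnyway,
		},
		{
			TopologyKey:       v1.LabelZoneFailureDomainStable, // "topology.kubernetes.io/zone"
			MaxSkew:           5,
			WhenUnsatisfiable: v1.ScheduleAnyway,
		},
	}

Because the defaulted constraints take their label selector from the Pod's controlling Service, ReplicationController, ReplicaSet or StatefulSet, the new test below groups its Pods under a single Service and then checks how they spread across zones.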


@@ -25,6 +25,7 @@ import (
	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/kubernetes/pkg/scheduler"
	schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
@@ -254,8 +255,8 @@ func makeContainersWithImages(images []string) []v1.Container {
	return containers
}

-// TestEvenPodsSpreadPriority verifies that EvenPodsSpread priority functions well.
-func TestEvenPodsSpreadPriority(t *testing.T) {
+// TestPodTopologySpreadScore verifies that the PodTopologySpread Score plugin works.
+func TestPodTopologySpreadScore(t *testing.T) {
	testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name)
	defer testutils.CleanupTest(t, testCtx)
	cs := testCtx.ClientSet
@@ -361,3 +362,96 @@ func TestEvenPodsSpreadPriority(t *testing.T) {
		})
	}
}

// TestDefaultPodTopologySpreadScore verifies that the PodTopologySpread Score plugin
// with the system default spreading spreads Pods belonging to a Service.
// The setup has 300 nodes over 3 zones.
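// The subtests create 3, 12 and 27 Pods in total, each a multiple of the 3
// zones, so a perfectly spread placement puts exactly total/3 Pods per zone
// and the observed zone skew (max minus min per-zone count) should be 0.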
func TestDefaultPodTopologySpreadScore(t *testing.T) {
testCtx := initTestSchedulerForPriorityTest(t, podtopologyspread.Name)
t.Cleanup(func() {
testutils.CleanupTest(t, testCtx)
})
cs := testCtx.ClientSet
ns := testCtx.NS.Name
zoneForNode := make(map[string]string)
for i := 0; i < 300; i++ {
nodeName := fmt.Sprintf("node-%d", i)
zone := fmt.Sprintf("zone-%d", i%3)
zoneForNode[nodeName] = zone
_, err := createNode(cs, st.MakeNode().Name(nodeName).Label(v1.LabelHostname, nodeName).Label(v1.LabelZoneFailureDomainStable, zone).Obj())
if err != nil {
t.Fatalf("Cannot create node: %v", err)
}
}
serviceName := "test-service"
svc := &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Namespace: ns,
},
Spec: v1.ServiceSpec{
Selector: map[string]string{
"service": serviceName,
},
Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}},
},
}
_, err := cs.CoreV1().Services(ns).Create(testCtx.Ctx, svc, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Cannot create Service: %v", err)
}
pause := imageutils.GetPauseImageName()
totalPodCnt := 0
for _, nPods := range []int{3, 9, 15} {
// Append nPods each iteration.
t.Run(fmt.Sprintf("%d-pods", totalPodCnt+nPods), func(t *testing.T) {
for i := 0; i < nPods; i++ {
p := st.MakePod().Name(fmt.Sprintf("p-%d", totalPodCnt)).Label("service", serviceName).Container(pause).Obj()
_, err = cs.CoreV1().Pods(ns).Create(testCtx.Ctx, p, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Cannot create Pod: %v", err)
}
totalPodCnt++
}
var pods []v1.Pod
// Wait for all Pods scheduled.
err = wait.Poll(pollInterval, wait.ForeverTestTimeout, func() (bool, error) {
podList, err := cs.CoreV1().Pods(ns).List(testCtx.Ctx, metav1.ListOptions{})
if err != nil {
t.Fatalf("Cannot list pods to verify scheduling: %v", err)
}
for _, p := range podList.Items {
if p.Spec.NodeName == "" {
return false, nil
}
}
pods = podList.Items
return true, nil
})
if err != nil {
t.Fatalf("Waiting for Pods to be scheduled: %v", err)
}
// Verify zone spreading.
zoneCnts := make(map[string]int)
for _, p := range pods {
zoneCnts[zoneForNode[p.Spec.NodeName]]++
}
maxCnt := 0
minCnt := len(pods)
for _, c := range zoneCnts {
if c > maxCnt {
maxCnt = c
}
if c < minCnt {
minCnt = c
}
}
if skew := maxCnt - minCnt; skew != 0 {
t.Errorf("Zone skew is %d, should be 0", skew)
}
})
}
}
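
(To run only this test locally, the usual scheduler integration-test workflow should apply: install etcd via hack/install-etcd.sh and invoke something along the lines of make test-integration WHAT=./test/integration/scheduler KUBE_TEST_ARGS="-run TestDefaultPodTopologySpreadScore". The exact invocation is an assumption here and can differ between releases.)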