[e2e] Should spread Pods to schedulable cluster zones

Some Nodes, such as control plane ones, should not be considered when spreading Pods.

Signed-off-by: Zhecheng Li <zhechengli@microsoft.com>

parent 9a6725aa1d
commit b8b18a0a05
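The body of the new e2enode.GetSchedulableClusterZones helper is not part of this diff; only its call site below implies its shape (it takes a clientset and returns a sets.String of zone names). A minimal sketch of what such a helper might look like, assuming it reuses GetReadySchedulableNodes (already called in this test) to exclude unschedulable and tainted nodes, and reads the same two zone labels this file consults in getZoneNameForNode. Everything here is illustrative, not the committed implementation:

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// getSchedulableClusterZonesSketch is a hypothetical stand-in for
// e2enode.GetSchedulableClusterZones: collect zone labels only from
// nodes that are ready and schedulable, so zones holding nothing but
// control plane (tainted or unschedulable) nodes are never counted.
func getSchedulableClusterZonesSketch(c clientset.Interface) (sets.String, error) {
	// GetReadySchedulableNodes already filters out unschedulable and tainted nodes.
	nodes, err := e2enode.GetReadySchedulableNodes(c)
	if err != nil {
		return nil, fmt.Errorf("listing schedulable nodes: %w", err)
	}
	zones := sets.NewString()
	for _, node := range nodes.Items {
		// Prefer the GA topology label; fall back to the deprecated beta
		// label, mirroring the labels getZoneNameForNode reports on below.
		if zone, ok := node.Labels[v1.LabelTopologyZone]; ok {
			zones.Insert(zone)
		} else if zone, ok := node.Labels[v1.LabelFailureDomainBetaZone]; ok {
			zones.Insert(zone)
		}
	}
	return zones, nil
}

Filtering at the node-listing step means a zone that contains only control plane nodes never appears in the set at all, so both the multi-zone skip check and the spread assertion below ignore it.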
@@ -28,6 +28,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -46,17 +47,20 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() {
 	var zoneCount int
 	var err error
 	var cleanUp func()
+	var zoneNames sets.String
 	ginkgo.BeforeEach(func() {
+		cs := f.ClientSet
+
 		if zoneCount <= 0 {
-			zoneCount, err = getZoneCount(f.ClientSet)
+			zoneNames, err = e2enode.GetSchedulableClusterZones(cs)
 			framework.ExpectNoError(err)
+			zoneCount = len(zoneNames)
 		}
-		ginkgo.By(fmt.Sprintf("Checking for multi-zone cluster. Zone count = %d", zoneCount))
-		msg := fmt.Sprintf("Zone count is %d, only run for multi-zone clusters, skipping test", zoneCount)
+		ginkgo.By(fmt.Sprintf("Checking for multi-zone cluster. Schedulable zone count = %d", zoneCount))
+		msg := fmt.Sprintf("Schedulable zone count is %d, only run for multi-zone clusters, skipping test", zoneCount)
 		e2eskipper.SkipUnlessAtLeast(zoneCount, 2, msg)
 		// TODO: SkipUnlessDefaultScheduler() // Non-default schedulers might not spread
 
-		cs := f.ClientSet
 		e2enode.WaitForTotalHealthy(cs, time.Minute)
 		nodeList, err := e2enode.GetReadySchedulableNodes(cs)
 		framework.ExpectNoError(err)
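The cached zoneNames is a sets.String from k8s.io/apimachinery/pkg/util/sets, hence the new import in the first hunk. Since sets.String is just map[string]sets.Empty, the built-in len works for the zone count, and List() yields a sorted slice in the form checkZoneSpreading consumes. A small standalone illustration, with made-up zone names:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// Duplicate zone names collapse into one entry.
	zones := sets.NewString("us-east-1a", "us-east-1b", "us-east-1a")
	fmt.Println(len(zones))   // 2: sets.String is a map, so built-in len works
	fmt.Println(zones.List()) // [us-east-1a us-east-1b]: sorted, as zoneNames.List() below
}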
@@ -71,17 +75,17 @@ var _ = SIGDescribe("Multi-AZ Clusters", func() {
 		}
 	})
 	ginkgo.It("should spread the pods of a service across zones [Serial]", func() {
-		SpreadServiceOrFail(f, 5*zoneCount, imageutils.GetPauseImageName())
+		SpreadServiceOrFail(f, 5*zoneCount, zoneNames, imageutils.GetPauseImageName())
 	})
 
 	ginkgo.It("should spread the pods of a replication controller across zones [Serial]", func() {
-		SpreadRCOrFail(f, int32(5*zoneCount), framework.ServeHostnameImage, []string{"serve-hostname"})
+		SpreadRCOrFail(f, int32(5*zoneCount), zoneNames, framework.ServeHostnameImage, []string{"serve-hostname"})
 	})
 })
 
 // SpreadServiceOrFail check that the pods comprising a service
 // get spread evenly across available zones
-func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) {
+func SpreadServiceOrFail(f *framework.Framework, replicaCount int, zoneNames sets.String, image string) {
 	// First create the service
 	serviceName := "test-service"
 	serviceSpec := &v1.Service{
@@ -130,8 +134,6 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
 	framework.ExpectNoError(err)
 
 	// Now make sure they're spread across zones
-	zoneNames, err := e2enode.GetClusterZones(f.ClientSet)
-	framework.ExpectNoError(err)
 	checkZoneSpreading(f.ClientSet, pods, zoneNames.List())
 }
 
@@ -146,15 +148,6 @@ func getZoneNameForNode(node v1.Node) (string, error) {
 		node.Name, v1.LabelFailureDomainBetaZone, v1.LabelTopologyZone)
 }
 
-// Return the number of zones in which we have nodes in this cluster.
-func getZoneCount(c clientset.Interface) (int, error) {
-	zoneNames, err := e2enode.GetClusterZones(c)
-	if err != nil {
-		return -1, err
-	}
-	return len(zoneNames), nil
-}
-
 // Find the name of the zone in which the pod is scheduled
 func getZoneNameForPod(c clientset.Interface, pod v1.Pod) (string, error) {
 	ginkgo.By(fmt.Sprintf("Getting zone name for pod %s, on node %s", pod.Name, pod.Spec.NodeName))
@@ -195,7 +188,7 @@ func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []string) {
 
 // SpreadRCOrFail Check that the pods comprising a replication
 // controller get spread evenly across available zones
-func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string, args []string) {
+func SpreadRCOrFail(f *framework.Framework, replicaCount int32, zoneNames sets.String, image string, args []string) {
 	name := "ubelite-spread-rc-" + string(uuid.NewUUID())
 	ginkgo.By(fmt.Sprintf("Creating replication controller %s", name))
 	controller, err := f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(context.TODO(), &v1.ReplicationController{
@@ -244,7 +237,5 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string, args []string) {
 	framework.ExpectNoError(err)
 
 	// Now make sure they're spread across zones
-	zoneNames, err := e2enode.GetClusterZones(f.ClientSet)
-	framework.ExpectNoError(err)
 	checkZoneSpreading(f.ClientSet, pods, zoneNames.List())
 }
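checkZoneSpreading itself is untouched by this commit and its body is not shown. The only change is that it now receives the schedulable-zone list computed once in BeforeEach instead of a freshly fetched GetClusterZones result, so the assertion can no longer be skewed by zones that hold only control plane nodes. As a rough, hypothetical sketch of the kind of even-spread check such a function performs (names, tolerance, and error handling are all assumptions, not the committed code):

package main

import "fmt"

// checkEvenSpreadSketch is a hypothetical illustration of an even-spread
// assertion: count pods per zone, then require the busiest and quietest
// zones to differ by at most `tolerance` pods.
func checkEvenSpreadSketch(podZones, zoneNames []string, tolerance int) error {
	podsPerZone := make(map[string]int, len(zoneNames))
	for _, zone := range zoneNames {
		podsPerZone[zone] = 0 // a zone with no pods must count as zero, not go missing
	}
	for _, zone := range podZones {
		podsPerZone[zone]++
	}
	minPods, maxPods := int(^uint(0)>>1), 0
	for _, n := range podsPerZone {
		if n < minPods {
			minPods = n
		}
		if n > maxPods {
			maxPods = n
		}
	}
	if maxPods-minPods > tolerance {
		return fmt.Errorf("pods not spread evenly: min %d, max %d per zone", minPods, maxPods)
	}
	return nil
}

func main() {
	// The tests above request 5*zoneCount replicas, i.e. 5 pods per zone on average.
	err := checkEvenSpreadSketch(
		[]string{"a", "a", "b", "b", "a", "b"},
		[]string{"a", "b"},
		2,
	)
	fmt.Println(err) // <nil>: counts are 3 and 3, within tolerance
}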