From 67bc23411b60b9f21fc43e2d57d4d46df3bcace7 Mon Sep 17 00:00:00 2001
From: ravisantoshgudimetla
Date: Wed, 30 Jun 2021 17:08:47 -0400
Subject: [PATCH] [storage] [test] Ensure proper resource creation

Ensure resources are created in zones with schedulable nodes. For
example, if a cluster has 4 zones, with worker nodes in 3 of them and
only master nodes (unschedulable for workloads) in the fourth, we
should not create resources such as PVs, PVCs, or pods in that fourth
zone.
---
 test/e2e/framework/node/resource.go        | 25 ++++++++++++++++++++++
 test/e2e/storage/ubernetes_lite_volumes.go |  4 ++--
 2 files changed, 27 insertions(+), 2 deletions(-)

diff --git a/test/e2e/framework/node/resource.go b/test/e2e/framework/node/resource.go
index db6ec61adce..93bbaae48dc 100644
--- a/test/e2e/framework/node/resource.go
+++ b/test/e2e/framework/node/resource.go
@@ -561,6 +561,31 @@ func GetClusterZones(c clientset.Interface) (sets.String, error) {
 	return zones, nil
 }
 
+// GetSchedulableClusterZones returns the values of the zone labels collected from all schedulable nodes.
+func GetSchedulableClusterZones(c clientset.Interface) (sets.String, error) {
+	nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("error getting nodes while attempting to list cluster zones: %v", err)
+	}
+
+	// Collect the values of the zone labels from all schedulable nodes.
+	zones := sets.NewString()
+	for _, node := range nodes.Items {
+		// Only count zones that have at least one schedulable node.
+		if !IsNodeSchedulable(&node) {
+			continue
+		}
+		if zone, found := node.Labels[v1.LabelFailureDomainBetaZone]; found {
+			zones.Insert(zone)
+		}
+
+		if zone, found := node.Labels[v1.LabelTopologyZone]; found {
+			zones.Insert(zone)
+		}
+	}
+	return zones, nil
+}
+
 // CreatePodsPerNodeForSimpleApp creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
 func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
 	nodes, err := GetBoundedReadySchedulableNodes(c, maxCount)
diff --git a/test/e2e/storage/ubernetes_lite_volumes.go b/test/e2e/storage/ubernetes_lite_volumes.go
index 5aa27c7850d..70e642946e2 100644
--- a/test/e2e/storage/ubernetes_lite_volumes.go
+++ b/test/e2e/storage/ubernetes_lite_volumes.go
@@ -54,7 +54,7 @@ var _ = utils.SIGDescribe("Multi-AZ Cluster Volumes", func() {
 
 // Return the number of zones in which we have nodes in this cluster.
 func getZoneCount(c clientset.Interface) (int, error) {
-	zoneNames, err := e2enode.GetClusterZones(c)
+	zoneNames, err := e2enode.GetSchedulableClusterZones(c)
 	if err != nil {
 		return -1, err
 	}
@@ -75,7 +75,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
 	c := f.ClientSet
 	ns := f.Namespace.Name
 
-	zones, err := e2enode.GetClusterZones(c)
+	zones, err := e2enode.GetSchedulableClusterZones(c)
 	framework.ExpectNoError(err)
 	zonelist := zones.List()
 	ginkgo.By("Creating static PVs across zones")
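
For reviewers, a minimal usage sketch (not part of the patch) of how a test could guard on the new helper before creating per-zone resources. The requireMultiZoneSchedulable name is hypothetical; it assumes the same imports already used in ubernetes_lite_volumes.go (fmt, clientset, e2enode).

// Hypothetical helper, not part of this patch: returns the number of zones
// that contain at least one schedulable node, and errors out when the
// cluster cannot support a multi-zone test.
func requireMultiZoneSchedulable(c clientset.Interface) (int, error) {
	// Only zones backed by schedulable nodes are returned here, so a zone
	// holding nothing but unschedulable masters is never counted.
	zones, err := e2enode.GetSchedulableClusterZones(c)
	if err != nil {
		return 0, err
	}
	if zones.Len() < 2 {
		return zones.Len(), fmt.Errorf("need schedulable nodes in at least 2 zones, found %d", zones.Len())
	}
	return zones.Len(), nil
}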