diff --git a/test/e2e/scheduling/ubernetes_lite.go b/test/e2e/scheduling/ubernetes_lite.go
index 2834e3925b5..ba95b362928 100644
--- a/test/e2e/scheduling/ubernetes_lite.go
+++ b/test/e2e/scheduling/ubernetes_lite.go
@@ -26,7 +26,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/intstr"
-	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
@@ -110,9 +109,9 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Now make sure they're spread across zones
-	zoneNames, err := getZoneNames(f.ClientSet)
+	zoneNames, err := framework.GetClusterZones(f.ClientSet)
 	Expect(err).NotTo(HaveOccurred())
-	Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames)).To(Equal(true))
+	Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(Equal(true))
 }
 
 // Find the name of the zone in which a Node is running
@@ -126,25 +125,9 @@ func getZoneNameForNode(node v1.Node) (string, error) {
 		node.Name, kubeletapis.LabelZoneFailureDomain)
 }
 
-// TODO (verult) Merge with framework.GetClusterZones()
-// Find the names of all zones in which we have nodes in this cluster.
-func getZoneNames(c clientset.Interface) ([]string, error) {
-	zoneNames := sets.NewString()
-	nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
-	if err != nil {
-		return nil, err
-	}
-	for _, node := range nodes.Items {
-		zoneName, err := getZoneNameForNode(node)
-		Expect(err).NotTo(HaveOccurred())
-		zoneNames.Insert(zoneName)
-	}
-	return zoneNames.List(), nil
-}
-
 // Return the number of zones in which we have nodes in this cluster.
 func getZoneCount(c clientset.Interface) (int, error) {
-	zoneNames, err := getZoneNames(c)
+	zoneNames, err := framework.GetClusterZones(c)
 	if err != nil {
 		return -1, err
 	}
@@ -239,7 +222,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
 	Expect(err).NotTo(HaveOccurred())
 
 	// Now make sure they're spread across zones
-	zoneNames, err := getZoneNames(f.ClientSet)
+	zoneNames, err := framework.GetClusterZones(f.ClientSet)
 	Expect(err).NotTo(HaveOccurred())
-	Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames)).To(Equal(true))
+	Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(Equal(true))
 }
diff --git a/test/e2e/scheduling/ubernetes_lite_volumes.go b/test/e2e/scheduling/ubernetes_lite_volumes.go
index 7d7e95687e4..8db74c79688 100644
--- a/test/e2e/scheduling/ubernetes_lite_volumes.go
+++ b/test/e2e/scheduling/ubernetes_lite_volumes.go
@@ -185,9 +185,9 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
 	c := f.ClientSet
 	ns := f.Namespace.Name
 
-	zones, err := getZoneNames(c)
+	zones, err := framework.GetClusterZones(c)
 	Expect(err).NotTo(HaveOccurred())
-
+	zonelist := zones.List()
 	By("Creating static PVs across zones")
 	configs := make([]*staticPVTestConfig, podCount)
 	for i := range configs {
@@ -208,7 +208,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
 	}()
 
 	for i, config := range configs {
-		zone := zones[i%len(zones)]
+		zone := zonelist[i%len(zones)]
 		config.pvSource, err = framework.CreatePVSource(zone)
 		Expect(err).NotTo(HaveOccurred())
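
For context: the call sites above treat the return value of framework.GetClusterZones as a set rather than a []string, which is why each caller now adds a .List() call. Below is a minimal sketch of what that shared helper presumably looks like, inferred from the removed getZoneNames body (whose deleted TODO asked for exactly this merge) and assuming the signature (sets.String, error); it is not part of this diff.

package framework

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)

// GetClusterZones returns the names of all zones that have at least one
// node in this cluster. Sketch only; assumed to live in test/e2e/framework.
func GetClusterZones(c clientset.Interface) (sets.String, error) {
	nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}

	// Collect the failure-domain zone label from every node; the set
	// deduplicates zones that are shared by multiple nodes.
	zones := sets.NewString()
	for _, node := range nodes.Items {
		if zone, found := node.Labels[kubeletapis.LabelZoneFailureDomain]; found {
			zones.Insert(zone)
		}
	}
	return zones, nil
}

Returning sets.String instead of []string lets callers choose between set operations (membership tests, unions) and an ordered slice via .List(), which is the conversion the updated call sites perform.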