Merge pull request #67294 from tanshanshan/todo811

Automatic merge from submit-queue (batch tested with PRs 67294, 67320, 67335, 67334, 67325). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

use framework.GetClusterZones() instead of getZoneNames()

**What this PR does / why we need it**:
Fixes a TODO: use framework.GetClusterZones() instead of the package-local getZoneNames() helper (a short sketch of the new call pattern follows the release note below).
**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #

**Special notes for your reviewer**:

**Release note**:

```release-note
NONE
```
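A minimal sketch of the new call pattern, assuming only what the diff below shows: framework.GetClusterZones() returns a sets.String, so callers that still need a []string convert it with .List(). The wrapper function clusterZoneNames is illustrative and not part of this PR:

```go
// Illustrative wrapper around the new call pattern; only
// framework.GetClusterZones and its (sets.String, error) return are taken
// from this PR's diff.
package example

import (
	. "github.com/onsi/gomega"

	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// clusterZoneNames returns the zone names of the cluster's nodes as a
// sorted slice, for helpers that still expect a []string.
func clusterZoneNames(c clientset.Interface) []string {
	// GetClusterZones returns a sets.String of zone names.
	zones, err := framework.GetClusterZones(c)
	Expect(err).NotTo(HaveOccurred()) // mirrors the Gomega style used in the tests
	return zones.List()               // List() yields a sorted []string
}
```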
Kubernetes Submit Queue 2018-08-15 00:07:09 -07:00 committed by GitHub
commit dcc9d82374
2 changed files with 8 additions and 25 deletions


```diff
@@ -26,7 +26,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/intstr"
-	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
@@ -110,9 +109,9 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Now make sure they're spread across zones
-	zoneNames, err := getZoneNames(f.ClientSet)
+	zoneNames, err := framework.GetClusterZones(f.ClientSet)
 	Expect(err).NotTo(HaveOccurred())
-	Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames)).To(Equal(true))
+	Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(Equal(true))
 }
 
 // Find the name of the zone in which a Node is running
@@ -126,25 +125,9 @@ func getZoneNameForNode(node v1.Node) (string, error) {
 		node.Name, kubeletapis.LabelZoneFailureDomain)
 }
 
-// TODO (verult) Merge with framework.GetClusterZones()
-// Find the names of all zones in which we have nodes in this cluster.
-func getZoneNames(c clientset.Interface) ([]string, error) {
-	zoneNames := sets.NewString()
-	nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
-	if err != nil {
-		return nil, err
-	}
-	for _, node := range nodes.Items {
-		zoneName, err := getZoneNameForNode(node)
-		Expect(err).NotTo(HaveOccurred())
-		zoneNames.Insert(zoneName)
-	}
-	return zoneNames.List(), nil
-}
-
 // Return the number of zones in which we have nodes in this cluster.
 func getZoneCount(c clientset.Interface) (int, error) {
-	zoneNames, err := getZoneNames(c)
+	zoneNames, err := framework.GetClusterZones(c)
 	if err != nil {
 		return -1, err
 	}
@@ -239,7 +222,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
 	Expect(err).NotTo(HaveOccurred())
 
 	// Now make sure they're spread across zones
-	zoneNames, err := getZoneNames(f.ClientSet)
+	zoneNames, err := framework.GetClusterZones(f.ClientSet)
 	Expect(err).NotTo(HaveOccurred())
-	Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames)).To(Equal(true))
+	Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(Equal(true))
 }
```


```diff
@@ -185,9 +185,9 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
 	c := f.ClientSet
 	ns := f.Namespace.Name
 
-	zones, err := getZoneNames(c)
+	zones, err := framework.GetClusterZones(c)
 	Expect(err).NotTo(HaveOccurred())
-
+	zonelist := zones.List()
 	By("Creating static PVs across zones")
 	configs := make([]*staticPVTestConfig, podCount)
 	for i := range configs {
@@ -208,7 +208,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
 	}()
 
 	for i, config := range configs {
-		zone := zones[i%len(zones)]
+		zone := zonelist[i%len(zones)]
 		config.pvSource, err = framework.CreatePVSource(zone)
 		Expect(err).NotTo(HaveOccurred())
```
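For context, framework.GetClusterZones does roughly what the deleted getZoneNames above did, but returns the sets.String directly. A rough sketch under that assumption, inferred from the deleted helper and not taken from the actual framework source:

```go
// Rough approximation of what framework.GetClusterZones does, inferred from
// the deleted getZoneNames helper above; NOT the actual framework source.
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)

// getClusterZonesSketch lists the cluster's nodes and collects the value of
// the failure-domain zone label from each node that carries it.
func getClusterZonesSketch(c clientset.Interface) (sets.String, error) {
	nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}

	zones := sets.NewString()
	for _, node := range nodes.Items {
		if zone, found := node.Labels[kubeletapis.LabelZoneFailureDomain]; found {
			zones.Insert(zone)
		}
	}
	return zones, nil
}
```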