Merge pull request #67294 from tanshanshan/todo811
Automatic merge from submit-queue (batch tested with PRs 67294, 67320, 67335, 67334, 67325). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

use framework.GetClusterZones() instead of getZoneNames()

**What this PR does / why we need it**:
Fix TODO: use framework.GetClusterZones() instead of getZoneNames()

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #

**Special notes for your reviewer**:

**Release note**:

```release-note
NONE
```
Commit: dcc9d82374
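For reviewers unfamiliar with the helper: `framework.GetClusterZones` lists all nodes and collects their failure-domain zone labels into a `sets.String`, where the old `getZoneNames` returned a `[]string`. A minimal sketch of its likely shape, reconstructed from how the call sites in this diff use it (the authoritative body lives in `test/e2e/framework` and may differ in detail):

```go
package framework

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
)

// GetClusterZones returns the set of distinct failure-domain zone labels
// found across all nodes in the cluster. Sketch only: the signature
// (sets.String, error) is what the call sites below rely on.
func GetClusterZones(c clientset.Interface) (sets.String, error) {
	nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("error listing nodes while collecting cluster zones: %v", err)
	}
	zones := sets.NewString()
	for _, node := range nodes.Items {
		// Nodes without the zone label are simply skipped.
		if zone, found := node.Labels[kubeletapis.LabelZoneFailureDomain]; found {
			zones.Insert(zone)
		}
	}
	return zones, nil
}
```

Call sites that still need a sorted `[]string` convert with `.List()`, which is the only adjustment this PR makes beyond swapping the function.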
@@ -26,7 +26,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/intstr"
-	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	clientset "k8s.io/client-go/kubernetes"
 	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
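With `getZoneNames` deleted (see the hunk at `-126` below), nothing in this file uses the `sets` package directly anymore, which is why its import is dropped here.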
@@ -110,9 +109,9 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
 	Expect(err).NotTo(HaveOccurred())
 
 	// Now make sure they're spread across zones
-	zoneNames, err := getZoneNames(f.ClientSet)
+	zoneNames, err := framework.GetClusterZones(f.ClientSet)
 	Expect(err).NotTo(HaveOccurred())
-	Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames)).To(Equal(true))
+	Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(Equal(true))
 }
 
 // Find the name of the zone in which a Node is running
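The `.List()` call is needed because `checkZoneSpreading` still takes a `[]string`, while the framework helper returns a `sets.String`; `List()` returns the set's members as a sorted slice. A small standalone illustration of that behavior (not part of this diff):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	// sets.String deduplicates; List() returns a sorted []string.
	zones := sets.NewString("us-central1-b", "us-central1-a", "us-central1-b")
	fmt.Println(zones.List())               // [us-central1-a us-central1-b]
	fmt.Println(zones.Has("us-central1-a")) // true
	fmt.Println(len(zones))                 // 2
}
```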
@@ -126,25 +125,9 @@ func getZoneNameForNode(node v1.Node) (string, error) {
 		node.Name, kubeletapis.LabelZoneFailureDomain)
 }
 
-// TODO (verult) Merge with framework.GetClusterZones()
-// Find the names of all zones in which we have nodes in this cluster.
-func getZoneNames(c clientset.Interface) ([]string, error) {
-	zoneNames := sets.NewString()
-	nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
-	if err != nil {
-		return nil, err
-	}
-	for _, node := range nodes.Items {
-		zoneName, err := getZoneNameForNode(node)
-		Expect(err).NotTo(HaveOccurred())
-		zoneNames.Insert(zoneName)
-	}
-	return zoneNames.List(), nil
-}
-
 // Return the number of zones in which we have nodes in this cluster.
 func getZoneCount(c clientset.Interface) (int, error) {
-	zoneNames, err := getZoneNames(c)
+	zoneNames, err := framework.GetClusterZones(c)
 	if err != nil {
 		return -1, err
 	}
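Because `sets.String` is a `map[string]sets.Empty` underneath, the rest of `getZoneCount` keeps working unchanged: `len()` on the set gives the zone count just as it did on the old `[]string`. A reconstruction of the full function after this change; the return statement sits outside the hunk above, so the tail is an assumption based on the unchanged behavior:

```go
import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

// getZoneCount returns the number of distinct zones hosting nodes in this
// cluster (sketch; the return line is reconstructed, not part of the diff).
func getZoneCount(c clientset.Interface) (int, error) {
	zoneNames, err := framework.GetClusterZones(c)
	if err != nil {
		return -1, err
	}
	// len() on a sets.String counts distinct members, since it is a map.
	return len(zoneNames), nil
}
```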
@@ -239,7 +222,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
 	Expect(err).NotTo(HaveOccurred())
 
 	// Now make sure they're spread across zones
-	zoneNames, err := getZoneNames(f.ClientSet)
+	zoneNames, err := framework.GetClusterZones(f.ClientSet)
 	Expect(err).NotTo(HaveOccurred())
-	Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames)).To(Equal(true))
+	Expect(checkZoneSpreading(f.ClientSet, pods, zoneNames.List())).To(Equal(true))
 }
@@ -185,9 +185,9 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
 	c := f.ClientSet
 	ns := f.Namespace.Name
 
-	zones, err := getZoneNames(c)
+	zones, err := framework.GetClusterZones(c)
 	Expect(err).NotTo(HaveOccurred())
-
+	zonelist := zones.List()
 	By("Creating static PVs across zones")
 	configs := make([]*staticPVTestConfig, podCount)
 	for i := range configs {
@@ -208,7 +208,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
 	}()
 
 	for i, config := range configs {
-		zone := zonelist[i%len(zones)]
+		zone := zonelist[i%len(zones)]
 		config.pvSource, err = framework.CreatePVSource(zone)
 		Expect(err).NotTo(HaveOccurred())
 
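Since a set cannot be indexed, these last two hunks materialize the sorted slice once as `zonelist` outside the loop and round-robin the PVs across it; `len(zones)` on the set still equals `len(zonelist)`. A minimal standalone sketch of that assignment pattern:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	zones := sets.NewString("zone-a", "zone-b", "zone-c")
	// List() is called once, outside the loop: sets have no stable index,
	// so the sorted slice is what gets indexed.
	zonelist := zones.List()
	podCount := 5
	for i := 0; i < podCount; i++ {
		zone := zonelist[i%len(zones)] // len on the set == len on the slice
		fmt.Printf("PV %d -> %s\n", i, zone)
	}
}
```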