mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-23 11:50:44 +00:00
[storage] [test] Ensure proper resource creation
Ensure resources are created in zones with schedulable nodes. For example, if we have 4 zones — 3 zones with worker nodes and 1 zone with only master nodes (unschedulable for workloads) — we should not create resources like PVs, PVCs, or pods in that zone.
This commit is contained in:
parent
9c360b6185
commit
67bc23411b
@ -561,6 +561,31 @@ func GetClusterZones(c clientset.Interface) (sets.String, error) {
|
|||||||
return zones, nil
|
return zones, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetSchedulableClusterZones returns the values of zone label collected from all nodes which are schedulable.
|
||||||
|
func GetSchedulableClusterZones(c clientset.Interface) (sets.String, error) {
|
||||||
|
nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("Error getting nodes while attempting to list cluster zones: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// collect values of zone label from all nodes
|
||||||
|
zones := sets.NewString()
|
||||||
|
for _, node := range nodes.Items {
|
||||||
|
// We should have at least 1 node in the zone which is schedulable.
|
||||||
|
if !IsNodeSchedulable(&node) {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if zone, found := node.Labels[v1.LabelFailureDomainBetaZone]; found {
|
||||||
|
zones.Insert(zone)
|
||||||
|
}
|
||||||
|
|
||||||
|
if zone, found := node.Labels[v1.LabelTopologyZone]; found {
|
||||||
|
zones.Insert(zone)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return zones, nil
|
||||||
|
}
|
||||||
|
|
||||||
// CreatePodsPerNodeForSimpleApp creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
|
// CreatePodsPerNodeForSimpleApp creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
|
||||||
func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
|
func CreatePodsPerNodeForSimpleApp(c clientset.Interface, namespace, appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
|
||||||
nodes, err := GetBoundedReadySchedulableNodes(c, maxCount)
|
nodes, err := GetBoundedReadySchedulableNodes(c, maxCount)
|
||||||
|
@ -54,7 +54,7 @@ var _ = utils.SIGDescribe("Multi-AZ Cluster Volumes", func() {
|
|||||||
|
|
||||||
// Return the number of zones in which we have nodes in this cluster.
|
// Return the number of zones in which we have nodes in this cluster.
|
||||||
func getZoneCount(c clientset.Interface) (int, error) {
|
func getZoneCount(c clientset.Interface) (int, error) {
|
||||||
zoneNames, err := e2enode.GetClusterZones(c)
|
zoneNames, err := e2enode.GetSchedulableClusterZones(c)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return -1, err
|
return -1, err
|
||||||
}
|
}
|
||||||
@ -75,7 +75,7 @@ func PodsUseStaticPVsOrFail(f *framework.Framework, podCount int, image string)
|
|||||||
c := f.ClientSet
|
c := f.ClientSet
|
||||||
ns := f.Namespace.Name
|
ns := f.Namespace.Name
|
||||||
|
|
||||||
zones, err := e2enode.GetClusterZones(c)
|
zones, err := e2enode.GetSchedulableClusterZones(c)
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err)
|
||||||
zonelist := zones.List()
|
zonelist := zones.List()
|
||||||
ginkgo.By("Creating static PVs across zones")
|
ginkgo.By("Creating static PVs across zones")
|
||||||
|
Loading…
Reference in New Issue
Block a user