Use worker nodes for WaitForStableCluster()

WaitForStableCluster() checks that all pods are scheduled on worker nodes,
and the function used to take the master nodes so that it could skip
checking control-plane pods.
GetMasterAndWorkerNodes() was used to obtain the master nodes, but its
implementation is problematic because it relies on
DeprecatedMightBeMasterNode().

This change makes WaitForStableCluster() refer to worker nodes directly so
that GetMasterAndWorkerNodes() is no longer needed.
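
As an illustration of the pattern the diff below adopts, here is a minimal
sketch. The helper name waitForStableClusterOnWorkers is invented for this
sketch and the import paths/package placement are assumptions following the
upstream e2e layout; GetReadySchedulableNodes, WaitForStableCluster, and the
worker-node set construction are the ones the changed tests use.

// Sketch only: assumes this sits next to WaitForStableCluster in the
// scheduling e2e test package.
package scheduling

import (
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

func waitForStableClusterOnWorkers(cs clientset.Interface) int {
	// Collect worker nodes from the ready schedulable nodes; no
	// master/control-plane heuristic is needed anymore.
	workerNodes := sets.NewString()
	nodeList, err := e2enode.GetReadySchedulableNodes(cs)
	framework.ExpectNoError(err)
	for _, n := range nodeList.Items {
		workerNodes.Insert(n.Name)
	}

	// WaitForStableCluster now interprets its argument as the worker-node set.
	return WaitForStableCluster(cs, workerNodes)
}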
Kenichi Omichi 2020-06-24 01:15:48 +00:00
parent e2d8f6c278
commit 5edf15ea97
6 changed files with 27 additions and 26 deletions


@@ -363,8 +363,9 @@ func GetReadyNodesIncludingTainted(c clientset.Interface) (nodes *v1.NodeList, e
 	return nodes, nil
 }
 
-// GetMasterAndWorkerNodes will return a list masters and schedulable worker nodes
-func GetMasterAndWorkerNodes(c clientset.Interface) (sets.String, *v1.NodeList, error) {
+// DeprecatedGetMasterAndWorkerNodes will return a list masters and schedulable worker nodes
+// NOTE: This function has been deprecated because of calling DeprecatedMightBeMasterNode().
+func DeprecatedGetMasterAndWorkerNodes(c clientset.Interface) (sets.String, *v1.NodeList, error) {
 	nodes := &v1.NodeList{}
 	masters := sets.NewString()
 	all, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})


@@ -41,10 +41,10 @@ func SIGDescribe(text string, body func()) bool {
 }
 
 // WaitForStableCluster waits until all existing pods are scheduled and returns their amount.
-func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
+func WaitForStableCluster(c clientset.Interface, workerNodes sets.String) int {
 	startTime := time.Now()
 	// Wait for all pods to be scheduled.
-	allScheduledPods, allNotScheduledPods := getScheduledAndUnscheduledPods(c, masterNodes, metav1.NamespaceAll)
+	allScheduledPods, allNotScheduledPods := getScheduledAndUnscheduledPods(c, workerNodes, metav1.NamespaceAll)
 	for len(allNotScheduledPods) != 0 {
 		time.Sleep(waitTime)
 		if startTime.Add(timeout).Before(time.Now()) {
@@ -55,7 +55,7 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
 			framework.Failf("Timed out after %v waiting for stable cluster.", timeout)
 			break
 		}
-		allScheduledPods, allNotScheduledPods = getScheduledAndUnscheduledPods(c, masterNodes, metav1.NamespaceAll)
+		allScheduledPods, allNotScheduledPods = getScheduledAndUnscheduledPods(c, workerNodes, metav1.NamespaceAll)
 	}
 	return len(allScheduledPods)
 }
@@ -79,7 +79,7 @@ func WaitForPodsToBeDeleted(c clientset.Interface) {
 }
 
 // getScheduledAndUnscheduledPods lists scheduled and not scheduled pods in the given namespace, with succeeded and failed pods filtered out.
-func getScheduledAndUnscheduledPods(c clientset.Interface, masterNodes sets.String, ns string) (scheduledPods, notScheduledPods []v1.Pod) {
+func getScheduledAndUnscheduledPods(c clientset.Interface, workerNodes sets.String, ns string) (scheduledPods, notScheduledPods []v1.Pod) {
 	pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
 	framework.ExpectNoError(err, fmt.Sprintf("listing all pods in namespace %q while waiting for stable cluster", ns))
 	// API server returns also Pods that succeeded. We need to filter them out.
@@ -90,7 +90,7 @@ func getScheduledAndUnscheduledPods(c clientset.Interface, masterNodes sets.Stri
 		}
 	}
 	pods.Items = filteredPods
-	return GetPodsScheduled(masterNodes, pods)
+	return GetPodsScheduled(workerNodes, pods)
 }
 
 // getDeletingPods returns whether there are any pods marked for deletion.


@@ -53,7 +53,7 @@ const (
 var localStorageVersion = utilversion.MustParseSemantic("v1.8.0-beta.0")
 
 // variable set in BeforeEach, never modified afterwards
-var masterNodes sets.String
+var workerNodes sets.String
 
 type pausePodConfig struct {
 	Name string
@@ -95,17 +95,14 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		framework.AllNodesReady(cs, time.Minute)
 
-		// NOTE: Here doesn't get nodeList for supporting a master nodes which can host workload pods.
-		masterNodes, _, err = e2enode.GetMasterAndWorkerNodes(cs)
-		if err != nil {
-			framework.Logf("Unexpected error occurred: %v", err)
-		}
-
 		nodeList, err = e2enode.GetReadySchedulableNodes(cs)
 		if err != nil {
 			framework.Logf("Unexpected error occurred: %v", err)
 		}
 
 		framework.ExpectNoErrorWithOffset(0, err)
+		for _, n := range nodeList.Items {
+			workerNodes.Insert(n.Name)
+		}
 
 		err = framework.CheckTestingNSDeletedExcept(cs, ns)
 		framework.ExpectNoError(err)
@@ -135,7 +132,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 				nodeMaxAllocatable = allocatable.Value()
 			}
 		}
-		WaitForStableCluster(cs, masterNodes)
+		WaitForStableCluster(cs, workerNodes)
 
 		pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
 		framework.ExpectNoError(err)
@@ -215,7 +212,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		var beardsecond v1.ResourceName = "example.com/beardsecond"
 
 		ginkgo.BeforeEach(func() {
-			WaitForStableCluster(cs, masterNodes)
+			WaitForStableCluster(cs, workerNodes)
 
 			ginkgo.By("Add RuntimeClass and fake resource")
 			// find a node which can run a pod:
@@ -323,7 +320,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		Description: Scheduling Pods MUST fail if the resource requests exceed Machine capacity.
 	*/
 	framework.ConformanceIt("validates resource limits of pods that are allowed to run ", func() {
-		WaitForStableCluster(cs, masterNodes)
+		WaitForStableCluster(cs, workerNodes)
 		nodeMaxAllocatable := int64(0)
 		nodeToAllocatableMap := make(map[string]int64)
 		for _, node := range nodeList.Items {
@@ -436,7 +433,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.")
 		podName := "restricted-pod"
 
-		WaitForStableCluster(cs, masterNodes)
+		WaitForStableCluster(cs, workerNodes)
 
 		conf := pausePodConfig{
 			Name: podName,
@@ -491,7 +488,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.")
 		podName := "restricted-pod"
 
-		WaitForStableCluster(cs, masterNodes)
+		WaitForStableCluster(cs, workerNodes)
 
 		conf := pausePodConfig{
 			Name: podName,
@@ -933,7 +930,7 @@ func WaitForSchedulerAfterAction(f *framework.Framework, action Action, ns, podN
 func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) {
 	allPods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
 	framework.ExpectNoError(err)
-	scheduledPods, notScheduledPods := GetPodsScheduled(masterNodes, allPods)
+	scheduledPods, notScheduledPods := GetPodsScheduled(workerNodes, allPods)
 
 	framework.ExpectEqual(len(notScheduledPods), expectedNotScheduled, fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))
 	framework.ExpectEqual(len(scheduledPods), expectedScheduled, fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))
@@ -1045,10 +1042,10 @@ func translateIPv4ToIPv6(ip string) string {
 	return ip
 }
 
-// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods.
-func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
+// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods on worker nodes.
+func GetPodsScheduled(workerNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
 	for _, pod := range pods.Items {
-		if !masterNodes.Has(pod.Spec.NodeName) {
+		if workerNodes.Has(pod.Spec.NodeName) {
 			if pod.Spec.NodeName != "" {
 				_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
 				framework.ExpectEqual(scheduledCondition != nil, true)

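The last hunk above flips the filter in GetPodsScheduled from "not on a
master node" to "on a known worker node". The following standalone toy
program (hypothetical, not part of this commit; the node names are made up)
compares the two predicates and shows where they differ, notably for pods
bound to a node that is in neither set and for pods with an empty NodeName:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	masterNodes := sets.NewString("master-0")
	workerNodes := sets.NewString("worker-0", "worker-1")

	// Values a pod's Spec.NodeName might hold; "" means not scheduled yet.
	for _, nodeName := range []string{"worker-0", "master-0", "untracked-node", ""} {
		oldCheck := !masterNodes.Has(nodeName) // before: examine everything except master nodes
		newCheck := workerNodes.Has(nodeName)  // after: examine only known worker nodes
		fmt.Printf("NodeName=%q old=%v new=%v\n", nodeName, oldCheck, newCheck)
	}
}

Under the old predicate a pod on an untracked node or with an empty NodeName
still passed the outer check; under the new one only pods whose NodeName is
in the explicitly collected worker-node set do.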

@@ -97,11 +97,14 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 		}
 
 		e2enode.WaitForTotalHealthy(cs, time.Minute)
-		masterNodes, nodeList, err = e2enode.GetMasterAndWorkerNodes(cs)
+		nodeList, err = e2enode.GetReadySchedulableNodes(cs)
 		if err != nil {
 			framework.Logf("Unexpected error occurred: %v", err)
 		}
 		framework.ExpectNoErrorWithOffset(0, err)
+		for _, n := range nodeList.Items {
+			workerNodes.Insert(n.Name)
+		}
 
 		err = framework.CheckTestingNSDeletedExcept(cs, ns)
 		framework.ExpectNoError(err)


@@ -144,7 +144,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 		var err error
 
 		e2enode.WaitForTotalHealthy(cs, time.Minute)
-		_, nodeList, err = e2enode.GetMasterAndWorkerNodes(cs)
+		nodeList, err = e2enode.GetReadySchedulableNodes(cs)
 		if err != nil {
 			framework.Logf("Unexpected error occurred: %v", err)
 		}


@@ -94,7 +94,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 		scParameters = make(map[string]string)
 		_, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
 		framework.ExpectNoError(err)
-		masternodes, _, err := e2enode.GetMasterAndWorkerNodes(client)
+		masternodes, _, err := e2enode.DeprecatedGetMasterAndWorkerNodes(client)
 		framework.ExpectNoError(err)
 		gomega.Expect(masternodes).NotTo(gomega.BeEmpty())
 		masterNode = masternodes.List()[0]