mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-27 05:27:21 +00:00)

commit c09809d15b
Merge pull request #92450 from oomichi/dont-use-GetMasterAnd

Use worker nodes for WaitForStableCluster()
@@ -363,8 +363,9 @@ func GetReadyNodesIncludingTainted(c clientset.Interface) (nodes *v1.NodeList, e
 	return nodes, nil
 }
 
-// GetMasterAndWorkerNodes will return a list masters and schedulable worker nodes
-func GetMasterAndWorkerNodes(c clientset.Interface) (sets.String, *v1.NodeList, error) {
+// DeprecatedGetMasterAndWorkerNodes will return a list masters and schedulable worker nodes
+// NOTE: This function has been deprecated because of calling DeprecatedMightBeMasterNode().
+func DeprecatedGetMasterAndWorkerNodes(c clientset.Interface) (sets.String, *v1.NodeList, error) {
 	nodes := &v1.NodeList{}
 	masters := sets.NewString()
 	all, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
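The hunk above only marks the old helper as deprecated; the replacement pattern, used by the rest of this PR, is to list ready schedulable nodes and collect their names into a set. A minimal sketch of that caller-side pattern follows, assuming the usual e2e import aliases; the name buildWorkerNodeSet is hypothetical and not part of this commit.

package scheduling // sketch only; mirrors the e2e scheduling tests touched below

import (
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// buildWorkerNodeSet is a hypothetical helper showing the caller-side
// replacement pattern: list ready schedulable nodes and collect their names,
// rather than asking for masters via the deprecated helper.
func buildWorkerNodeSet(cs clientset.Interface) (sets.String, error) {
	workerNodes := sets.NewString()
	nodeList, err := e2enode.GetReadySchedulableNodes(cs)
	if err != nil {
		return workerNodes, err
	}
	for _, n := range nodeList.Items {
		workerNodes.Insert(n.Name)
	}
	return workerNodes, nil
}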
@@ -41,10 +41,10 @@ func SIGDescribe(text string, body func()) bool {
 }
 
 // WaitForStableCluster waits until all existing pods are scheduled and returns their amount.
-func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
+func WaitForStableCluster(c clientset.Interface, workerNodes sets.String) int {
 	startTime := time.Now()
 	// Wait for all pods to be scheduled.
-	allScheduledPods, allNotScheduledPods := getScheduledAndUnscheduledPods(c, masterNodes, metav1.NamespaceAll)
+	allScheduledPods, allNotScheduledPods := getScheduledAndUnscheduledPods(c, workerNodes, metav1.NamespaceAll)
 	for len(allNotScheduledPods) != 0 {
 		time.Sleep(waitTime)
 		if startTime.Add(timeout).Before(time.Now()) {
@@ -55,7 +55,7 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
 			framework.Failf("Timed out after %v waiting for stable cluster.", timeout)
 			break
 		}
-		allScheduledPods, allNotScheduledPods = getScheduledAndUnscheduledPods(c, masterNodes, metav1.NamespaceAll)
+		allScheduledPods, allNotScheduledPods = getScheduledAndUnscheduledPods(c, workerNodes, metav1.NamespaceAll)
 	}
 	return len(allScheduledPods)
 }
@@ -79,7 +79,7 @@ func WaitForPodsToBeDeleted(c clientset.Interface) {
 }
 
 // getScheduledAndUnscheduledPods lists scheduled and not scheduled pods in the given namespace, with succeeded and failed pods filtered out.
-func getScheduledAndUnscheduledPods(c clientset.Interface, masterNodes sets.String, ns string) (scheduledPods, notScheduledPods []v1.Pod) {
+func getScheduledAndUnscheduledPods(c clientset.Interface, workerNodes sets.String, ns string) (scheduledPods, notScheduledPods []v1.Pod) {
 	pods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
 	framework.ExpectNoError(err, fmt.Sprintf("listing all pods in namespace %q while waiting for stable cluster", ns))
 	// API server returns also Pods that succeeded. We need to filter them out.
@@ -90,7 +90,7 @@ func getScheduledAndUnscheduledPods(c clientset.Interface, masterNodes sets.Stri
 		}
 	}
 	pods.Items = filteredPods
-	return GetPodsScheduled(masterNodes, pods)
+	return GetPodsScheduled(workerNodes, pods)
 }
 
 // getDeletingPods returns whether there are any pods marked for deletion.
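Taken together, the hunks above thread the worker-node set through the whole wait path in this file: WaitForStableCluster polls getScheduledAndUnscheduledPods, which delegates to GetPodsScheduled, so only pods bound to known worker nodes count toward stability. A brief, illustrative caller-side sketch of the new signature (cs and workerNodes mirror the test code later in this diff; the snippet itself is not part of the commit):

// Assumes workerNodes was populated from GetReadySchedulableNodes, as the
// test BeforeEach blocks later in this diff do. The return value is the
// number of pods already bound to worker nodes once nothing is left pending.
scheduledCount := WaitForStableCluster(cs, workerNodes)
framework.Logf("Cluster is stable with %d pods scheduled on worker nodes", scheduledCount)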
@@ -53,7 +53,7 @@ const (
 var localStorageVersion = utilversion.MustParseSemantic("v1.8.0-beta.0")
 
 // variable set in BeforeEach, never modified afterwards
-var masterNodes sets.String
+var workerNodes sets.String
 
 type pausePodConfig struct {
 	Name string
@@ -95,17 +95,14 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 
 		framework.AllNodesReady(cs, time.Minute)
 
-		// NOTE: Here doesn't get nodeList for supporting a master nodes which can host workload pods.
-		masterNodes, _, err = e2enode.GetMasterAndWorkerNodes(cs)
-		if err != nil {
-			framework.Logf("Unexpected error occurred: %v", err)
-		}
 		nodeList, err = e2enode.GetReadySchedulableNodes(cs)
 		if err != nil {
 			framework.Logf("Unexpected error occurred: %v", err)
 		}
 
 		framework.ExpectNoErrorWithOffset(0, err)
+		for _, n := range nodeList.Items {
+			workerNodes.Insert(n.Name)
+		}
 
 		err = framework.CheckTestingNSDeletedExcept(cs, ns)
 		framework.ExpectNoError(err)
@@ -135,7 +132,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 				nodeMaxAllocatable = allocatable.Value()
 			}
 		}
-		WaitForStableCluster(cs, masterNodes)
+		WaitForStableCluster(cs, workerNodes)
 
 		pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
 		framework.ExpectNoError(err)
@@ -215,7 +212,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		var beardsecond v1.ResourceName = "example.com/beardsecond"
 
 		ginkgo.BeforeEach(func() {
-			WaitForStableCluster(cs, masterNodes)
+			WaitForStableCluster(cs, workerNodes)
 			ginkgo.By("Add RuntimeClass and fake resource")
 
 			// find a node which can run a pod:
@@ -323,7 +320,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		Description: Scheduling Pods MUST fail if the resource requests exceed Machine capacity.
 	*/
 	framework.ConformanceIt("validates resource limits of pods that are allowed to run ", func() {
-		WaitForStableCluster(cs, masterNodes)
+		WaitForStableCluster(cs, workerNodes)
 		nodeMaxAllocatable := int64(0)
 		nodeToAllocatableMap := make(map[string]int64)
 		for _, node := range nodeList.Items {
@@ -436,7 +433,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.")
 		podName := "restricted-pod"
 
-		WaitForStableCluster(cs, masterNodes)
+		WaitForStableCluster(cs, workerNodes)
 
 		conf := pausePodConfig{
 			Name: podName,
@@ -491,7 +488,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.")
 		podName := "restricted-pod"
 
-		WaitForStableCluster(cs, masterNodes)
+		WaitForStableCluster(cs, workerNodes)
 
 		conf := pausePodConfig{
 			Name: podName,
@@ -933,7 +930,7 @@ func WaitForSchedulerAfterAction(f *framework.Framework, action Action, ns, podN
 func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) {
 	allPods, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})
 	framework.ExpectNoError(err)
-	scheduledPods, notScheduledPods := GetPodsScheduled(masterNodes, allPods)
+	scheduledPods, notScheduledPods := GetPodsScheduled(workerNodes, allPods)
 
 	framework.ExpectEqual(len(notScheduledPods), expectedNotScheduled, fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))
 	framework.ExpectEqual(len(scheduledPods), expectedScheduled, fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))
@@ -1045,10 +1042,10 @@ func translateIPv4ToIPv6(ip string) string {
 	return ip
 }
 
-// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods.
-func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
+// GetPodsScheduled returns a number of currently scheduled and not scheduled Pods on worker nodes.
+func GetPodsScheduled(workerNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
 	for _, pod := range pods.Items {
-		if !masterNodes.Has(pod.Spec.NodeName) {
+		if workerNodes.Has(pod.Spec.NodeName) {
 			if pod.Spec.NodeName != "" {
 				_, scheduledCondition := podutil.GetPodCondition(&pod.Status, v1.PodScheduled)
 				framework.ExpectEqual(scheduledCondition != nil, true)
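This hunk carries the one real semantic flip of the PR: the old predicate (!masterNodes.Has) counted any pod that was not on a master, while the new one (workerNodes.Has) counts only pods bound to a known worker, so pods on master or otherwise unlisted nodes now fall out of both buckets. A simplified, illustrative stand-in for the new classification follows; the real helper additionally asserts the PodScheduled condition, and classifyPods is a hypothetical name, not part of this commit.

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
)

// classifyPods sketches GetPodsScheduled's post-change behavior: a pod is
// "scheduled" only if it is bound to a node in workerNodes, and "not
// scheduled" only if it has no node at all; pods on any other node are
// ignored.
func classifyPods(workerNodes sets.String, pods []v1.Pod) (scheduled, notScheduled []v1.Pod) {
	for _, pod := range pods {
		switch {
		case workerNodes.Has(pod.Spec.NodeName):
			scheduled = append(scheduled, pod)
		case pod.Spec.NodeName == "":
			notScheduled = append(notScheduled, pod)
		}
	}
	return scheduled, notScheduled
}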
@@ -97,11 +97,14 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
 		}
 
 		e2enode.WaitForTotalHealthy(cs, time.Minute)
-		masterNodes, nodeList, err = e2enode.GetMasterAndWorkerNodes(cs)
+		nodeList, err = e2enode.GetReadySchedulableNodes(cs)
 		if err != nil {
 			framework.Logf("Unexpected error occurred: %v", err)
 		}
 		framework.ExpectNoErrorWithOffset(0, err)
+		for _, n := range nodeList.Items {
+			workerNodes.Insert(n.Name)
+		}
 
 		err = framework.CheckTestingNSDeletedExcept(cs, ns)
 		framework.ExpectNoError(err)
@@ -144,7 +144,7 @@ var _ = SIGDescribe("SchedulerPriorities [Serial]", func() {
 		var err error
 
 		e2enode.WaitForTotalHealthy(cs, time.Minute)
-		_, nodeList, err = e2enode.GetMasterAndWorkerNodes(cs)
+		nodeList, err = e2enode.GetReadySchedulableNodes(cs)
 		if err != nil {
 			framework.Logf("Unexpected error occurred: %v", err)
 		}
@@ -94,7 +94,7 @@ var _ = utils.SIGDescribe("Storage Policy Based Volume Provisioning [Feature:vsp
 		scParameters = make(map[string]string)
 		_, err := e2enode.GetRandomReadySchedulableNode(f.ClientSet)
 		framework.ExpectNoError(err)
-		masternodes, _, err := e2enode.GetMasterAndWorkerNodes(client)
+		masternodes, _, err := e2enode.DeprecatedGetMasterAndWorkerNodes(client)
 		framework.ExpectNoError(err)
 		gomega.Expect(masternodes).NotTo(gomega.BeEmpty())
 		masterNode = masternodes.List()[0]