Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-22 19:31:44 +00:00

Merge pull request #84806 from damemi/waitforstablecluster

Update WaitForStableCluster to wait for only system pods to exist

Commit 0bf790b99f
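Previously, WaitForStableCluster waited for every pod in every namespace to be scheduled. After this change it waits until all kube-system pods are scheduled and no pods remain outside kube-system (succeeded and failed pods are ignored). A minimal sketch of the new stability condition, using illustrative names rather than the committed identifiers:

package main

import "fmt"

// clusterStable mirrors the loop condition introduced by this commit: the
// cluster counts as stable once no kube-system pod is unscheduled and the
// system pods are the only pods left (succeeded and failed pods are assumed
// to have been filtered out of allPods already, as the diff below does).
func clusterStable(scheduledSystemPods, notScheduledSystemPods, allPods int) bool {
	systemPods := scheduledSystemPods + notScheduledSystemPods
	return notScheduledSystemPods == 0 && systemPods == allPods
}

func main() {
	fmt.Println(clusterStable(10, 0, 10)) // true: only scheduled system pods remain
	fmt.Println(clusterStable(9, 1, 10))  // false: a system pod is still unscheduled
	fmt.Println(clusterStable(10, 0, 12)) // false: two non-system pods still exist
}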
@@ -2301,38 +2301,6 @@ func retryCmd(command string, args ...string) (string, string, error) {
 	return stdout, stderr, err
 }
 
-// WaitForStableCluster waits until all existing pods are scheduled and returns their amount.
-func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
-	timeout := 10 * time.Minute
-	startTime := time.Now()
-
-	allPods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
-	ExpectNoError(err)
-	// API server returns also Pods that succeeded. We need to filter them out.
-	currentPods := make([]v1.Pod, 0, len(allPods.Items))
-	for _, pod := range allPods.Items {
-		if pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
-			currentPods = append(currentPods, pod)
-		}
-
-	}
-	allPods.Items = currentPods
-	scheduledPods, currentlyNotScheduledPods := e2epod.GetPodsScheduled(masterNodes, allPods)
-	for len(currentlyNotScheduledPods) != 0 {
-		time.Sleep(2 * time.Second)
-
-		allPods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
-		ExpectNoError(err)
-		scheduledPods, currentlyNotScheduledPods = e2epod.GetPodsScheduled(masterNodes, allPods)
-
-		if startTime.Add(timeout).Before(time.Now()) {
-			Failf("Timed out after %v waiting for stable cluster.", timeout)
-			break
-		}
-	}
-	return len(scheduledPods)
-}
-
 // E2ETestNodePreparer implements testutils.TestNodePreparer interface, which is used
 // to create/modify Nodes before running a test.
 type E2ETestNodePreparer struct {
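The function is not dropped outright: the next hunk re-adds it in the scheduling package, rewritten around the system-pods condition sketched above and split across two new helpers, getAllPods and getSystemPods.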
@@ -16,9 +16,69 @@ limitations under the License.
 
 package scheduling
 
-import "github.com/onsi/ginkgo"
+import (
+	"time"
+
+	"github.com/onsi/ginkgo"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/sets"
+	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/test/e2e/framework"
+	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
+)
 
 // SIGDescribe annotates the test with the SIG label.
 func SIGDescribe(text string, body func()) bool {
 	return ginkgo.Describe("[sig-scheduling] "+text, body)
 }
+
+// WaitForStableCluster waits until all existing pods are scheduled and returns their amount.
+func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
+	timeout := 10 * time.Minute
+	startTime := time.Now()
+
+	allPods := getAllPods(c)
+	scheduledSystemPods, currentlyNotScheduledSystemPods := getSystemPods(c)
+	systemPods := scheduledSystemPods + currentlyNotScheduledSystemPods
+
+	// Wait for system pods to be scheduled, and for pods in all other namespaces to be deleted
+	for currentlyNotScheduledSystemPods != 0 || systemPods != allPods {
+		time.Sleep(2 * time.Second)
+
+		scheduledSystemPods, currentlyNotScheduledSystemPods := getSystemPods(c)
+		systemPods = scheduledSystemPods + currentlyNotScheduledSystemPods
+		allPods = getAllPods(c)
+
+		if startTime.Add(timeout).Before(time.Now()) {
+			framework.Failf("Timed out after %v waiting for stable cluster.", timeout)
+			break
+		}
+	}
+	return scheduledSystemPods
+}
+
+// getAllPods lists all pods in the cluster, with succeeded and failed pods filtered out and returns the count
+func getAllPods(c clientset.Interface) int {
+	allPods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
+	framework.ExpectNoError(err, "listing all pods in kube-system namespace while waiting for stable cluster")
+	// API server returns also Pods that succeeded. We need to filter them out.
+	currentPods := make([]v1.Pod, 0, len(allPods.Items))
+	for _, pod := range allPods.Items {
+		if pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
+			currentPods = append(currentPods, pod)
+		}
+
+	}
+	allPods.Items = currentPods
+	return len(allPods.Items)
+}
+
+// getSystemPods lists the pods in the kube-system namespace and returns the number of scheduled and unscheduled pods
+func getSystemPods(c clientset.Interface) (int, int) {
+	systemPods, err := c.CoreV1().Pods(metav1.NamespaceSystem).List(metav1.ListOptions{})
+	framework.ExpectNoError(err, "listing all pods in kube-system namespace while waiting for stable cluster")
+	scheduledSystemPods, currentlyNotScheduledSystemPods := e2epod.GetPodsScheduled(masterNodes, systemPods)
+	return len(scheduledSystemPods), len(currentlyNotScheduledSystemPods)
+}
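Because the helper now lives in the scheduling package itself, the call sites below drop the framework. qualifier. A hedged sketch of how the first call site uses the return value; exampleSaturationCount is a hypothetical name, not code from this commit:

package scheduling

import (
	"k8s.io/apimachinery/pkg/util/sets"
	clientset "k8s.io/client-go/kubernetes"
)

// exampleSaturationCount shows the pattern from the first predicates hunk:
// the number of already-scheduled pods returned by WaitForStableCluster is
// subtracted from the cluster's total pod capacity to decide how many filler
// pods a saturation test still needs to create.
func exampleSaturationCount(cs clientset.Interface, masterNodes sets.String, totalPodCapacity int64) int {
	currentlyScheduledPods := WaitForStableCluster(cs, masterNodes)
	return int(totalPodCapacity) - currentlyScheduledPods
}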
@@ -129,7 +129,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 			totalPodCapacity += podCapacity.Value()
 		}
 
-		currentlyScheduledPods := framework.WaitForStableCluster(cs, masterNodes)
+		currentlyScheduledPods := WaitForStableCluster(cs, masterNodes)
 		podsNeededForSaturation := int(totalPodCapacity) - currentlyScheduledPods
 
 		ginkgo.By(fmt.Sprintf("Starting additional %v Pods to fully saturate the cluster max pods and trying to start another one", podsNeededForSaturation))
@@ -171,7 +171,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 				nodeMaxAllocatable = allocatable.Value()
 			}
 		}
-		framework.WaitForStableCluster(cs, masterNodes)
+		WaitForStableCluster(cs, masterNodes)
 
 		pods, err := cs.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
 		framework.ExpectNoError(err)
@@ -246,7 +246,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		Description: Scheduling Pods MUST fail if the resource limits exceed Machine capacity.
 	*/
 	framework.ConformanceIt("validates resource limits of pods that are allowed to run ", func() {
-		framework.WaitForStableCluster(cs, masterNodes)
+		WaitForStableCluster(cs, masterNodes)
 		nodeMaxAllocatable := int64(0)
 		nodeToAllocatableMap := make(map[string]int64)
 		for _, node := range nodeList.Items {
@@ -356,7 +356,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.")
 		podName := "restricted-pod"
 
-		framework.WaitForStableCluster(cs, masterNodes)
+		WaitForStableCluster(cs, masterNodes)
 
 		conf := pausePodConfig{
 			Name: podName,
@@ -411,7 +411,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
 		ginkgo.By("Trying to schedule Pod with nonempty NodeSelector.")
 		podName := "restricted-pod"
 
-		framework.WaitForStableCluster(cs, masterNodes)
+		WaitForStableCluster(cs, masterNodes)
 
 		conf := pausePodConfig{
 			Name: podName,