Merge pull request #113270 from rrangith/fix/create-pvc-for-pending-pod
Automatically recreate PVC for pending STS pod
@@ -143,6 +143,23 @@ func WaitForNodeToBeReady(ctx context.Context, c clientset.Interface, name strin
 	return WaitConditionToBe(ctx, c, name, v1.NodeReady, true, timeout)
 }
 
+func WaitForNodeSchedulable(c clientset.Interface, name string, timeout time.Duration, wantSchedulable bool) bool {
+	framework.Logf("Waiting up to %v for node %s to be schedulable: %t", timeout, name, wantSchedulable)
+	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
+		node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
+		if err != nil {
+			framework.Logf("Couldn't get node %s", name)
+			continue
+		}
+
+		if IsNodeSchedulable(node) == wantSchedulable {
+			return true
+		}
+	}
+	framework.Logf("Node %s didn't reach desired schedulable status (%t) within %v", name, wantSchedulable, timeout)
+	return false
+}
+
 // CheckReady waits up to timeout for cluster to has desired size and
 // there is no not-ready nodes in it. By cluster size we mean number of schedulable Nodes.
 func CheckReady(ctx context.Context, c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) {
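A minimal usage sketch (not part of the PR) of how a test might drive the new helper: cordon a node through the API, then wait until the framework observes it as unschedulable. The import aliases, the helper name cordonAndWaitUnschedulable, and the 2-minute timeout are assumptions for illustration only.

// Sketch, assuming the helper lands in the e2e framework's node package.
package example

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

// cordonAndWaitUnschedulable marks the named node unschedulable and waits for
// WaitForNodeSchedulable to confirm the state change, failing the test otherwise.
func cordonAndWaitUnschedulable(ctx context.Context, c clientset.Interface, nodeName string) {
	node, err := c.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
	framework.ExpectNoError(err)

	// Cordon the node via a direct spec update (illustrative; real tests may
	// use a dedicated cordon/taint helper instead).
	node.Spec.Unschedulable = true
	_, err = c.CoreV1().Nodes().Update(ctx, node, metav1.UpdateOptions{})
	framework.ExpectNoError(err)

	// wantSchedulable=false: wait until the node is reported as NOT schedulable.
	if !e2enode.WaitForNodeSchedulable(c, nodeName, 2*time.Minute, false) {
		framework.Failf("node %s did not become unschedulable in time", nodeName)
	}
}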
@@ -171,6 +171,31 @@ func WaitForStatusReplicas(ctx context.Context, c clientset.Interface, ss *appsv
 	}
 }
 
+// WaitForStatusCurrentReplicas waits for the ss.Status.CurrentReplicas to be equal to expectedReplicas
+func WaitForStatusCurrentReplicas(c clientset.Interface, ss *appsv1.StatefulSet, expectedReplicas int32) {
+	framework.Logf("Waiting for statefulset status.currentReplicas updated to %d", expectedReplicas)
+
+	ns, name := ss.Namespace, ss.Name
+	pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
+		func() (bool, error) {
+			ssGet, err := c.AppsV1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
+			if err != nil {
+				return false, err
+			}
+			if ssGet.Status.ObservedGeneration < ss.Generation {
+				return false, nil
+			}
+			if ssGet.Status.CurrentReplicas != expectedReplicas {
+				framework.Logf("Waiting for stateful set status.currentReplicas to become %d, currently %d", expectedReplicas, ssGet.Status.CurrentReplicas)
+				return false, nil
+			}
+			return true, nil
+		})
+	if pollErr != nil {
+		framework.Failf("Failed waiting for stateful set status.currentReplicas updated to %d: %v", expectedReplicas, pollErr)
+	}
+}
+
 // Saturate waits for all Pods in ss to become Running and Ready.
 func Saturate(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet) {
 	var i int32
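A minimal usage sketch (not part of the PR) for the second helper: resize a StatefulSet via the API and then block until status.currentReplicas catches up. The import alias e2estatefulset, the helper name scaleAndWaitCurrentReplicas, and the inline scale are assumptions for illustration only.

// Sketch, assuming the helper lands in the e2e framework's statefulset package.
package example

import (
	"context"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
	e2estatefulset "k8s.io/kubernetes/test/e2e/framework/statefulset"
)

// scaleAndWaitCurrentReplicas updates spec.replicas and blocks until the
// StatefulSet controller reports the matching status.currentReplicas
// (WaitForStatusCurrentReplicas fails the test itself on timeout).
func scaleAndWaitCurrentReplicas(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet, replicas int32) {
	ssGet, err := c.AppsV1().StatefulSets(ss.Namespace).Get(ctx, ss.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)

	ssGet.Spec.Replicas = &replicas
	updated, err := c.AppsV1().StatefulSets(ssGet.Namespace).Update(ctx, ssGet, metav1.UpdateOptions{})
	framework.ExpectNoError(err)

	// Pass the updated object so the helper's ObservedGeneration check uses
	// the post-update generation.
	e2estatefulset.WaitForStatusCurrentReplicas(c, updated, replicas)
}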