PR feedback

Rahul Rangith 2022-11-14 15:04:16 -05:00
parent c1cc18ccd5
commit e6a90aa48a
3 changed files with 65 additions and 13 deletions

View File

@@ -1349,7 +1349,7 @@ var _ = SIGDescribe("StatefulSet", func() {
})
})
ginkgo.It("PVC should be recreated when pod is pending due to missing PVC [Feature:AutomaticPVCRecreation]", func() {
ginkgo.It("PVC should be recreated when pod is pending due to missing PVC [Feature:AutomaticPVCRecreation][Disruptive][Serial]", func() {
// This test must be run in an environment that will allow the test pod to be scheduled on a node with local volumes
ssName := "test-ss"
headlessSvcName := "test"
@@ -1394,36 +1394,37 @@ var _ = SIGDescribe("StatefulSet", func() {
ginkgo.By("Cordoning Node")
_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
framework.ExpectNoError(err)
cordoned := true
// wait for the node to be patched
time.Sleep(5 * time.Second)
node, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
framework.ExpectNoError(err)
framework.ExpectEqual(node.Spec.Unschedulable, true)
defer func() {
if cordoned {
uncordonNode(c, oldData, newData, nodeName)
}
}()
// wait for the node to be unschedulable
e2enode.WaitForNodeSchedulable(c, nodeName, 10*time.Second, false)
ginkgo.By("Deleting Pod")
err = c.CoreV1().Pods(ns).Delete(context.TODO(), podName, metav1.DeleteOptions{})
framework.ExpectNoError(err)
// wait for the pod to be recreated
time.Sleep(10 * time.Second)
e2estatefulset.WaitForStatusCurrentReplicas(c, ss, 1)
_, err = c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
framework.ExpectNoError(err)
pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: klabels.Everything().String()})
framework.ExpectNoError(err)
framework.ExpectEqual(len(pvcList.Items), 1)
pvcName := pvcList.Items[0].Name
ginkgo.By("Deleting PVC")
err = c.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvcName, metav1.DeleteOptions{})
framework.ExpectNoError(err)
ginkgo.By("Uncordoning Node")
// uncordon node, by reverting patch
revertPatchBytes, err := strategicpatch.CreateTwoWayMergePatch(newData, oldData, v1.Node{})
framework.ExpectNoError(err)
_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, revertPatchBytes, metav1.PatchOptions{})
framework.ExpectNoError(err)
uncordonNode(c, oldData, newData, nodeName)
cordoned = false
ginkgo.By("Confirming PVC recreated")
err = verifyStatefulSetPVCsExist(c, ss, []int{0})
@@ -1436,6 +1437,15 @@ var _ = SIGDescribe("StatefulSet", func() {
})
})
func uncordonNode(c clientset.Interface, oldData, newData []byte, nodeName string) {
ginkgo.By("Uncordoning Node")
// uncordon the node by reverting the cordon patch
revertPatchBytes, err := strategicpatch.CreateTwoWayMergePatch(newData, oldData, v1.Node{})
framework.ExpectNoError(err)
_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, revertPatchBytes, metav1.PatchOptions{})
framework.ExpectNoError(err)
}
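For context (not part of this commit): uncordonNode takes the oldData/newData JSON snapshots that the test builds earlier, outside the hunks shown here. A minimal sketch of how such a cordon patch pair is typically produced with a strategic merge patch; the cordonPatch helper and the package name are assumptions for illustration, not code from this PR:

// Hypothetical sketch, not part of this diff: build the oldData/newData/patchBytes
// used to cordon a node and later revert the change in uncordonNode.
package apps // assumed package name

import (
	"encoding/json"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

// cordonPatch marshals the node before and after setting Spec.Unschedulable and
// returns both snapshots plus the strategic merge patch that applies the change.
func cordonPatch(node *v1.Node) (oldData, newData, patchBytes []byte, err error) {
	oldData, err = json.Marshal(node)
	if err != nil {
		return nil, nil, nil, err
	}
	nodeCopy := node.DeepCopy()
	nodeCopy.Spec.Unschedulable = true
	newData, err = json.Marshal(nodeCopy)
	if err != nil {
		return nil, nil, nil, err
	}
	patchBytes, err = strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
	return oldData, newData, patchBytes, err
}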
func kubectlExecWithRetries(ns string, args ...string) (out string) {
var err error
for i := 0; i < 3; i++ {

View File

@@ -143,6 +143,23 @@ func WaitForNodeToBeReady(ctx context.Context, c clientset.Interface, name strin
return WaitConditionToBe(ctx, c, name, v1.NodeReady, true, timeout)
}
func WaitForNodeSchedulable(c clientset.Interface, name string, timeout time.Duration, wantSchedulable bool) bool {
framework.Logf("Waiting up to %v for node %s to be schedulable: %t", timeout, name, wantSchedulable)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
node, err := c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
framework.Logf("Couldn't get node %s", name)
continue
}
if IsNodeSchedulable(node) == wantSchedulable {
return true
}
}
framework.Logf("Node %s didn't reach desired schedulable status (%t) within %v", name, wantSchedulable, timeout)
return false
}
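A possible caller-side sketch (not part of this commit): since WaitForNodeSchedulable returns a bool instead of failing the test itself, the caller can assert on the result so that a node that never reaches the desired schedulable state fails the run. The variable names (c, nodeName) mirror the StatefulSet test above; the ExpectEqual wrapper is an assumption, not something this diff adds:

// Hypothetical caller, not part of this diff: fail the test if the cordoned node
// does not become unschedulable within the timeout.
reached := e2enode.WaitForNodeSchedulable(c, nodeName, 10*time.Second, false)
framework.ExpectEqual(reached, true, "node %s did not become unschedulable after cordoning", nodeName)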
// CheckReady waits up to timeout for the cluster to reach the desired size and
// for there to be no not-ready nodes in it. By cluster size we mean the number of schedulable Nodes.
func CheckReady(ctx context.Context, c clientset.Interface, size int, timeout time.Duration) ([]v1.Node, error) {

View File

@@ -171,6 +171,31 @@ func WaitForStatusReplicas(ctx context.Context, c clientset.Interface, ss *appsv
}
}
// WaitForStatusCurrentReplicas waits for the ss.Status.CurrentReplicas to be equal to expectedReplicas
func WaitForStatusCurrentReplicas(c clientset.Interface, ss *appsv1.StatefulSet, expectedReplicas int32) {
framework.Logf("Waiting for statefulset status.currentReplicas updated to %d", expectedReplicas)
ns, name := ss.Namespace, ss.Name
pollErr := wait.PollImmediate(StatefulSetPoll, StatefulSetTimeout,
func() (bool, error) {
ssGet, err := c.AppsV1().StatefulSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false, err
}
if ssGet.Status.ObservedGeneration < ss.Generation {
return false, nil
}
if ssGet.Status.CurrentReplicas != expectedReplicas {
framework.Logf("Waiting for stateful set status.currentReplicas to become %d, currently %d", expectedReplicas, ssGet.Status.CurrentReplicas)
return false, nil
}
return true, nil
})
if pollErr != nil {
framework.Failf("Failed waiting for stateful set status.currentReplicas updated to %d: %v", expectedReplicas, pollErr)
}
}
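A possible caller-side sketch (not part of this commit), mirroring the call in the StatefulSet test above: after deleting a pod, block until the controller reports the replacement through status.currentReplicas before inspecting the pod again. The variables (c, ns, podName, ss) are the ones defined in that test:

// Hypothetical caller, not part of this diff: delete a pod of the StatefulSet, then
// wait until status.currentReplicas again matches the declared replica count.
err := c.CoreV1().Pods(ns).Delete(context.TODO(), podName, metav1.DeleteOptions{})
framework.ExpectNoError(err)
e2estatefulset.WaitForStatusCurrentReplicas(c, ss, *ss.Spec.Replicas)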
// Saturate waits for all Pods in ss to become Running and Ready.
func Saturate(ctx context.Context, c clientset.Interface, ss *appsv1.StatefulSet) {
var i int32