mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-09-08 20:50:24 +00:00
Automatically recreate the PVC when a StatefulSet pod is stuck in Pending
This commit is contained in:
@@ -38,6 +38,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/strategicpatch"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
@@ -1347,6 +1348,92 @@ var _ = SIGDescribe("StatefulSet", func() {
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
})
|
||||
|
||||
ginkgo.It("PVC should be recreated when pod is pending due to missing PVC [Feature:AutomaticPVCRecreation]", func() {
	// This test must be run in an environment that will allow the test pod to be
	// scheduled on a node with local volumes.
	ssName := "test-ss"
	headlessSvcName := "test"
	// Define StatefulSet Labels
	ssPodLabels := map[string]string{
		"name": "sample-pod",
		"pod":  WebserverImageName,
	}

	statefulPodMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
	ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, statefulPodMounts, nil, ssPodLabels)
	e2epv.SkipIfNoDefaultStorageClass(c)
	ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
	_, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
	framework.ExpectNoError(err)

	ginkgo.By("Confirming PVC exists")
	err = verifyStatefulSetPVCsExist(c, ss, []int{0})
	framework.ExpectNoError(err)

	ginkgo.By("Confirming Pod is ready")
	e2estatefulset.WaitForStatusReadyReplicas(c, ss, 1)
	podName := getStatefulSetPodNameAtIndex(0, ss)
	pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
	framework.ExpectNoError(err)

	nodeName := pod.Spec.NodeName
	node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
	framework.ExpectNoError(err)

	oldData, err := json.Marshal(node)
	framework.ExpectNoError(err)

	node.Spec.Unschedulable = true

	newData, err := json.Marshal(node)
	framework.ExpectNoError(err)

	// cordon node, to make sure pod does not get scheduled to the node until the pvc is deleted
	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, v1.Node{})
	framework.ExpectNoError(err)
	ginkgo.By("Cordoning Node")
	_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
	framework.ExpectNoError(err)

	// Always revert the cordon before leaving this It, even if an assertion
	// below fails; otherwise an unschedulable node leaks into later tests.
	// The flag makes the explicit call below and the deferred call idempotent.
	uncordoned := false
	uncordonNode := func() {
		if uncordoned {
			return
		}
		uncordoned = true
		ginkgo.By("Uncordoning Node")
		// uncordon node, by reverting patch
		revertPatchBytes, err := strategicpatch.CreateTwoWayMergePatch(newData, oldData, v1.Node{})
		framework.ExpectNoError(err)
		_, err = c.CoreV1().Nodes().Patch(context.TODO(), nodeName, types.StrategicMergePatchType, revertPatchBytes, metav1.PatchOptions{})
		framework.ExpectNoError(err)
	}
	defer uncordonNode()

	// Wait for the node object to actually report Unschedulable instead of
	// sleeping a fixed interval (fixed sleeps are flaky on slow clusters).
	err = wait.PollImmediate(time.Second, 30*time.Second, func() (bool, error) {
		node, err = c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		return node.Spec.Unschedulable, nil
	})
	framework.ExpectNoError(err)
	framework.ExpectEqual(node.Spec.Unschedulable, true)

	ginkgo.By("Deleting Pod")
	err = c.CoreV1().Pods(ns).Delete(context.TODO(), podName, metav1.DeleteOptions{})
	framework.ExpectNoError(err)

	// Wait for the StatefulSet controller to recreate the pod (it stays
	// Pending while the node is cordoned). Poll rather than sleep.
	err = wait.PollImmediate(time.Second, 60*time.Second, func() (bool, error) {
		// A Get error here is treated as "not yet recreated" so the poll
		// rides out the NotFound window between delete and recreate.
		_, err := c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
		return err == nil, nil
	})
	framework.ExpectNoError(err)

	pvcList, err := c.CoreV1().PersistentVolumeClaims(ns).List(context.TODO(), metav1.ListOptions{LabelSelector: klabels.Everything().String()})
	framework.ExpectNoError(err)
	// Guard the index below: an empty list must fail the test cleanly
	// instead of panicking with an index-out-of-range.
	framework.ExpectEqual(len(pvcList.Items) > 0, true, "expected at least one PVC in namespace %s", ns)
	pvcName := pvcList.Items[0].Name

	ginkgo.By("Deleting PVC")
	err = c.CoreV1().PersistentVolumeClaims(ns).Delete(context.TODO(), pvcName, metav1.DeleteOptions{})
	framework.ExpectNoError(err)

	uncordonNode()

	ginkgo.By("Confirming PVC recreated")
	err = verifyStatefulSetPVCsExist(c, ss, []int{0})
	framework.ExpectNoError(err)

	ginkgo.By("Confirming Pod is ready after being recreated")
	e2estatefulset.WaitForStatusReadyReplicas(c, ss, 1)
	_, err = c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
	framework.ExpectNoError(err)
})
|
||||
})
|
||||
|
||||
func kubectlExecWithRetries(ns string, args ...string) (out string) {
|
||||
|
Reference in New Issue
Block a user