Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-21 10:51:29 +00:00)
Make e2e test not rely on local volumes
Commit: 392cd5ce8c
Parent: 3cf636b22e
@@ -22,7 +22,7 @@ import (
 	"strings"
 
 	apps "k8s.io/api/apps/v1"
-	v1 "k8s.io/api/core/v1"
+	"k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	errorutils "k8s.io/apimachinery/pkg/util/errors"
@@ -2441,13 +2441,6 @@ func (om *fakeObjectManager) DeletePod(pod *v1.Pod) error {
 	return nil // Not found, no error in deleting.
 }
 
-func (om *fakeObjectManager) createPersistentVolumeClaims(set *apps.StatefulSet, pod *v1.Pod) error {
-	for _, claim := range getPersistentVolumeClaims(set, pod) {
-		om.claimsIndexer.Update(&claim)
-	}
-	return nil
-}
-
 func (om *fakeObjectManager) CreateClaim(claim *v1.PersistentVolumeClaim) error {
 	om.claimsIndexer.Update(claim)
 	return nil
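With the bulk createPersistentVolumeClaims helper removed from the fake object manager, PVCs have to be seeded through the remaining CreateClaim method. A minimal, hypothetical sketch of that pattern — om, set, pod, t, and the getPersistentVolumeClaims helper are assumed to come from the surrounding test file, not from this commit:

```go
// Illustrative only: seed the fake object manager's claim cache one PVC at a
// time via CreateClaim, now that the bulk helper is gone.
for _, claim := range getPersistentVolumeClaims(set, pod) {
	claim := claim // copy before taking the address of the loop variable
	if err := om.CreateClaim(&claim); err != nil {
		t.Fatal(err)
	}
}
```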
@@ -1349,8 +1349,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 		})
 	})
 
-	ginkgo.It("PVC should be recreated when pod is pending due to missing PVC [Feature:AutomaticPVCRecreation][Disruptive][Serial]", func() {
-		// This test must be run in an environment that will allow the test pod to be scheduled on a node with local volumes
+	ginkgo.It("PVC should be recreated when pod is pending due to missing PVC [Disruptive][Serial]", func() {
 		ssName := "test-ss"
 		headlessSvcName := "test"
 		// Define StatefulSet Labels
@@ -1359,11 +1358,18 @@ var _ = SIGDescribe("StatefulSet", func() {
 			"pod": WebserverImageName,
 		}
 
+		readyNode, err := e2enode.GetReadySchedulableWorkerNode(c)
+		framework.ExpectNoError(err)
+		hostLabel := "kubernetes.io/hostname"
+		hostLabelVal := readyNode.Labels[hostLabel]
+
 		statefulPodMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
 		ss := e2estatefulset.NewStatefulSet(ssName, ns, headlessSvcName, 1, statefulPodMounts, nil, ssPodLabels)
+		ss.Spec.Template.Spec.NodeSelector = map[string]string{hostLabel: hostLabelVal} // force the pod on a specific node
+
 		e2epv.SkipIfNoDefaultStorageClass(c)
 		ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
-		_, err := c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
+		_, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Confirming PVC exists")
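This hunk is the heart of the change: rather than relying on an environment with local volumes, the test picks a ready, schedulable worker node up front and pins the StatefulSet's pod to it through a nodeSelector on the hostname label, so later assertions can check that the recreated pod returns to the same node. A condensed sketch of that pattern, with the e2e framework wiring (c, ns, ssPodLabels) assumed from the surrounding test:

```go
// Condensed sketch of the node-pinning pattern used above.
readyNode, err := e2enode.GetReadySchedulableWorkerNode(c)
framework.ExpectNoError(err)

hostLabel := "kubernetes.io/hostname"
hostLabelVal := readyNode.Labels[hostLabel]

mounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
ss := e2estatefulset.NewStatefulSet("test-ss", ns, "test", 1, mounts, nil, ssPodLabels)
// Pin the pod to the chosen node so the test can later verify that the
// recreated pod lands back on the same node.
ss.Spec.Template.Spec.NodeSelector = map[string]string{hostLabel: hostLabelVal}

_, err = c.AppsV1().StatefulSets(ns).Create(context.TODO(), ss, metav1.CreateOptions{})
framework.ExpectNoError(err)
```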
@@ -1377,6 +1383,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 		framework.ExpectNoError(err)
 
 		nodeName := pod.Spec.NodeName
+		framework.ExpectEqual(nodeName, readyNode.Name)
 		node, err := c.CoreV1().Nodes().Get(context.TODO(), nodeName, metav1.GetOptions{})
 		framework.ExpectNoError(err)
 
@@ -1432,8 +1439,9 @@ var _ = SIGDescribe("StatefulSet", func() {
 
 		ginkgo.By("Confirming Pod is ready after being recreated")
 		e2estatefulset.WaitForStatusReadyReplicas(c, ss, 1)
-		_, err = c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
+		pod, err = c.CoreV1().Pods(ns).Get(context.TODO(), podName, metav1.GetOptions{})
 		framework.ExpectNoError(err)
+		framework.ExpectEqual(pod.Spec.NodeName, readyNode.Name) // confirm the pod was scheduled back to the original node
 	})
 })
 