Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-06 10:43:56 +00:00)
Remove test Pods sharing a single local PV
The test runs two pods accessing the same local volume, which duplicates the "Two pods mounting a local volume at the same time" test.
parent 406d2dfe61
commit 052b06bdad
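For background on the overlap the message describes: both tests reduce to binding more than one pod to a single local PVC and asserting that all of them reach Running, and the retained test goes further by also checking read/write access across the pods. Below is a minimal client-go sketch of just the shared-claim check, independent of the e2e framework used in the diff; the kubeconfig path, namespace, claim name, pod names, and image are illustrative placeholders, not values from this commit.

// sharedpvc_check.go — a minimal sketch, not part of this commit: create
// two pods that mount the same PVC and poll until both report Running,
// which is the condition the removed test asserted.
package main

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	ctx := context.Background()
	// Placeholders: namespace, claim name, and pod count.
	ns, claim, count := "default", "shared-local-pvc", 2

	for i := 0; i < count; i++ {
		pod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("shared-pv-pod-%d", i)},
			Spec: v1.PodSpec{
				Containers: []v1.Container{{
					Name:         "c",
					Image:        "busybox",
					Command:      []string{"sleep", "3600"},
					VolumeMounts: []v1.VolumeMount{{Name: "vol", MountPath: "/mnt/test"}},
				}},
				Volumes: []v1.Volume{{
					Name: "vol",
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: claim},
					},
				}},
			},
		}
		if _, err := client.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}); err != nil {
			panic(err)
		}
	}

	// Mirror of the removed test's check: list pods in the namespace and
	// wait until the expected number have reached the Running phase.
	waitErr := wait.PollImmediate(time.Second, 5*time.Minute, func() (bool, error) {
		podsList, err := client.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		running := 0
		for _, pod := range podsList.Items {
			if pod.Status.Phase == v1.PodRunning {
				running++
			}
		}
		return running == count, nil
	})
	if waitErr != nil {
		panic(waitErr)
	}
	fmt.Println("both pods are running against the shared PVC")
}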
@@ -641,79 +641,6 @@ var _ = utils.SIGDescribe("PersistentVolumes-local ", func() {
 			framework.ExpectNoError(waitErr, "some pods failed to complete within %v", completeTimeout)
 		})
 	})
-
-	ginkgo.Context("Pods sharing a single local PV [Serial]", func() {
-		var (
-			pv *v1.PersistentVolume
-		)
-
-		ginkgo.BeforeEach(func(ctx context.Context) {
-			localVolume := &localTestVolume{
-				ltr: &utils.LocalTestResource{
-					Node: config.randomNode,
-					Path: "/tmp",
-				},
-				localVolumeType: DirectoryLocalVolumeType,
-			}
-			pvConfig := makeLocalPVConfig(config, localVolume)
-			var err error
-			pv, err = e2epv.CreatePV(ctx, config.client, f.Timeouts, e2epv.MakePersistentVolume(pvConfig))
-			framework.ExpectNoError(err)
-		})
-
-		ginkgo.AfterEach(func(ctx context.Context) {
-			if pv == nil {
-				return
-			}
-			ginkgo.By(fmt.Sprintf("Clean PV %s", pv.Name))
-			err := config.client.CoreV1().PersistentVolumes().Delete(ctx, pv.Name, metav1.DeleteOptions{})
-			framework.ExpectNoError(err)
-		})
-
-		ginkgo.It("all pods should be running", func(ctx context.Context) {
-			var (
-				pvc   *v1.PersistentVolumeClaim
-				pods  = map[string]*v1.Pod{}
-				count = 2
-				err   error
-			)
-			pvc = e2epv.MakePersistentVolumeClaim(makeLocalPVCConfig(config, DirectoryLocalVolumeType), config.ns)
-			ginkgo.By(fmt.Sprintf("Create a PVC %s", pvc.Name))
-			pvc, err = e2epv.CreatePVC(ctx, config.client, config.ns, pvc)
-			framework.ExpectNoError(err)
-			ginkgo.By(fmt.Sprintf("Create %d pods to use this PVC", count))
-			podConfig := e2epod.Config{
-				NS:           config.ns,
-				PVCs:         []*v1.PersistentVolumeClaim{pvc},
-				SeLinuxLabel: selinuxLabel,
-			}
-			for i := 0; i < count; i++ {
-
-				pod, err := e2epod.MakeSecPod(&podConfig)
-				framework.ExpectNoError(err)
-				pod, err = config.client.CoreV1().Pods(config.ns).Create(ctx, pod, metav1.CreateOptions{})
-				framework.ExpectNoError(err)
-				pods[pod.Name] = pod
-			}
-			ginkgo.By("Wait for all pods are running")
-			const runningTimeout = 5 * time.Minute
-			waitErr := wait.PollImmediate(time.Second, runningTimeout, func() (done bool, err error) {
-				podsList, err := config.client.CoreV1().Pods(config.ns).List(ctx, metav1.ListOptions{})
-				if err != nil {
-					return false, err
-				}
-				runningPods := 0
-				for _, pod := range podsList.Items {
-					switch pod.Status.Phase {
-					case v1.PodRunning:
-						runningPods++
-					}
-				}
-				return runningPods == count, nil
-			})
-			framework.ExpectNoError(waitErr, "Some pods are not running within %v", runningTimeout)
-		})
-	})
 })
 
 func deletePodAndPVCs(ctx context.Context, config *localTestConfig, pod *v1.Pod) error {
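A side note on the deleted wait loop: wait.PollImmediate is deprecated in current k8s.io/apimachinery in favor of the context-aware wait.PollUntilContextTimeout. Had the loop been kept, it could have been rewritten as sketched below, reusing the names from the diff above (ctx, config, count, runningTimeout); this is an illustration of the replacement API, not code from the commit.

// Sketch only: the removed test's readiness loop rewritten with the
// context-aware polling helper; everything else is unchanged.
waitErr := wait.PollUntilContextTimeout(ctx, time.Second, runningTimeout, true,
	func(ctx context.Context) (bool, error) {
		// Same condition as the removed test: count pods in the namespace
		// that have reached the Running phase.
		podsList, err := config.client.CoreV1().Pods(config.ns).List(ctx, metav1.ListOptions{})
		if err != nil {
			return false, err
		}
		runningPods := 0
		for _, pod := range podsList.Items {
			if pod.Status.Phase == v1.PodRunning {
				runningPods++
			}
		}
		return runningPods == count, nil
	})
framework.ExpectNoError(waitErr, "Some pods are not running within %v", runningTimeout)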