Merge pull request #126991 from carlory/fix-126956

e2e-node: should succeed with multiple pods and same image
Kubernetes Prow Robot 2024-09-09 23:24:39 +01:00 committed by GitHub
commit 1fdb8e3fac


@@ -31,6 +31,7 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/images"
 	"k8s.io/kubernetes/test/e2e/framework"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	"k8s.io/kubernetes/test/e2e/nodefeature"
@@ -60,23 +61,14 @@ var _ = SIGDescribe("ImageVolume", nodefeature.ImageVolume, func() {
 		e2eskipper.SkipUnlessFeatureGateEnabled(features.ImageVolume)
 	})
 
-	createPod := func(ctx context.Context, volumes []v1.Volume, volumeMounts []v1.VolumeMount) {
-		var selinuxOptions *v1.SELinuxOptions
-		if selinux.GetEnabled() {
-			selinuxOptions = &v1.SELinuxOptions{
-				User:  defaultSELinuxUser,
-				Role:  defaultSELinuxRole,
-				Type:  defaultSELinuxType,
-				Level: defaultSELinuxLevel,
-			}
-			ginkgo.By(fmt.Sprintf("Using SELinux on pod: %v", selinuxOptions))
-		}
+	createPod := func(ctx context.Context, podName, nodeName string, volumes []v1.Volume, volumeMounts []v1.VolumeMount, selinuxOptions *v1.SELinuxOptions) {
 		pod := &v1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
 				Name:      podName,
 				Namespace: f.Namespace.Name,
 			},
 			Spec: v1.PodSpec{
+				NodeName:      nodeName,
 				RestartPolicy: v1.RestartPolicyAlways,
 				SecurityContext: &v1.PodSecurityContext{
 					SELinuxOptions: selinuxOptions,
@@ -93,20 +85,34 @@ var _ = SIGDescribe("ImageVolume", nodefeature.ImageVolume, func() {
 			},
 		}
 
-		ginkgo.By(fmt.Sprintf("Creating a pod (%v/%v)", f.Namespace.Name, podName))
+		ginkgo.By(fmt.Sprintf("Creating a pod (%s/%s)", f.Namespace.Name, podName))
 		e2epod.NewPodClient(f).Create(ctx, pod)
 	}
 
 	f.It("should succeed with pod and pull policy of Always", func(ctx context.Context) {
+		var selinuxOptions *v1.SELinuxOptions
+		if selinux.GetEnabled() {
+			selinuxOptions = &v1.SELinuxOptions{
+				User:  defaultSELinuxUser,
+				Role:  defaultSELinuxRole,
+				Type:  defaultSELinuxType,
+				Level: defaultSELinuxLevel,
+			}
+			ginkgo.By(fmt.Sprintf("Using SELinux on pod: %v", selinuxOptions))
+		}
+
 		createPod(ctx,
+			podName,
+			"",
 			[]v1.Volume{{Name: volumeName, VolumeSource: v1.VolumeSource{Image: &v1.ImageVolumeSource{Reference: validImageRef, PullPolicy: v1.PullAlways}}}},
 			[]v1.VolumeMount{{Name: volumeName, MountPath: volumePathPrefix}},
+			selinuxOptions,
 		)
 
-		ginkgo.By(fmt.Sprintf("Waiting for the pod (%v/%v) to be running", f.Namespace.Name, podName))
+		ginkgo.By(fmt.Sprintf("Waiting for the pod (%s/%s) to be running", f.Namespace.Name, podName))
 		err := e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, podName, f.Namespace.Name)
-		framework.ExpectNoError(err, "Failed to await for the pod to be running: (%v/%v)", f.Namespace.Name, podName)
+		framework.ExpectNoError(err, "Failed to await for the pod to be running: (%s/%s)", f.Namespace.Name, podName)
 
 		ginkgo.By(fmt.Sprintf("Verifying the volume mount contents for path: %s", volumePathPrefix))
@@ -118,7 +124,20 @@ var _ = SIGDescribe("ImageVolume", nodefeature.ImageVolume, func() {
 	})
 
 	f.It("should succeed with pod and multiple volumes", func(ctx context.Context) {
+		var selinuxOptions *v1.SELinuxOptions
+		if selinux.GetEnabled() {
+			selinuxOptions = &v1.SELinuxOptions{
+				User:  defaultSELinuxUser,
+				Role:  defaultSELinuxRole,
+				Type:  defaultSELinuxType,
+				Level: defaultSELinuxLevel,
+			}
+			ginkgo.By(fmt.Sprintf("Using SELinux on pod: %v", selinuxOptions))
+		}
+
 		createPod(ctx,
+			podName,
+			"",
 			[]v1.Volume{
 				{Name: volumeName + "-0", VolumeSource: v1.VolumeSource{Image: &v1.ImageVolumeSource{Reference: validImageRef}}},
 				{Name: volumeName + "-1", VolumeSource: v1.VolumeSource{Image: &v1.ImageVolumeSource{Reference: validImageRef}}},
@@ -127,11 +146,12 @@ var _ = SIGDescribe("ImageVolume", nodefeature.ImageVolume, func() {
 				{Name: volumeName + "-0", MountPath: volumePathPrefix + "-0"},
 				{Name: volumeName + "-1", MountPath: volumePathPrefix + "-1"},
 			},
+			selinuxOptions,
 		)
 
-		ginkgo.By(fmt.Sprintf("Waiting for the pod (%v/%v) to be running", f.Namespace.Name, podName))
+		ginkgo.By(fmt.Sprintf("Waiting for the pod (%s/%s) to be running", f.Namespace.Name, podName))
 		err := e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, podName, f.Namespace.Name)
-		framework.ExpectNoError(err, "Failed to await for the pod to be running: (%v/%v)", f.Namespace.Name, podName)
+		framework.ExpectNoError(err, "Failed to await for the pod to be running: (%s/%s)", f.Namespace.Name, podName)
 
 		for i := range 2 {
 			volumePath := fmt.Sprintf("%s-%d", volumePathPrefix, i)
@@ -146,29 +166,123 @@ var _ = SIGDescribe("ImageVolume", nodefeature.ImageVolume, func() {
 	})
 
 	f.It("should fail if image volume is not existing", func(ctx context.Context) {
+		var selinuxOptions *v1.SELinuxOptions
+		if selinux.GetEnabled() {
+			selinuxOptions = &v1.SELinuxOptions{
+				User:  defaultSELinuxUser,
+				Role:  defaultSELinuxRole,
+				Type:  defaultSELinuxType,
+				Level: defaultSELinuxLevel,
+			}
+			ginkgo.By(fmt.Sprintf("Using SELinux on pod: %v", selinuxOptions))
+		}
+
 		createPod(ctx,
+			podName,
+			"",
 			[]v1.Volume{{Name: volumeName, VolumeSource: v1.VolumeSource{Image: &v1.ImageVolumeSource{Reference: invalidImageRef}}}},
 			[]v1.VolumeMount{{Name: volumeName, MountPath: volumePathPrefix}},
+			selinuxOptions,
 		)
 
-		ginkgo.By(fmt.Sprintf("Waiting for the pod (%v/%v) to fail", f.Namespace.Name, podName))
+		ginkgo.By(fmt.Sprintf("Waiting for the pod (%s/%s) to fail", f.Namespace.Name, podName))
 		err := e2epod.WaitForPodContainerToFail(ctx, f.ClientSet, f.Namespace.Name, podName, 0, images.ErrImagePullBackOff.Error(), time.Minute)
-		framework.ExpectNoError(err, "Failed to await for the pod to be running: (%v/%v)", f.Namespace.Name, podName)
+		framework.ExpectNoError(err, "Failed to await for the pod to be running: (%s/%s)", f.Namespace.Name, podName)
 	})
 
 	f.It("should succeed if image volume is not existing but unused", func(ctx context.Context) {
+		var selinuxOptions *v1.SELinuxOptions
+		if selinux.GetEnabled() {
+			selinuxOptions = &v1.SELinuxOptions{
+				User:  defaultSELinuxUser,
+				Role:  defaultSELinuxRole,
+				Type:  defaultSELinuxType,
+				Level: defaultSELinuxLevel,
+			}
+			ginkgo.By(fmt.Sprintf("Using SELinux on pod: %v", selinuxOptions))
+		}
+
 		createPod(ctx,
+			podName,
+			"",
 			[]v1.Volume{{Name: volumeName, VolumeSource: v1.VolumeSource{Image: &v1.ImageVolumeSource{Reference: invalidImageRef}}}},
 			nil,
+			selinuxOptions,
 		)
 
-		ginkgo.By(fmt.Sprintf("Waiting for the pod (%v/%v) to be running", f.Namespace.Name, podName))
+		ginkgo.By(fmt.Sprintf("Waiting for the pod (%s/%s) to be running", f.Namespace.Name, podName))
 		err := e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, podName, f.Namespace.Name)
-		framework.ExpectNoError(err, "Failed to await for the pod to be running: (%v/%v)", f.Namespace.Name, podName)
+		framework.ExpectNoError(err, "Failed to await for the pod to be running: (%s/%s)", f.Namespace.Name, podName)
 
 		ginkgo.By(fmt.Sprintf("Verifying the volume mount is not used for path: %s", volumePathPrefix))
 		output := e2epod.ExecCommandInContainer(f, podName, containerName, "/bin/ls", filepath.Dir(volumePathPrefix))
 		gomega.Expect(output).NotTo(gomega.ContainSubstring(strings.TrimPrefix(volumePathPrefix, "/")))
 	})
+
+	f.It("should succeed with multiple pods and same image on the same node", func(ctx context.Context) {
+		node, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
+		framework.ExpectNoError(err, "Failed to get a ready schedulable node")
+
+		baseName := "test-pod"
+		anotherSELinuxLevel := "s0:c100,c200"
+
+		for i := range 2 {
+			podName := fmt.Sprintf("%s-%d", baseName, i)
+
+			var selinuxOptions *v1.SELinuxOptions
+			if selinux.GetEnabled() {
+				if i == 0 {
+					selinuxOptions = &v1.SELinuxOptions{
+						User:  defaultSELinuxUser,
+						Role:  defaultSELinuxRole,
+						Type:  defaultSELinuxType,
+						Level: defaultSELinuxLevel,
+					}
+				} else {
+					selinuxOptions = &v1.SELinuxOptions{
+						User:  defaultSELinuxUser,
+						Role:  defaultSELinuxRole,
+						Type:  defaultSELinuxType,
+						Level: anotherSELinuxLevel,
+					}
+				}
+				ginkgo.By(fmt.Sprintf("Using SELinux on pod %q: %v", podName, selinuxOptions))
+			}
+
+			createPod(ctx,
+				podName,
+				node.Name,
+				[]v1.Volume{{Name: volumeName, VolumeSource: v1.VolumeSource{Image: &v1.ImageVolumeSource{Reference: validImageRef}}}},
+				[]v1.VolumeMount{{Name: volumeName, MountPath: volumePathPrefix}},
+				selinuxOptions,
+			)
+
+			ginkgo.By(fmt.Sprintf("Waiting for the pod (%s/%s) to be running", f.Namespace.Name, podName))
+			err := e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, podName, f.Namespace.Name)
+			framework.ExpectNoError(err, "Failed to await for the pod to be running: (%s/%s)", f.Namespace.Name, podName)
+
+			ginkgo.By(fmt.Sprintf("Verifying the volume mount contents for path: %s", volumePathPrefix))
+
+			firstFileContents := e2epod.ExecCommandInContainer(f, podName, containerName, "/bin/cat", filepath.Join(volumePathPrefix, "dir", "file"))
+			gomega.Expect(firstFileContents).To(gomega.Equal("1"))
+
+			secondFileContents := e2epod.ExecCommandInContainer(f, podName, containerName, "/bin/cat", filepath.Join(volumePathPrefix, "file"))
+			gomega.Expect(secondFileContents).To(gomega.Equal("2"))
+		}
+
+		podName := baseName + "-0"
+		ginkgo.By(fmt.Sprintf("Rechecking the pod (%s/%s) after another pod is running as expected", f.Namespace.Name, podName))
+		err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, podName, f.Namespace.Name)
+		framework.ExpectNoError(err, "Failed to await for the pod to be running: (%s/%s)", f.Namespace.Name, podName)
+
+		ginkgo.By(fmt.Sprintf("Verifying the volume mount contents for path: %s", volumePathPrefix))
+
+		firstFileContents := e2epod.ExecCommandInContainer(f, podName, containerName, "/bin/cat", filepath.Join(volumePathPrefix, "dir", "file"))
+		gomega.Expect(firstFileContents).To(gomega.Equal("1"))
+
+		secondFileContents := e2epod.ExecCommandInContainer(f, podName, containerName, "/bin/cat", filepath.Join(volumePathPrefix, "file"))
+		gomega.Expect(secondFileContents).To(gomega.Equal("2"))
+	})
 })
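
The refactor above turns createPod into a parameterized helper (pod name, target node, and SELinux options are supplied by the caller), which is what lets the new test pin two pods with the same image reference, but different SELinux levels, to one node. For readers unfamiliar with the API surface involved, here is a minimal standalone sketch of the pod shape such a helper builds. It assumes k8s.io/api v0.31 or later, where v1.ImageVolumeSource exists behind the ImageVolume feature gate; the pod names, namespace, node name, and image references below are illustrative placeholders, not values from this commit.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// buildImageVolumePod mirrors the shape the refactored createPod helper
// produces: pod name, node name, and SELinux options are injected by the
// caller rather than computed inside the helper.
func buildImageVolumePod(podName, nodeName string, selinuxOptions *v1.SELinuxOptions) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: podName, Namespace: "default"},
		Spec: v1.PodSpec{
			// Pinning the node makes the pods share one kubelet and image store.
			NodeName:      nodeName,
			RestartPolicy: v1.RestartPolicyAlways,
			SecurityContext: &v1.PodSecurityContext{
				SELinuxOptions: selinuxOptions, // may be nil on non-SELinux hosts
			},
			Volumes: []v1.Volume{{
				Name: "volume",
				VolumeSource: v1.VolumeSource{
					Image: &v1.ImageVolumeSource{
						Reference:  "example.invalid/artifact:v1", // hypothetical OCI reference
						PullPolicy: v1.PullIfNotPresent,
					},
				},
			}},
			Containers: []v1.Container{{
				Name:         "container",
				Image:        "example.invalid/shell:v1", // hypothetical container image
				Command:      []string{"sleep", "infinity"},
				VolumeMounts: []v1.VolumeMount{{Name: "volume", MountPath: "/volume"}},
			}},
		},
	}
}

func main() {
	// Two pods differing only in name and SELinux level, echoing the new test;
	// the levels here are illustrative.
	for i, level := range []string{"s0:c1,c2", "s0:c100,c200"} {
		pod := buildImageVolumePod(fmt.Sprintf("test-pod-%d", i), "node-a", &v1.SELinuxOptions{Level: level})
		fmt.Println(pod.Name, pod.Spec.SecurityContext.SELinuxOptions.Level)
	}
}

The assertion that gives the new test its value is the recheck at the end: after test-pod-1 starts under a different SELinux level, test-pod-0 must still be running and its mounted volume contents must be unchanged, i.e. mounting the same image a second time on the node must not disturb the first pod's already-mounted volume.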