mirror of
https://github.com/k3s-io/kubernetes.git
synced 2026-01-05 23:47:50 +00:00
Cleanup tests/e2e/common directory - part 1
This commit is contained in:
49
test/e2e/common/storage/BUILD
Normal file
49
test/e2e/common/storage/BUILD
Normal file
@@ -0,0 +1,49 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"configmap_volume.go",
|
||||
"downwardapi_volume.go",
|
||||
"empty_dir.go",
|
||||
"framework.go",
|
||||
"host_path.go",
|
||||
"projected_combined.go",
|
||||
"projected_configmap.go",
|
||||
"projected_downwardapi.go",
|
||||
"projected_secret.go",
|
||||
"secrets_volume.go",
|
||||
"util.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/test/e2e/common/storage",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/features:go_default_library",
|
||||
"//staging/src/k8s.io/api/core/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
|
||||
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
|
||||
"//test/e2e/framework:go_default_library",
|
||||
"//test/e2e/framework/pod:go_default_library",
|
||||
"//test/e2e/framework/skipper:go_default_library",
|
||||
"//test/utils/image:go_default_library",
|
||||
"//vendor/github.com/onsi/ginkgo:go_default_library",
|
||||
"//vendor/github.com/onsi/gomega:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
4
test/e2e/common/storage/OWNERS
Normal file
4
test/e2e/common/storage/OWNERS
Normal file
@@ -0,0 +1,4 @@
|
||||
# See the OWNERS docs at https://go.k8s.io/owners
|
||||
|
||||
labels:
|
||||
- sig/storage
|
||||
737
test/e2e/common/storage/configmap_volume.go
Normal file
737
test/e2e/common/storage/configmap_volume.go
Normal file
@@ -0,0 +1,737 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("ConfigMap", func() {
|
||||
f := framework.NewDefaultFramework("configmap")
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: ConfigMap Volume, without mapping
|
||||
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
|
||||
doConfigMapE2EWithoutMappings(f, false, 0, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: ConfigMap Volume, without mapping, volume mode set
|
||||
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. File mode is changed to a custom value of '0x400'. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST be set to the custom value of '0x400'
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func() {
|
||||
defaultMode := int32(0400)
|
||||
doConfigMapE2EWithoutMappings(f, false, 0, &defaultMode)
|
||||
})
|
||||
|
||||
ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func() {
|
||||
// Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions.
|
||||
e2eskipper.SkipIfNodeOSDistroIs("windows")
|
||||
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
|
||||
doConfigMapE2EWithoutMappings(f, true, 1001, &defaultMode)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: ConfigMap Volume, without mapping, non-root user
|
||||
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func() {
|
||||
doConfigMapE2EWithoutMappings(f, true, 0, nil)
|
||||
})
|
||||
|
||||
ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func() {
|
||||
// Windows does not support RunAsUser / FSGroup SecurityContext options.
|
||||
e2eskipper.SkipIfNodeOSDistroIs("windows")
|
||||
doConfigMapE2EWithoutMappings(f, true, 1001, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: ConfigMap Volume, with mapping
|
||||
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST default to 0x644.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
|
||||
doConfigMapE2EWithMappings(f, false, 0, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: ConfigMap Volume, with mapping, volume mode set
|
||||
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. File mode is changed to a custom value of '0x400'. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The data content of the file MUST be readable and verified and file modes MUST be set to the custom value of '0x400'
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func() {
|
||||
mode := int32(0400)
|
||||
doConfigMapE2EWithMappings(f, false, 0, &mode)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: ConfigMap Volume, with mapping, non-root user
|
||||
Description: Create a ConfigMap, create a Pod that mounts a volume and populates the volume with data stored in the ConfigMap. Files are mapped to a path in the volume. Pod is run as a non-root user with uid=1000. The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount. The file on the volume MUST have file mode set to default value of 0x644.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func() {
|
||||
doConfigMapE2EWithMappings(f, true, 0, nil)
|
||||
})
|
||||
|
||||
ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func() {
|
||||
// Windows does not support RunAsUser / FSGroup SecurityContext options.
|
||||
e2eskipper.SkipIfNodeOSDistroIs("windows")
|
||||
doConfigMapE2EWithMappings(f, true, 1001, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: ConfigMap Volume, update
|
||||
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. When the ConfigMap is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod.
|
||||
*/
|
||||
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func() {
|
||||
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
|
||||
name := "configmap-test-upd-" + string(uuid.NewUUID())
|
||||
volumeName := "configmap-volume"
|
||||
volumeMountPath := "/etc/configmap-volume"
|
||||
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: name,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
},
|
||||
}
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
|
||||
var err error
|
||||
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
|
||||
}
|
||||
|
||||
pod := createConfigMapVolumeMounttestPod(f.Namespace.Name, volumeName, name, volumeMountPath,
|
||||
"--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1")
|
||||
|
||||
ginkgo.By("Creating the pod")
|
||||
f.PodClient().CreateSync(pod)
|
||||
|
||||
pollLogs := func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
|
||||
}
|
||||
|
||||
gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Updating configmap %v", configMap.Name))
|
||||
configMap.ResourceVersion = "" // to force update
|
||||
configMap.Data["data-1"] = "value-2"
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
|
||||
|
||||
ginkgo.By("waiting to observe update in volume")
|
||||
gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-2"))
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.12
|
||||
Testname: ConfigMap Volume, text data, binary data
|
||||
Description: The ConfigMap that is created with text data and binary data MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. ConfigMap's text data and binary data MUST be verified by reading the content from the mounted files in the Pod.
|
||||
*/
|
||||
framework.ConformanceIt("binary data should be reflected in volume [NodeConformance]", func() {
|
||||
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
|
||||
name := "configmap-test-upd-" + string(uuid.NewUUID())
|
||||
volumeName := "configmap-volume"
|
||||
volumeMountPath := "/etc/configmap-volume"
|
||||
containerName := "configmap-volume-binary-test"
|
||||
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: name,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
},
|
||||
BinaryData: map[string][]byte{
|
||||
"dump.bin": {0xde, 0xca, 0xfe, 0xba, 0xd0, 0xfe, 0xff},
|
||||
},
|
||||
}
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
|
||||
var err error
|
||||
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
|
||||
}
|
||||
|
||||
pod := createConfigMapVolumeMounttestPod(f.Namespace.Name, volumeName, name, volumeMountPath,
|
||||
"--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volume/data-1")
|
||||
pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
|
||||
Name: containerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"hexdump", "-C", "/etc/configmap-volume/dump.bin"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: volumeMountPath,
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
ginkgo.By("Creating the pod")
|
||||
f.PodClient().Create(pod)
|
||||
e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
|
||||
pollLogs1 := func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
|
||||
}
|
||||
pollLogs2 := func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[1].Name)
|
||||
}
|
||||
|
||||
ginkgo.By("Waiting for pod with text data")
|
||||
gomega.Eventually(pollLogs1, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
|
||||
ginkgo.By("Waiting for pod with binary data")
|
||||
gomega.Eventually(pollLogs2, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("de ca fe ba d0 fe ff"))
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: ConfigMap Volume, create, update and delete
|
||||
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to custom path in the Pod. When the config map is updated the change to the config map MUST be verified by reading the content from the mounted file in the Pod. Also when the item(file) is deleted from the map that MUST result in a error reading that item(file).
|
||||
*/
|
||||
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
|
||||
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
trueVal := true
|
||||
volumeMountPath := "/etc/configmap-volumes"
|
||||
|
||||
deleteName := "cm-test-opt-del-" + string(uuid.NewUUID())
|
||||
deleteContainerName := "delcm-volume-test"
|
||||
deleteVolumeName := "deletecm-volume"
|
||||
deleteConfigMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: deleteName,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
},
|
||||
}
|
||||
|
||||
updateName := "cm-test-opt-upd-" + string(uuid.NewUUID())
|
||||
updateContainerName := "updcm-volume-test"
|
||||
updateVolumeName := "updatecm-volume"
|
||||
updateConfigMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: updateName,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
},
|
||||
}
|
||||
|
||||
createName := "cm-test-opt-create-" + string(uuid.NewUUID())
|
||||
createContainerName := "createcm-volume-test"
|
||||
createVolumeName := "createcm-volume"
|
||||
createConfigMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: createName,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
},
|
||||
}
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
|
||||
var err error
|
||||
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), deleteConfigMap, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
|
||||
}
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
|
||||
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), updateConfigMap, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-configmaps-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: deleteVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
ConfigMap: &v1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: deleteName,
|
||||
},
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: updateVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
ConfigMap: &v1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: updateName,
|
||||
},
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: createVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
ConfigMap: &v1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: createName,
|
||||
},
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: deleteContainerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{"mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/delete/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: deleteVolumeName,
|
||||
MountPath: path.Join(volumeMountPath, "delete"),
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: updateContainerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{"mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/update/data-3"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: updateVolumeName,
|
||||
MountPath: path.Join(volumeMountPath, "update"),
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: createContainerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{"mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/create/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: createVolumeName,
|
||||
MountPath: path.Join(volumeMountPath, "create"),
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
ginkgo.By("Creating the pod")
|
||||
f.PodClient().CreateSync(pod)
|
||||
|
||||
pollCreateLogs := func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
|
||||
}
|
||||
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/create/data-1"))
|
||||
|
||||
pollUpdateLogs := func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
|
||||
}
|
||||
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/update/data-3"))
|
||||
|
||||
pollDeleteLogs := func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
|
||||
}
|
||||
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
|
||||
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), deleteConfigMap.Name, metav1.DeleteOptions{})
|
||||
framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
|
||||
updateConfigMap.ResourceVersion = "" // to force update
|
||||
delete(updateConfigMap.Data, "data-1")
|
||||
updateConfigMap.Data["data-3"] = "value-3"
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), updateConfigMap, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
|
||||
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), createConfigMap, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
|
||||
}
|
||||
|
||||
ginkgo.By("waiting to observe update in volume")
|
||||
|
||||
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
|
||||
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
|
||||
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/configmap-volumes/delete/data-1"))
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: ConfigMap Volume, multiple volume maps
|
||||
Description: The ConfigMap that is created MUST be accessible to read from the newly created Pod using the volume mount that is mapped to multiple paths in the Pod. The content MUST be accessible from all the mapped volume mounts.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func() {
|
||||
var (
|
||||
name = "configmap-test-volume-" + string(uuid.NewUUID())
|
||||
volumeName = "configmap-volume"
|
||||
volumeMountPath = "/etc/configmap-volume"
|
||||
volumeName2 = "configmap-volume-2"
|
||||
volumeMountPath2 = "/etc/configmap-volume-2"
|
||||
configMap = newConfigMap(f, name)
|
||||
)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
|
||||
var err error
|
||||
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-configmaps-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
ConfigMap: &v1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: name,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: volumeName2,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
ConfigMap: &v1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: name,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "configmap-volume-test",
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{"mounttest", "--file_content=/etc/configmap-volume/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: volumeMountPath,
|
||||
ReadOnly: true,
|
||||
},
|
||||
{
|
||||
Name: volumeName2,
|
||||
MountPath: volumeMountPath2,
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
f.TestContainerOutput("consume configMaps", pod, 0, []string{
|
||||
"content of file \"/etc/configmap-volume/data-1\": value-1",
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.21
|
||||
Testname: ConfigMap Volume, immutability
|
||||
Description: Create a ConfigMap. Update it's data field, the update MUST succeed.
|
||||
Mark the ConfigMap as immutable, the update MUST succeed. Try to update its data, the update MUST fail.
|
||||
Try to mark the ConfigMap back as not immutable, the update MUST fail.
|
||||
Try to update the ConfigMap`s metadata (labels), the update must succeed.
|
||||
Try to delete the ConfigMap, the deletion must succeed.
|
||||
*/
|
||||
framework.ConformanceIt("should be immutable if `immutable` field is set", func() {
|
||||
name := "immutable"
|
||||
configMap := newConfigMap(f, name)
|
||||
|
||||
currentConfigMap, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err, "Failed to create config map %q in namespace %q", configMap.Name, configMap.Namespace)
|
||||
|
||||
currentConfigMap.Data["data-4"] = "value-4"
|
||||
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err, "Failed to update config map %q in namespace %q", configMap.Name, configMap.Namespace)
|
||||
|
||||
// Mark config map as immutable.
|
||||
trueVal := true
|
||||
currentConfigMap.Immutable = &trueVal
|
||||
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err, "Failed to mark config map %q in namespace %q as immutable", configMap.Name, configMap.Namespace)
|
||||
|
||||
// Ensure data can't be changed now.
|
||||
currentConfigMap.Data["data-5"] = "value-5"
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{})
|
||||
framework.ExpectEqual(apierrors.IsInvalid(err), true)
|
||||
|
||||
// Ensure config map can't be switched from immutable to mutable.
|
||||
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "Failed to get config map %q in namespace %q", configMap.Name, configMap.Namespace)
|
||||
framework.ExpectEqual(*currentConfigMap.Immutable, true)
|
||||
|
||||
falseVal := false
|
||||
currentConfigMap.Immutable = &falseVal
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{})
|
||||
framework.ExpectEqual(apierrors.IsInvalid(err), true)
|
||||
|
||||
// Ensure that metadata can be changed.
|
||||
currentConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "Failed to get config map %q in namespace %q", configMap.Name, configMap.Namespace)
|
||||
currentConfigMap.Labels = map[string]string{"label1": "value1"}
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), currentConfigMap, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err, "Failed to update config map %q in namespace %q", configMap.Name, configMap.Namespace)
|
||||
|
||||
// Ensure that immutable config map can be deleted.
|
||||
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{})
|
||||
framework.ExpectNoError(err, "Failed to delete config map %q in namespace %q", configMap.Name, configMap.Namespace)
|
||||
})
|
||||
|
||||
// The pod is in pending during volume creation until the configMap objects are available
|
||||
// or until mount the configMap volume times out. There is no configMap object defined for the pod, so it should return timout exception unless it is marked optional.
|
||||
// Slow (~5 mins)
|
||||
ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func() {
|
||||
volumeMountPath := "/etc/configmap-volumes"
|
||||
pod, err := createNonOptionalConfigMapPod(f, volumeMountPath)
|
||||
framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)
|
||||
})
|
||||
|
||||
// ConfigMap object defined for the pod, If a key is specified which is not present in the ConfigMap,
|
||||
// the volume setup will error unless it is marked optional, during the pod creation.
|
||||
// Slow (~5 mins)
|
||||
ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func() {
|
||||
volumeMountPath := "/etc/configmap-volumes"
|
||||
pod, err := createNonOptionalConfigMapPodWithConfig(f, volumeMountPath)
|
||||
framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)
|
||||
})
|
||||
})
|
||||
|
||||
func newConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
|
||||
return &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: name,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
"data-2": "value-2",
|
||||
"data-3": "value-3",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func doConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, fsGroup int64, defaultMode *int32) {
|
||||
groupID := int64(fsGroup)
|
||||
|
||||
var (
|
||||
name = "configmap-test-volume-" + string(uuid.NewUUID())
|
||||
volumeName = "configmap-volume"
|
||||
volumeMountPath = "/etc/configmap-volume"
|
||||
configMap = newConfigMap(f, name)
|
||||
)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
|
||||
var err error
|
||||
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
|
||||
}
|
||||
|
||||
pod := createConfigMapVolumeMounttestPod(f.Namespace.Name, volumeName, name, volumeMountPath,
|
||||
"--file_content=/etc/configmap-volume/data-1", "--file_mode=/etc/configmap-volume/data-1")
|
||||
one := int64(1)
|
||||
pod.Spec.TerminationGracePeriodSeconds = &one
|
||||
|
||||
if asUser {
|
||||
setPodNonRootUser(pod)
|
||||
}
|
||||
|
||||
if groupID != 0 {
|
||||
pod.Spec.SecurityContext.FSGroup = &groupID
|
||||
}
|
||||
|
||||
if defaultMode != nil {
|
||||
pod.Spec.Volumes[0].VolumeSource.ConfigMap.DefaultMode = defaultMode
|
||||
}
|
||||
|
||||
fileModeRegexp := getFileModeRegex("/etc/configmap-volume/data-1", defaultMode)
|
||||
output := []string{
|
||||
"content of file \"/etc/configmap-volume/data-1\": value-1",
|
||||
fileModeRegexp,
|
||||
}
|
||||
f.TestContainerOutputRegexp("consume configMaps", pod, 0, output)
|
||||
}
|
||||
|
||||
func doConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int64, itemMode *int32) {
|
||||
groupID := int64(fsGroup)
|
||||
|
||||
var (
|
||||
name = "configmap-test-volume-map-" + string(uuid.NewUUID())
|
||||
volumeName = "configmap-volume"
|
||||
volumeMountPath = "/etc/configmap-volume"
|
||||
configMap = newConfigMap(f, name)
|
||||
)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
|
||||
|
||||
var err error
|
||||
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
|
||||
}
|
||||
|
||||
pod := createConfigMapVolumeMounttestPod(f.Namespace.Name, volumeName, name, volumeMountPath,
|
||||
"--file_content=/etc/configmap-volume/path/to/data-2", "--file_mode=/etc/configmap-volume/path/to/data-2")
|
||||
one := int64(1)
|
||||
pod.Spec.TerminationGracePeriodSeconds = &one
|
||||
pod.Spec.Volumes[0].VolumeSource.ConfigMap.Items = []v1.KeyToPath{
|
||||
{
|
||||
Key: "data-2",
|
||||
Path: "path/to/data-2",
|
||||
},
|
||||
}
|
||||
|
||||
if asUser {
|
||||
setPodNonRootUser(pod)
|
||||
}
|
||||
|
||||
if groupID != 0 {
|
||||
pod.Spec.SecurityContext.FSGroup = &groupID
|
||||
}
|
||||
|
||||
if itemMode != nil {
|
||||
pod.Spec.Volumes[0].VolumeSource.ConfigMap.Items[0].Mode = itemMode
|
||||
}
|
||||
|
||||
// Just check file mode if fsGroup is not set. If fsGroup is set, the
|
||||
// final mode is adjusted and we are not testing that case.
|
||||
output := []string{
|
||||
"content of file \"/etc/configmap-volume/path/to/data-2\": value-2",
|
||||
}
|
||||
if fsGroup == 0 {
|
||||
fileModeRegexp := getFileModeRegex("/etc/configmap-volume/path/to/data-2", itemMode)
|
||||
output = append(output, fileModeRegexp)
|
||||
}
|
||||
f.TestContainerOutputRegexp("consume configMaps", pod, 0, output)
|
||||
}
|
||||
|
||||
func createNonOptionalConfigMapPod(f *framework.Framework, volumeMountPath string) (*v1.Pod, error) {
|
||||
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
falseValue := false
|
||||
|
||||
createName := "cm-test-opt-create-" + string(uuid.NewUUID())
|
||||
createVolumeName := "createcm-volume"
|
||||
|
||||
// creating a pod without configMap object created, by mentioning the configMap volume source's local reference name
|
||||
pod := createConfigMapVolumeMounttestPod(f.Namespace.Name, createVolumeName, createName, path.Join(volumeMountPath, "create"),
|
||||
"--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/create/data-1")
|
||||
pod.Spec.Volumes[0].VolumeSource.ConfigMap.Optional = &falseValue
|
||||
|
||||
ginkgo.By("Creating the pod")
|
||||
pod = f.PodClient().Create(pod)
|
||||
return pod, e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
}
|
||||
|
||||
func createNonOptionalConfigMapPodWithConfig(f *framework.Framework, volumeMountPath string) (*v1.Pod, error) {
|
||||
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
falseValue := false
|
||||
|
||||
createName := "cm-test-opt-create-" + string(uuid.NewUUID())
|
||||
createVolumeName := "createcm-volume"
|
||||
configMap := newConfigMap(f, createName)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
|
||||
var err error
|
||||
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
|
||||
}
|
||||
// creating a pod with configMap object, but with different key which is not present in configMap object.
|
||||
pod := createConfigMapVolumeMounttestPod(f.Namespace.Name, createVolumeName, createName, path.Join(volumeMountPath, "create"),
|
||||
"--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/configmap-volumes/create/data-1")
|
||||
pod.Spec.Volumes[0].VolumeSource.ConfigMap.Optional = &falseValue
|
||||
pod.Spec.Volumes[0].VolumeSource.ConfigMap.Items = []v1.KeyToPath{
|
||||
{
|
||||
Key: "data-4",
|
||||
Path: "path/to/data-4",
|
||||
},
|
||||
}
|
||||
|
||||
ginkgo.By("Creating the pod")
|
||||
pod = f.PodClient().Create(pod)
|
||||
return pod, e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
}
|
||||
|
||||
func createConfigMapVolumeMounttestPod(namespace, volumeName, referenceName, mountPath string, mounttestArgs ...string) *v1.Pod {
|
||||
volumes := []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
ConfigMap: &v1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: referenceName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
podName := "pod-configmaps-" + string(uuid.NewUUID())
|
||||
mounttestArgs = append([]string{"mounttest"}, mounttestArgs...)
|
||||
pod := e2epod.NewAgnhostPod(namespace, podName, volumes, createMounts(volumeName, mountPath, true), nil, mounttestArgs...)
|
||||
pod.Spec.RestartPolicy = v1.RestartPolicyNever
|
||||
return pod
|
||||
}
|
||||
475
test/e2e/common/storage/downwardapi_volume.go
Normal file
475
test/e2e/common/storage/downwardapi_volume.go
Normal file
@@ -0,0 +1,475 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("Downward API volume", func() {
|
||||
// How long to wait for a log pod to be displayed
|
||||
const podLogTimeout = 3 * time.Minute
|
||||
f := framework.NewDefaultFramework("downward-api")
|
||||
var podClient *framework.PodClient
|
||||
ginkgo.BeforeEach(func() {
|
||||
podClient = f.PodClient()
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: DownwardAPI volume, pod name
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the Pod name. The container runtime MUST be able to access Pod name from the specified path on the mounted volume.
|
||||
*/
|
||||
framework.ConformanceIt("should provide podname only [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
fmt.Sprintf("%s\n", podName),
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: DownwardAPI volume, volume mode 0400
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource with the volumesource mode set to -r-------- and DownwardAPIVolumeFiles contains a item for the Pod name. The container runtime MUST be able to access Pod name from the specified path on the mounted volume.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should set DefaultMode on files [LinuxOnly] [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
defaultMode := int32(0400)
|
||||
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode)
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
"mode of file \"/etc/podinfo/podname\": -r--------",
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: DownwardAPI volume, file mode 0400
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the Pod name with the file mode set to -r--------. The container runtime MUST be able to access Pod name from the specified path on the mounted volume.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should set mode on item file [LinuxOnly] [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
mode := int32(0400)
|
||||
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
"mode of file \"/etc/podinfo/podname\": -r--------",
|
||||
})
|
||||
})
|
||||
|
||||
ginkgo.It("should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup]", func() {
|
||||
// Windows does not support RunAsUser / FSGroup SecurityContext options.
|
||||
e2eskipper.SkipIfNodeOSDistroIs("windows")
|
||||
podName := "metadata-volume-" + string(uuid.NewUUID())
|
||||
gid := int64(1234)
|
||||
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
|
||||
pod.Spec.SecurityContext = &v1.PodSecurityContext{
|
||||
FSGroup: &gid,
|
||||
}
|
||||
setPodNonRootUser(pod)
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
fmt.Sprintf("%s\n", podName),
|
||||
})
|
||||
})
|
||||
|
||||
ginkgo.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup]", func() {
|
||||
// Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions.
|
||||
e2eskipper.SkipIfNodeOSDistroIs("windows")
|
||||
podName := "metadata-volume-" + string(uuid.NewUUID())
|
||||
gid := int64(1234)
|
||||
mode := int32(0440) /* setting fsGroup sets mode to at least 440 */
|
||||
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
|
||||
pod.Spec.SecurityContext = &v1.PodSecurityContext{
|
||||
FSGroup: &gid,
|
||||
}
|
||||
setPodNonRootUser(pod)
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
"mode of file \"/etc/podinfo/podname\": -r--r-----",
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: DownwardAPI volume, update label
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains list of items for each of the Pod labels. The container runtime MUST be able to access Pod labels from the specified path on the mounted volume. Update the labels by adding a new label to the running Pod. The new label MUST be available from the mounted volume.
|
||||
*/
|
||||
framework.ConformanceIt("should update labels on modification [NodeConformance]", func() {
|
||||
labels := map[string]string{}
|
||||
labels["key1"] = "value1"
|
||||
labels["key2"] = "value2"
|
||||
|
||||
podName := "labelsupdate" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/podinfo/labels")
|
||||
containerName := "client-container"
|
||||
ginkgo.By("Creating the pod")
|
||||
podClient.CreateSync(pod)
|
||||
|
||||
gomega.Eventually(func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName)
|
||||
},
|
||||
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key1=\"value1\"\n"))
|
||||
|
||||
//modify labels
|
||||
podClient.Update(podName, func(pod *v1.Pod) {
|
||||
pod.Labels["key3"] = "value3"
|
||||
})
|
||||
|
||||
gomega.Eventually(func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
|
||||
},
|
||||
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key3=\"value3\"\n"))
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: DownwardAPI volume, update annotations
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains list of items for each of the Pod annotations. The container runtime MUST be able to access Pod annotations from the specified path on the mounted volume. Update the annotations by adding a new annotation to the running Pod. The new annotation MUST be available from the mounted volume.
|
||||
*/
|
||||
framework.ConformanceIt("should update annotations on modification [NodeConformance]", func() {
|
||||
annotations := map[string]string{}
|
||||
annotations["builder"] = "bar"
|
||||
podName := "annotationupdate" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumePodForUpdateTest(podName, map[string]string{}, annotations, "/etc/podinfo/annotations")
|
||||
|
||||
containerName := "client-container"
|
||||
ginkgo.By("Creating the pod")
|
||||
pod = podClient.CreateSync(pod)
|
||||
|
||||
gomega.Eventually(func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
|
||||
},
|
||||
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"bar\"\n"))
|
||||
|
||||
//modify annotations
|
||||
podClient.Update(podName, func(pod *v1.Pod) {
|
||||
pod.Annotations["builder"] = "foo"
|
||||
})
|
||||
|
||||
gomega.Eventually(func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
|
||||
},
|
||||
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"foo\"\n"))
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: DownwardAPI volume, CPU limits
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the CPU limits. The container runtime MUST be able to access CPU limits from the specified path on the mounted volume.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit")
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
fmt.Sprintf("2\n"),
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: DownwardAPI volume, memory limits
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the memory limits. The container runtime MUST be able to access memory limits from the specified path on the mounted volume.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit")
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
fmt.Sprintf("67108864\n"),
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: DownwardAPI volume, CPU request
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the CPU request. The container runtime MUST be able to access CPU request from the specified path on the mounted volume.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request")
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
fmt.Sprintf("1\n"),
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: DownwardAPI volume, memory request
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the memory request. The container runtime MUST be able to access memory request from the specified path on the mounted volume.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's memory request [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request")
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
fmt.Sprintf("33554432\n"),
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: DownwardAPI volume, CPU limit, default node allocatable
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the CPU limits. CPU limits is not specified for the container. The container runtime MUST be able to access CPU limits from the specified path on the mounted volume and the value MUST be default node allocatable.
|
||||
*/
|
||||
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit")
|
||||
|
||||
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: DownwardAPI volume, memory limit, default node allocatable
|
||||
Description: A Pod is configured with DownwardAPIVolumeSource and DownwardAPIVolumeFiles contains a item for the memory limits. memory limits is not specified for the container. The container runtime MUST be able to access memory limits from the specified path on the mounted volume and the value MUST be default node allocatable.
|
||||
*/
|
||||
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit")
|
||||
|
||||
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
|
||||
})
|
||||
})
|
||||
|
||||
func downwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMode *int32) *v1.Pod {
|
||||
pod := downwardAPIVolumeBasePod(name, nil, nil)
|
||||
|
||||
pod.Spec.Containers = []v1.Container{
|
||||
{
|
||||
Name: "client-container",
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{"mounttest", "--file_mode=" + filePath},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "podinfo",
|
||||
MountPath: "/etc/podinfo",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
if itemMode != nil {
|
||||
pod.Spec.Volumes[0].VolumeSource.DownwardAPI.Items[0].Mode = itemMode
|
||||
}
|
||||
if defaultMode != nil {
|
||||
pod.Spec.Volumes[0].VolumeSource.DownwardAPI.DefaultMode = defaultMode
|
||||
}
|
||||
|
||||
return pod
|
||||
}
|
||||
|
||||
func downwardAPIVolumePodForSimpleTest(name string, filePath string) *v1.Pod {
|
||||
pod := downwardAPIVolumeBasePod(name, nil, nil)
|
||||
|
||||
pod.Spec.Containers = []v1.Container{
|
||||
{
|
||||
Name: "client-container",
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{"mounttest", "--file_content=" + filePath},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "podinfo",
|
||||
MountPath: "/etc/podinfo",
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return pod
|
||||
}
|
||||
|
||||
func downwardAPIVolumeForContainerResources(name string, filePath string) *v1.Pod {
|
||||
pod := downwardAPIVolumeBasePod(name, nil, nil)
|
||||
pod.Spec.Containers = downwardAPIVolumeBaseContainers("client-container", filePath)
|
||||
return pod
|
||||
}
|
||||
|
||||
func downwardAPIVolumeForDefaultContainerResources(name string, filePath string) *v1.Pod {
|
||||
pod := downwardAPIVolumeBasePod(name, nil, nil)
|
||||
pod.Spec.Containers = downwardAPIVolumeDefaultBaseContainer("client-container", filePath)
|
||||
return pod
|
||||
}
|
||||
|
||||
func downwardAPIVolumeBaseContainers(name, filePath string) []v1.Container {
|
||||
return []v1.Container{
|
||||
{
|
||||
Name: name,
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{"mounttest", "--file_content=" + filePath},
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("250m"),
|
||||
v1.ResourceMemory: resource.MustParse("32Mi"),
|
||||
},
|
||||
Limits: v1.ResourceList{
|
||||
v1.ResourceCPU: resource.MustParse("1250m"),
|
||||
v1.ResourceMemory: resource.MustParse("64Mi"),
|
||||
},
|
||||
},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "podinfo",
|
||||
MountPath: "/etc/podinfo",
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func downwardAPIVolumeDefaultBaseContainer(name, filePath string) []v1.Container {
|
||||
return []v1.Container{
|
||||
{
|
||||
Name: name,
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{"mounttest", "--file_content=" + filePath},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "podinfo",
|
||||
MountPath: "/etc/podinfo",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func downwardAPIVolumePodForUpdateTest(name string, labels, annotations map[string]string, filePath string) *v1.Pod {
|
||||
pod := downwardAPIVolumeBasePod(name, labels, annotations)
|
||||
|
||||
pod.Spec.Containers = []v1.Container{
|
||||
{
|
||||
Name: "client-container",
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{"mounttest", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "podinfo",
|
||||
MountPath: "/etc/podinfo",
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
applyLabelsAndAnnotationsToDownwardAPIPod(labels, annotations, pod)
|
||||
return pod
|
||||
}
|
||||
|
||||
func downwardAPIVolumeBasePod(name string, labels, annotations map[string]string) *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: labels,
|
||||
Annotations: annotations,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "podinfo",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
DownwardAPI: &v1.DownwardAPIVolumeSource{
|
||||
Items: []v1.DownwardAPIVolumeFile{
|
||||
{
|
||||
Path: "podname",
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "metadata.name",
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "cpu_limit",
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
ContainerName: "client-container",
|
||||
Resource: "limits.cpu",
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "cpu_request",
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
ContainerName: "client-container",
|
||||
Resource: "requests.cpu",
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "memory_limit",
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
ContainerName: "client-container",
|
||||
Resource: "limits.memory",
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "memory_request",
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
ContainerName: "client-container",
|
||||
Resource: "requests.memory",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
return pod
|
||||
}
|
||||
|
||||
func applyLabelsAndAnnotationsToDownwardAPIPod(labels, annotations map[string]string, pod *v1.Pod) {
|
||||
if len(labels) > 0 {
|
||||
pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{
|
||||
Path: "labels",
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "metadata.labels",
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
if len(annotations) > 0 {
|
||||
pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{
|
||||
Path: "annotations",
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "metadata.annotations",
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: add test-webserver example as pointed out in https://github.com/kubernetes/kubernetes/pull/5093#discussion-diff-37606771
|
||||
639
test/e2e/common/storage/empty_dir.go
Normal file
@@ -0,0 +1,639 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
)
|
||||
|
||||
const (
|
||||
volumePath = "/test-volume"
|
||||
)
|
||||
|
||||
var (
|
||||
nonRootUID = int64(1001)
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("EmptyDir volumes", func() {
|
||||
f := framework.NewDefaultFramework("emptydir")
|
||||
|
||||
ginkgo.Context("when FSGroup is specified [LinuxOnly] [NodeFeature:FSGroup]", func() {
|
||||
|
||||
ginkgo.BeforeEach(func() {
|
||||
// Windows does not support the FSGroup SecurityContext option.
|
||||
e2eskipper.SkipIfNodeOSDistroIs("windows")
|
||||
})
|
||||
|
||||
ginkgo.It("new files should be created with FSGroup ownership when container is root", func() {
|
||||
doTestSetgidFSGroup(f, 0, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
ginkgo.It("new files should be created with FSGroup ownership when container is non-root", func() {
|
||||
doTestSetgidFSGroup(f, nonRootUID, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
ginkgo.It("nonexistent volume subPath should have the correct mode and owner using FSGroup", func() {
|
||||
doTestSubPathFSGroup(f, nonRootUID, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
ginkgo.It("files with FSGroup ownership should support (root,0644,tmpfs)", func() {
|
||||
doTest0644FSGroup(f, 0, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
ginkgo.It("volume on default medium should have the correct mode using FSGroup", func() {
|
||||
doTestVolumeModeFSGroup(f, 0, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
ginkgo.It("volume on tmpfs should have the correct mode using FSGroup", func() {
|
||||
doTestVolumeModeFSGroup(f, 0, v1.StorageMediumMemory)
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: EmptyDir, medium memory, volume mode default
|
||||
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or the medium = 'Memory'.
|
||||
*/
|
||||
framework.ConformanceIt("volume on tmpfs should have the correct mode [LinuxOnly] [NodeConformance]", func() {
|
||||
doTestVolumeMode(f, 0, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: EmptyDir, medium memory, volume mode 0644
|
||||
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0644. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func() {
|
||||
doTest0644(f, 0, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: EmptyDir, medium memory, volume mode 0666
|
||||
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0666. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func() {
|
||||
doTest0666(f, 0, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: EmptyDir, medium memory, volume mode 0777
|
||||
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0777. The volume MUST have mode set as -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func() {
|
||||
doTest0777(f, 0, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: EmptyDir, medium memory, volume mode 0644, non-root user
|
||||
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0644. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-r--r-- and mount type set to tmpfs and the contents MUST be readable.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0644,tmpfs) [LinuxOnly] [NodeConformance]", func() {
|
||||
doTest0644(f, nonRootUID, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: EmptyDir, medium memory, volume mode 0666, non-root user
|
||||
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0666. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rw-rw-rw- and mount type set to tmpfs and the contents MUST be readable.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0666,tmpfs) [LinuxOnly] [NodeConformance]", func() {
|
||||
doTest0666(f, nonRootUID, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: EmptyDir, medium memory, volume mode 0777, non-root user
|
||||
Description: A Pod created with an 'emptyDir' Volume and 'medium' as 'Memory', the volume mode set to 0777. Volume is mounted into the container where container is run as a non-root user. The volume MUST have mode -rwxrwxrwx and mount type set to tmpfs and the contents MUST be readable.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID, or the medium = 'Memory'.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0777,tmpfs) [LinuxOnly] [NodeConformance]", func() {
|
||||
doTest0777(f, nonRootUID, v1.StorageMediumMemory)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: EmptyDir, medium default, volume mode default
|
||||
Description: A Pod created with an 'emptyDir' Volume, the volume MUST have mode set as -rwxrwxrwx.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
|
||||
*/
|
||||
framework.ConformanceIt("volume on default medium should have the correct mode [LinuxOnly] [NodeConformance]", func() {
|
||||
doTestVolumeMode(f, 0, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: EmptyDir, medium default, volume mode 0644
|
||||
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0644. The volume MUST have mode -rw-r--r-- and the contents MUST be readable.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0644,default) [LinuxOnly] [NodeConformance]", func() {
|
||||
doTest0644(f, 0, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: EmptyDir, medium default, volume mode 0666
|
||||
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0666. The volume MUST have mode -rw-rw-rw- and the contents MUST be readable.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0666,default) [LinuxOnly] [NodeConformance]", func() {
|
||||
doTest0666(f, 0, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: EmptyDir, medium default, volume mode 0777
|
||||
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0777. The volume MUST have mode set as -rwxrwxrwx and the contents MUST be readable.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
|
||||
*/
|
||||
framework.ConformanceIt("should support (root,0777,default) [LinuxOnly] [NodeConformance]", func() {
|
||||
doTest0777(f, 0, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: EmptyDir, medium default, volume mode 0644, non-root user
|
||||
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0644. Volume is mounted into the container where the container is run as a non-root user. The volume MUST have mode -rw-r--r-- and the contents MUST be readable.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0644,default) [LinuxOnly] [NodeConformance]", func() {
|
||||
doTest0644(f, nonRootUID, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: EmptyDir, medium default, volume mode 0666, non-root user
|
||||
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0666. Volume is mounted into the container where the container is run as a non-root user. The volume MUST have mode -rw-rw-rw- and the contents MUST be readable.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0666,default) [LinuxOnly] [NodeConformance]", func() {
|
||||
doTest0666(f, nonRootUID, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: EmptyDir, medium default, volume mode 0777, non-root user
|
||||
Description: A Pod created with an 'emptyDir' Volume, the volume mode set to 0777. Volume is mounted into the container where the container is run as a non-root user. The volume MUST have mode -rwxrwxrwx and the contents MUST be readable.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
|
||||
*/
|
||||
framework.ConformanceIt("should support (non-root,0777,default) [LinuxOnly] [NodeConformance]", func() {
|
||||
doTest0777(f, nonRootUID, v1.StorageMediumDefault)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.15
|
||||
Testname: EmptyDir, Shared volumes between containers
|
||||
Description: A Pod created with an 'emptyDir' Volume, should share volumes between the containers in the pod. The two busybox image containers should share the volumes mounted to the pod.
|
||||
The main container should wait until the sub container drops a file, and then access the shared data.
|
||||
*/
|
||||
framework.ConformanceIt("pod should support shared volumes between containers", func() {
|
||||
var (
|
||||
volumeName = "shared-data"
|
||||
busyBoxMainVolumeMountPath = "/usr/share/volumeshare"
|
||||
busyBoxSubVolumeMountPath = "/pod-data"
|
||||
busyBoxMainVolumeFilePath = fmt.Sprintf("%s/shareddata.txt", busyBoxMainVolumeMountPath)
|
||||
busyBoxSubVolumeFilePath = fmt.Sprintf("%s/shareddata.txt", busyBoxSubVolumeMountPath)
|
||||
message = "Hello from the busy-box sub-container"
|
||||
busyBoxMainContainerName = "busybox-main-container"
|
||||
busyBoxSubContainerName = "busybox-sub-container"
|
||||
|
||||
deletionGracePeriod = int64(0)
|
||||
)
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-sharedvolume-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: new(v1.EmptyDirVolumeSource),
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: busyBoxMainContainerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/sh"},
|
||||
Args: []string{"-c", "sleep 100000"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: busyBoxMainVolumeMountPath,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: busyBoxSubContainerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/sh"},
|
||||
Args: []string{"-c", fmt.Sprintf("echo %s > %s", message, busyBoxSubVolumeFilePath)},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: busyBoxSubVolumeMountPath,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
TerminationGracePeriodSeconds: &deletionGracePeriod,
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
ginkgo.By("Creating Pod")
|
||||
f.PodClient().Create(pod)
|
||||
framework.ExpectNoError(e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name), "failed to wait for pod %s to be running", pod.Name)
|
||||
|
||||
ginkgo.By("Reading file content from the nginx-container")
|
||||
result := f.ExecShellInContainer(pod.Name, busyBoxMainContainerName, fmt.Sprintf("cat %s", busyBoxMainVolumeFilePath))
|
||||
framework.ExpectEqual(result, message, "failed to match expected string %s with %s", message, result)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.20
|
||||
Testname: EmptyDir, Memory backed volume is sized to specified limit
|
||||
Description: A Pod created with an 'emptyDir' Volume backed by memory should be sized to the user-provided value.
|
||||
*/
|
||||
ginkgo.It("pod should support memory backed volumes of specified size", func() {
|
||||
// Skip if the feature gate is not enabled; this could be elevated to conformance in the future if on Linux.
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.SizeMemoryBackedVolumes) {
|
||||
return
|
||||
}
|
||||
|
||||
var (
|
||||
volumeName = "shared-data"
|
||||
busyBoxMainVolumeMountPath = "/usr/share/volumeshare"
|
||||
busyBoxMainContainerName = "busybox-main-container"
|
||||
expectedResult = "10240" // equal to 10Mi
|
||||
deletionGracePeriod = int64(0)
|
||||
sizeLimit = resource.MustParse("10Mi")
|
||||
)
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-size-memory-volume-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{
|
||||
Medium: v1.StorageMediumMemory,
|
||||
SizeLimit: &sizeLimit,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: busyBoxMainContainerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"/bin/sh"},
|
||||
Args: []string{"-c", "sleep 100000"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: busyBoxMainVolumeMountPath,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
TerminationGracePeriodSeconds: &deletionGracePeriod,
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
var err error
|
||||
ginkgo.By("Creating Pod")
|
||||
pod = f.PodClient().CreateSync(pod)
|
||||
|
||||
ginkgo.By("Waiting for the pod running")
|
||||
err = e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
|
||||
framework.ExpectNoError(err, "failed to deploy pod %s", pod.Name)
|
||||
|
||||
ginkgo.By("Getting the pod")
|
||||
pod, err = f.PodClient().Get(context.TODO(), pod.Name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err, "failed to get pod %s", pod.Name)
|
||||
|
||||
ginkgo.By("Reading empty dir size")
|
||||
result := f.ExecShellInContainer(pod.Name, busyBoxMainContainerName, fmt.Sprintf("df | grep %s | awk '{print $2}'", busyBoxMainVolumeMountPath))
|
||||
framework.ExpectEqual(result, expectedResult, "failed to match expected string %s with %s", expectedResult, result)
|
||||
})
|
||||
})
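
// exampleMemoryBackedVolumeSizeBlocks is an illustrative sketch (a hypothetical
// helper, not referenced by the tests above) of why the size test expects
// "10240" from `df`: df reports sizes in 1 KiB blocks, and the 10Mi emptyDir
// sizeLimit corresponds to 10 * 1024 = 10240 such blocks.
func exampleMemoryBackedVolumeSizeBlocks() int64 {
sizeLimit := resource.MustParse("10Mi") // same limit as used in the test above
return sizeLimit.Value() / 1024         // 10485760 bytes / 1024 = 10240
}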
|
||||
|
||||
const (
|
||||
containerName = "test-container"
|
||||
volumeName = "test-volume"
|
||||
)
|
||||
|
||||
func doTestSetgidFSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) {
|
||||
var (
|
||||
filePath = path.Join(volumePath, "test-file")
|
||||
source = &v1.EmptyDirVolumeSource{Medium: medium}
|
||||
pod = testPodWithVolume(uid, volumePath, source)
|
||||
)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
"mounttest",
|
||||
fmt.Sprintf("--fs_type=%v", volumePath),
|
||||
fmt.Sprintf("--new_file_0660=%v", filePath),
|
||||
fmt.Sprintf("--file_perm=%v", filePath),
|
||||
fmt.Sprintf("--file_owner=%v", filePath),
|
||||
}
|
||||
|
||||
fsGroup := int64(123)
|
||||
pod.Spec.SecurityContext.FSGroup = &fsGroup
|
||||
|
||||
msg := fmt.Sprintf("emptydir 0644 on %v", formatMedium(medium))
|
||||
out := []string{
|
||||
"perms of file \"/test-volume/test-file\": -rw-rw----",
|
||||
"content of file \"/test-volume/test-file\": mount-tester new file",
|
||||
"owner GID of \"/test-volume/test-file\": 123",
|
||||
}
|
||||
if medium == v1.StorageMediumMemory {
|
||||
out = append(out, "mount type of \"/test-volume\": tmpfs")
|
||||
}
|
||||
f.TestContainerOutput(msg, pod, 0, out)
|
||||
}
|
||||
|
||||
func doTestSubPathFSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) {
|
||||
var (
|
||||
subPath = "test-sub"
|
||||
source = &v1.EmptyDirVolumeSource{Medium: medium}
|
||||
pod = testPodWithVolume(uid, volumePath, source)
|
||||
)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
"mounttest",
|
||||
fmt.Sprintf("--fs_type=%v", volumePath),
|
||||
fmt.Sprintf("--file_perm=%v", volumePath),
|
||||
fmt.Sprintf("--file_owner=%v", volumePath),
|
||||
fmt.Sprintf("--file_mode=%v", volumePath),
|
||||
}
|
||||
|
||||
pod.Spec.Containers[0].VolumeMounts[0].SubPath = subPath
|
||||
|
||||
fsGroup := int64(123)
|
||||
pod.Spec.SecurityContext.FSGroup = &fsGroup
|
||||
|
||||
msg := fmt.Sprintf("emptydir subpath on %v", formatMedium(medium))
|
||||
out := []string{
|
||||
"perms of file \"/test-volume\": -rwxrwxrwx",
|
||||
"owner UID of \"/test-volume\": 0",
|
||||
"owner GID of \"/test-volume\": 123",
|
||||
"mode of file \"/test-volume\": dgtrwxrwxrwx",
|
||||
}
|
||||
if medium == v1.StorageMediumMemory {
|
||||
out = append(out, "mount type of \"/test-volume\": tmpfs")
|
||||
}
|
||||
f.TestContainerOutput(msg, pod, 0, out)
|
||||
}
|
||||
|
||||
func doTestVolumeModeFSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) {
|
||||
var (
|
||||
source = &v1.EmptyDirVolumeSource{Medium: medium}
|
||||
pod = testPodWithVolume(uid, volumePath, source)
|
||||
)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
"mounttest",
|
||||
fmt.Sprintf("--fs_type=%v", volumePath),
|
||||
fmt.Sprintf("--file_perm=%v", volumePath),
|
||||
}
|
||||
|
||||
fsGroup := int64(1001)
|
||||
pod.Spec.SecurityContext.FSGroup = &fsGroup
|
||||
|
||||
msg := fmt.Sprintf("emptydir volume type on %v", formatMedium(medium))
|
||||
out := []string{
|
||||
"perms of file \"/test-volume\": -rwxrwxrwx",
|
||||
}
|
||||
if medium == v1.StorageMediumMemory {
|
||||
out = append(out, "mount type of \"/test-volume\": tmpfs")
|
||||
}
|
||||
f.TestContainerOutput(msg, pod, 0, out)
|
||||
}
|
||||
|
||||
func doTest0644FSGroup(f *framework.Framework, uid int64, medium v1.StorageMedium) {
|
||||
var (
|
||||
filePath = path.Join(volumePath, "test-file")
|
||||
source = &v1.EmptyDirVolumeSource{Medium: medium}
|
||||
pod = testPodWithVolume(uid, volumePath, source)
|
||||
)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
"mounttest",
|
||||
fmt.Sprintf("--fs_type=%v", volumePath),
|
||||
fmt.Sprintf("--new_file_0644=%v", filePath),
|
||||
fmt.Sprintf("--file_perm=%v", filePath),
|
||||
}
|
||||
|
||||
fsGroup := int64(123)
|
||||
pod.Spec.SecurityContext.FSGroup = &fsGroup
|
||||
|
||||
msg := fmt.Sprintf("emptydir 0644 on %v", formatMedium(medium))
|
||||
out := []string{
|
||||
"perms of file \"/test-volume/test-file\": -rw-r--r--",
|
||||
"content of file \"/test-volume/test-file\": mount-tester new file",
|
||||
}
|
||||
if medium == v1.StorageMediumMemory {
|
||||
out = append(out, "mount type of \"/test-volume\": tmpfs")
|
||||
}
|
||||
f.TestContainerOutput(msg, pod, 0, out)
|
||||
}
|
||||
|
||||
func doTestVolumeMode(f *framework.Framework, uid int64, medium v1.StorageMedium) {
|
||||
var (
|
||||
source = &v1.EmptyDirVolumeSource{Medium: medium}
|
||||
pod = testPodWithVolume(uid, volumePath, source)
|
||||
)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
"mounttest",
|
||||
fmt.Sprintf("--fs_type=%v", volumePath),
|
||||
fmt.Sprintf("--file_perm=%v", volumePath),
|
||||
}
|
||||
|
||||
msg := fmt.Sprintf("emptydir volume type on %v", formatMedium(medium))
|
||||
out := []string{
|
||||
"perms of file \"/test-volume\": -rwxrwxrwx",
|
||||
}
|
||||
if medium == v1.StorageMediumMemory {
|
||||
out = append(out, "mount type of \"/test-volume\": tmpfs")
|
||||
}
|
||||
f.TestContainerOutput(msg, pod, 0, out)
|
||||
}
|
||||
|
||||
func doTest0644(f *framework.Framework, uid int64, medium v1.StorageMedium) {
|
||||
var (
|
||||
filePath = path.Join(volumePath, "test-file")
|
||||
source = &v1.EmptyDirVolumeSource{Medium: medium}
|
||||
pod = testPodWithVolume(uid, volumePath, source)
|
||||
)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
"mounttest",
|
||||
fmt.Sprintf("--fs_type=%v", volumePath),
|
||||
fmt.Sprintf("--new_file_0644=%v", filePath),
|
||||
fmt.Sprintf("--file_perm=%v", filePath),
|
||||
}
|
||||
|
||||
msg := fmt.Sprintf("emptydir 0644 on %v", formatMedium(medium))
|
||||
out := []string{
|
||||
"perms of file \"/test-volume/test-file\": -rw-r--r--",
|
||||
"content of file \"/test-volume/test-file\": mount-tester new file",
|
||||
}
|
||||
if medium == v1.StorageMediumMemory {
|
||||
out = append(out, "mount type of \"/test-volume\": tmpfs")
|
||||
}
|
||||
f.TestContainerOutput(msg, pod, 0, out)
|
||||
}
|
||||
|
||||
func doTest0666(f *framework.Framework, uid int64, medium v1.StorageMedium) {
|
||||
var (
|
||||
filePath = path.Join(volumePath, "test-file")
|
||||
source = &v1.EmptyDirVolumeSource{Medium: medium}
|
||||
pod = testPodWithVolume(uid, volumePath, source)
|
||||
)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
"mounttest",
|
||||
fmt.Sprintf("--fs_type=%v", volumePath),
|
||||
fmt.Sprintf("--new_file_0666=%v", filePath),
|
||||
fmt.Sprintf("--file_perm=%v", filePath),
|
||||
}
|
||||
|
||||
msg := fmt.Sprintf("emptydir 0666 on %v", formatMedium(medium))
|
||||
out := []string{
|
||||
"perms of file \"/test-volume/test-file\": -rw-rw-rw-",
|
||||
"content of file \"/test-volume/test-file\": mount-tester new file",
|
||||
}
|
||||
if medium == v1.StorageMediumMemory {
|
||||
out = append(out, "mount type of \"/test-volume\": tmpfs")
|
||||
}
|
||||
f.TestContainerOutput(msg, pod, 0, out)
|
||||
}
|
||||
|
||||
func doTest0777(f *framework.Framework, uid int64, medium v1.StorageMedium) {
|
||||
var (
|
||||
filePath = path.Join(volumePath, "test-file")
|
||||
source = &v1.EmptyDirVolumeSource{Medium: medium}
|
||||
pod = testPodWithVolume(uid, volumePath, source)
|
||||
)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
"mounttest",
|
||||
fmt.Sprintf("--fs_type=%v", volumePath),
|
||||
fmt.Sprintf("--new_file_0777=%v", filePath),
|
||||
fmt.Sprintf("--file_perm=%v", filePath),
|
||||
}
|
||||
|
||||
msg := fmt.Sprintf("emptydir 0777 on %v", formatMedium(medium))
|
||||
out := []string{
|
||||
"perms of file \"/test-volume/test-file\": -rwxrwxrwx",
|
||||
"content of file \"/test-volume/test-file\": mount-tester new file",
|
||||
}
|
||||
if medium == v1.StorageMediumMemory {
|
||||
out = append(out, "mount type of \"/test-volume\": tmpfs")
|
||||
}
|
||||
f.TestContainerOutput(msg, pod, 0, out)
|
||||
}
|
||||
|
||||
func formatMedium(medium v1.StorageMedium) string {
|
||||
if medium == v1.StorageMediumMemory {
|
||||
return "tmpfs"
|
||||
}
|
||||
|
||||
return "node default medium"
|
||||
}
|
||||
|
||||
// testPodWithVolume creates a Pod that runs as the given UID and with the given empty dir source mounted at the given path.
|
||||
// If the uid is 0, the Pod will run as its default user (root).
|
||||
func testPodWithVolume(uid int64, path string, source *v1.EmptyDirVolumeSource) *v1.Pod {
|
||||
podName := "pod-" + string(uuid.NewUUID())
|
||||
pod := &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: containerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: path,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
SecurityContext: &v1.PodSecurityContext{
|
||||
SELinuxOptions: &v1.SELinuxOptions{
|
||||
Level: "s0",
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: source,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
if uid != 0 {
|
||||
pod.Spec.SecurityContext.RunAsUser = &uid
|
||||
}
|
||||
|
||||
return pod
|
||||
}
|
||||
24
test/e2e/common/storage/framework.go
Normal file
@@ -0,0 +1,24 @@
|
||||
/*
|
||||
Copyright 2021 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import "k8s.io/kubernetes/test/e2e/framework"
|
||||
|
||||
// SIGDescribe annotates the test with the SIG label.
|
||||
func SIGDescribe(text string, body func()) bool {
|
||||
return framework.KubeDescribe("[sig-storage] "+text, body)
|
||||
}
|
||||
194
test/e2e/common/storage/host_path.go
Normal file
@@ -0,0 +1,194 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
//TODO: Consolidate this code with the code for emptyDir.
|
||||
//This will require some smarts.
|
||||
var _ = SIGDescribe("HostPath", func() {
|
||||
f := framework.NewDefaultFramework("hostpath")
|
||||
|
||||
ginkgo.BeforeEach(func() {
|
||||
// TODO permission denied cleanup failures
|
||||
//cleanup before running the test.
|
||||
_ = os.Remove("/tmp/test-file")
|
||||
})
|
||||
|
||||
/*
|
||||
Host path, volume mode default
|
||||
Create a Pod with host volume mounted. The volume mounted MUST be a directory with permissions mode -rwxrwxrwx and that has the sticky bit (mode flag t) set.
|
||||
This test is marked LinuxOnly since Windows does not support setting the sticky bit (mode flag t).
|
||||
*/
|
||||
ginkgo.It("should give a volume the correct mode [LinuxOnly] [NodeConformance]", func() {
|
||||
source := &v1.HostPathVolumeSource{
|
||||
Path: "/tmp",
|
||||
}
|
||||
pod := testPodWithHostVol(volumePath, source, false)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
"mounttest",
|
||||
fmt.Sprintf("--fs_type=%v", volumePath),
|
||||
fmt.Sprintf("--file_mode=%v", volumePath),
|
||||
}
|
||||
f.TestContainerOutput("hostPath mode", pod, 0, []string{
|
||||
"mode of file \"/test-volume\": dtrwxrwx", // we expect the sticky bit (mode flag t) to be set for the dir
|
||||
})
|
||||
})
|
||||
|
||||
// This test requires mounting a folder into a container with write privileges.
|
||||
ginkgo.It("should support r/w [NodeConformance]", func() {
|
||||
filePath := path.Join(volumePath, "test-file")
|
||||
retryDuration := 180
|
||||
source := &v1.HostPathVolumeSource{
|
||||
Path: "/tmp",
|
||||
}
|
||||
// we can't spawn privileged containers on Windows, nor do we need to.
|
||||
privileged := !framework.NodeOSDistroIs("windows")
|
||||
pod := testPodWithHostVol(volumePath, source, privileged)
|
||||
|
||||
pod.Spec.Containers[0].Args = []string{
|
||||
"mounttest",
|
||||
fmt.Sprintf("--new_file_0644=%v", filePath),
|
||||
fmt.Sprintf("--file_mode=%v", filePath),
|
||||
}
|
||||
|
||||
pod.Spec.Containers[1].Args = []string{
|
||||
"mounttest",
|
||||
fmt.Sprintf("--file_content_in_loop=%v", filePath),
|
||||
fmt.Sprintf("--retry_time=%d", retryDuration),
|
||||
}
|
||||
//Read the content of the file with the second container to
|
||||
//verify volumes being shared properly among containers within the pod.
|
||||
f.TestContainerOutput("hostPath r/w", pod, 1, []string{
|
||||
"content of file \"/test-volume/test-file\": mount-tester new file",
|
||||
})
|
||||
})
|
||||
|
||||
ginkgo.It("should support subPath [NodeConformance]", func() {
|
||||
subPath := "sub-path"
|
||||
fileName := "test-file"
|
||||
retryDuration := 180
|
||||
|
||||
filePathInWriter := path.Join(volumePath, fileName)
|
||||
filePathInReader := path.Join(volumePath, subPath, fileName)
|
||||
|
||||
source := &v1.HostPathVolumeSource{
|
||||
Path: "/tmp",
|
||||
}
|
||||
|
||||
// we can't spawn privileged containers on Windows, nor do we need to.
|
||||
privileged := !framework.NodeOSDistroIs("windows")
|
||||
pod := testPodWithHostVol(volumePath, source, privileged)
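
// Container 0 mounts the volume with SubPath "sub-path", so its /test-volume
// maps to /tmp/sub-path on the host; container 1 mounts the full host path,
// which is why the same file is read back at /test-volume/sub-path/test-file.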
|
||||
|
||||
// Write the file in the subPath from container 0
|
||||
container := &pod.Spec.Containers[0]
|
||||
container.VolumeMounts[0].SubPath = subPath
|
||||
container.Args = []string{
|
||||
"mounttest",
|
||||
fmt.Sprintf("--new_file_0644=%v", filePathInWriter),
|
||||
fmt.Sprintf("--file_mode=%v", filePathInWriter),
|
||||
}
|
||||
|
||||
// Read it from outside the subPath from container 1
|
||||
pod.Spec.Containers[1].Args = []string{
|
||||
"mounttest",
|
||||
fmt.Sprintf("--file_content_in_loop=%v", filePathInReader),
|
||||
fmt.Sprintf("--retry_time=%d", retryDuration),
|
||||
}
|
||||
|
||||
f.TestContainerOutput("hostPath subPath", pod, 1, []string{
|
||||
"content of file \"" + filePathInReader + "\": mount-tester new file",
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
//These constants are borrowed from the other test.
|
||||
//const volumeName = "test-volume"
|
||||
const containerName1 = "test-container-1"
|
||||
const containerName2 = "test-container-2"
|
||||
|
||||
func mount(source *v1.HostPathVolumeSource) []v1.Volume {
|
||||
return []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
HostPath: source,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
//TODO: To merge this with the emptyDir tests, we can make source a lambda.
|
||||
func testPodWithHostVol(path string, source *v1.HostPathVolumeSource, privileged bool) *v1.Pod {
|
||||
podName := "pod-host-path-test"
|
||||
|
||||
return &v1.Pod{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Pod",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: containerName1,
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{"mounttest"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: path,
|
||||
},
|
||||
},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: &privileged,
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: containerName2,
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{"mounttest"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: path,
|
||||
},
|
||||
},
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
Privileged: &privileged,
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
Volumes: mount(source),
|
||||
},
|
||||
}
|
||||
}
|
||||
148
test/e2e/common/storage/projected_combined.go
Normal file
@@ -0,0 +1,148 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("Projected combined", func() {
|
||||
f := framework.NewDefaultFramework("projected")
|
||||
|
||||
// Test multiple projections
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, multiple projections
|
||||
Description: A Pod is created with a projected volume source for secrets, configMap and downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the secrets, configMap values and the cpu and memory limits as well as cpu and memory requests from the mounted DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should project all components that make up the projection API [Projection][NodeConformance]", func() {
|
||||
var err error
|
||||
podName := "projected-volume-" + string(uuid.NewUUID())
|
||||
secretName := "secret-projected-all-test-volume-" + string(uuid.NewUUID())
|
||||
configMapName := "configmap-projected-all-test-volume-" + string(uuid.NewUUID())
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: configMapName,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"configmap-data": "configmap-value-1",
|
||||
},
|
||||
}
|
||||
secret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: secretName,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"secret-data": []byte("secret-value-1"),
|
||||
},
|
||||
}
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
|
||||
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
|
||||
}
|
||||
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
|
||||
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
|
||||
}
|
||||
|
||||
pod := projectedAllVolumeBasePod(podName, secretName, configMapName, nil, nil)
|
||||
pod.Spec.Containers = []v1.Container{
|
||||
{
|
||||
Name: "projected-all-volume-test",
|
||||
Image: imageutils.GetE2EImage(imageutils.BusyBox),
|
||||
Command: []string{"sh", "-c", "cat /all/podname && cat /all/secret-data && cat /all/configmap-data"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "podinfo",
|
||||
MountPath: "/all",
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
f.TestContainerOutput("Check all projections for projected volume plugin", pod, 0, []string{
|
||||
podName,
|
||||
"secret-value-1",
|
||||
"configmap-value-1",
|
||||
})
|
||||
})
|
||||
})
|
||||
|
||||
func projectedAllVolumeBasePod(podName string, secretName string, configMapName string, labels, annotations map[string]string) *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Labels: labels,
|
||||
Annotations: annotations,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "podinfo",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{
|
||||
DownwardAPI: &v1.DownwardAPIProjection{
|
||||
Items: []v1.DownwardAPIVolumeFile{
|
||||
{
|
||||
Path: "podname",
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "metadata.name",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Secret: &v1.SecretProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: secretName,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ConfigMap: &v1.ConfigMapProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: configMapName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
return pod
|
||||
}
|
||||
591
test/e2e/common/storage/projected_configmap.go
Normal file
@@ -0,0 +1,591 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("Projected configMap", func() {
|
||||
f := framework.NewDefaultFramework("projected")
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, ConfigMap, volume mode default
|
||||
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with default permission mode. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
|
||||
doProjectedConfigMapE2EWithoutMappings(f, false, 0, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, ConfigMap, volume mode 0400
|
||||
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with permission mode set to 0400. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -r--------.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func() {
|
||||
defaultMode := int32(0400)
|
||||
doProjectedConfigMapE2EWithoutMappings(f, false, 0, &defaultMode)
|
||||
})
|
||||
|
||||
ginkgo.It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeFeature:FSGroup]", func() {
|
||||
// Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions.
|
||||
e2eskipper.SkipIfNodeOSDistroIs("windows")
|
||||
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
|
||||
doProjectedConfigMapE2EWithoutMappings(f, true, 1001, &defaultMode)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, ConfigMap, non-root user
|
||||
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap as non-root user with uid 1000. Pod MUST be able to read the content of the ConfigMap successfully and the mode on the volume MUST be -rw-r--r--.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume as non-root [NodeConformance]", func() {
|
||||
doProjectedConfigMapE2EWithoutMappings(f, true, 0, nil)
|
||||
})
|
||||
|
||||
ginkgo.It("should be consumable from pods in volume as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func() {
|
||||
// Windows does not support RunAsUser / FSGroup SecurityContext options.
|
||||
e2eskipper.SkipIfNodeOSDistroIs("windows")
|
||||
doProjectedConfigMapE2EWithoutMappings(f, true, 1001, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, ConfigMap, mapped
|
||||
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with default permission mode. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -rw-r--r--.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
|
||||
doProjectedConfigMapE2EWithMappings(f, false, 0, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, ConfigMap, mapped, volume mode 0400
|
||||
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap with permission mode set to 0400. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -r--r--r--.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item mode set [LinuxOnly] [NodeConformance]", func() {
|
||||
mode := int32(0400)
|
||||
doProjectedConfigMapE2EWithMappings(f, false, 0, &mode)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, ConfigMap, mapped, non-root user
|
||||
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap as non-root user with uid 1000. The ConfigMap is also mapped to a custom path. Pod MUST be able to read the content of the ConfigMap from the custom location successfully and the mode on the volume MUST be -r--r--r--.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings as non-root [NodeConformance]", func() {
|
||||
doProjectedConfigMapE2EWithMappings(f, true, 0, nil)
|
||||
})
|
||||
|
||||
ginkgo.It("should be consumable from pods in volume with mappings as non-root with FSGroup [LinuxOnly] [NodeFeature:FSGroup]", func() {
|
||||
// Windows does not support RunAsUser / FSGroup SecurityContext options.
|
||||
e2eskipper.SkipIfNodeOSDistroIs("windows")
|
||||
doProjectedConfigMapE2EWithMappings(f, true, 1001, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, ConfigMap, update
|
||||
Description: A Pod is created with projected volume source 'ConfigMap' to store a configMap, and the configMap is then updated to a new value. Pod MUST be able to read the initial configMap value of value-1. Pod MUST then be able to read the updated value of value-2 from the configMap.
|
||||
*/
|
||||
framework.ConformanceIt("updates should be reflected in volume [NodeConformance]", func() {
|
||||
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
|
||||
name := "projected-configmap-test-upd-" + string(uuid.NewUUID())
|
||||
volumeName := "projected-configmap-volume"
|
||||
volumeMountPath := "/etc/projected-configmap-volume"
|
||||
configMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: name,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
},
|
||||
}
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating projection with configMap that has name %s", configMap.Name))
|
||||
var err error
|
||||
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
|
||||
}
|
||||
|
||||
pod := createProjectedConfigMapMounttestPod(f.Namespace.Name, volumeName, name, volumeMountPath,
|
||||
"--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volume/data-1")
|
||||
|
||||
ginkgo.By("Creating the pod")
|
||||
f.PodClient().CreateSync(pod)
|
||||
|
||||
pollLogs := func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
|
||||
}
|
||||
|
||||
gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Updating configmap %v", configMap.Name))
|
||||
configMap.ResourceVersion = "" // to force update
|
||||
configMap.Data["data-1"] = "value-2"
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), configMap, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
|
||||
|
||||
ginkgo.By("waiting to observe update in volume")
|
||||
gomega.Eventually(pollLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-2"))
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, ConfigMap, create, update and delete
|
||||
Description: Create a Pod with three containers consuming ConfigMaps, namely a create, an update and a delete container. The create container when started MUST NOT have its configMap yet; the update and delete containers MUST be created with a ConfigMap value of 'value-1'. Create the configMap for the create container: the Pod MUST be able to read the configMap from the create container. Update the configMap for the update container: the Pod MUST be able to read the updated configMap value. Delete the configMap for the delete container: the Pod MUST fail to read the configMap from the delete container.
|
||||
*/
|
||||
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
|
||||
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
trueVal := true
|
||||
volumeMountPath := "/etc/projected-configmap-volumes"
|
||||
|
||||
deleteName := "cm-test-opt-del-" + string(uuid.NewUUID())
|
||||
deleteContainerName := "delcm-volume-test"
|
||||
deleteVolumeName := "deletecm-volume"
|
||||
deleteConfigMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: deleteName,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
},
|
||||
}
|
||||
|
||||
updateName := "cm-test-opt-upd-" + string(uuid.NewUUID())
|
||||
updateContainerName := "updcm-volume-test"
|
||||
updateVolumeName := "updatecm-volume"
|
||||
updateConfigMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: updateName,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
},
|
||||
}
|
||||
|
||||
createName := "cm-test-opt-create-" + string(uuid.NewUUID())
|
||||
createContainerName := "createcm-volume-test"
|
||||
createVolumeName := "createcm-volume"
|
||||
createConfigMap := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: createName,
|
||||
},
|
||||
Data: map[string]string{
|
||||
"data-1": "value-1",
|
||||
},
|
||||
}
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", deleteConfigMap.Name))
|
||||
var err error
|
||||
if deleteConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), deleteConfigMap, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", deleteConfigMap.Name, err)
|
||||
}
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", updateConfigMap.Name))
|
||||
if updateConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), updateConfigMap, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", updateConfigMap.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-projected-configmaps-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: deleteVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{
|
||||
ConfigMap: &v1.ConfigMapProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: deleteName,
|
||||
},
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: updateVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{
|
||||
ConfigMap: &v1.ConfigMapProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: updateName,
|
||||
},
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: createVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{
|
||||
ConfigMap: &v1.ConfigMapProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: createName,
|
||||
},
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: deleteContainerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{"mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volumes/delete/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: deleteVolumeName,
|
||||
MountPath: path.Join(volumeMountPath, "delete"),
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: updateContainerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{"mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volumes/update/data-3"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: updateVolumeName,
|
||||
MountPath: path.Join(volumeMountPath, "update"),
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: createContainerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{"mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-configmap-volumes/create/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: createVolumeName,
|
||||
MountPath: path.Join(volumeMountPath, "create"),
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
ginkgo.By("Creating the pod")
|
||||
f.PodClient().CreateSync(pod)
|
||||
|
||||
pollCreateLogs := func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
|
||||
}
|
||||
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/create/data-1"))
|
||||
|
||||
pollUpdateLogs := func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
|
||||
}
|
||||
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/update/data-3"))
|
||||
|
||||
pollDeleteLogs := func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
|
||||
}
|
||||
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Deleting configmap %v", deleteConfigMap.Name))
|
||||
err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(context.TODO(), deleteConfigMap.Name, metav1.DeleteOptions{})
|
||||
framework.ExpectNoError(err, "Failed to delete configmap %q in namespace %q", deleteConfigMap.Name, f.Namespace.Name)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Updating configmap %v", updateConfigMap.Name))
|
||||
updateConfigMap.ResourceVersion = "" // to force update
|
||||
delete(updateConfigMap.Data, "data-1")
|
||||
updateConfigMap.Data["data-3"] = "value-3"
|
||||
_, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Update(context.TODO(), updateConfigMap, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err, "Failed to update configmap %q in namespace %q", updateConfigMap.Name, f.Namespace.Name)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", createConfigMap.Name))
|
||||
if createConfigMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), createConfigMap, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", createConfigMap.Name, err)
|
||||
}
|
||||
|
||||
ginkgo.By("waiting to observe update in volume")
|
||||
|
||||
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
|
||||
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
|
||||
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-configmap-volumes/delete/data-1"))
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, ConfigMap, multiple volume paths
|
||||
Description: A Pod is created with a projected volume source 'ConfigMap' to store a configMap. The configMap is mapped to two different volume mounts. Pod MUST be able to read the content of the configMap successfully from the two volume mounts.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable in multiple volumes in the same pod [NodeConformance]", func() {
|
||||
var (
|
||||
name = "projected-configmap-test-volume-" + string(uuid.NewUUID())
|
||||
volumeName = "projected-configmap-volume"
|
||||
volumeMountPath = "/etc/projected-configmap-volume"
|
||||
volumeName2 = "projected-configmap-volume-2"
|
||||
volumeMountPath2 = "/etc/projected-configmap-volume-2"
|
||||
configMap = newConfigMap(f, name)
|
||||
)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
|
||||
var err error
|
||||
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-projected-configmaps-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{
|
||||
ConfigMap: &v1.ConfigMapProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: name,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: volumeName2,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{
|
||||
|
||||
ConfigMap: &v1.ConfigMapProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: name,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "projected-configmap-volume-test",
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{"mounttest", "--file_content=/etc/projected-configmap-volume/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: volumeMountPath,
|
||||
ReadOnly: true,
|
||||
},
|
||||
{
|
||||
Name: volumeName2,
|
||||
MountPath: volumeMountPath2,
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
f.TestContainerOutput("consume configMaps", pod, 0, []string{
|
||||
"content of file \"/etc/projected-configmap-volume/data-1\": value-1",
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
// The pod stays in the Pending phase during volume creation until the configMap objects are available,
// or until mounting the configMap volume times out. There is no configMap object defined for the pod,
// so the mount should time out unless the volume is marked optional.
// Slow (~5 mins)
|
||||
ginkgo.It("Should fail non-optional pod creation due to configMap object does not exist [Slow]", func() {
|
||||
volumeMountPath := "/etc/projected-configmap-volumes"
|
||||
pod, err := createNonOptionalConfigMapPod(f, volumeMountPath)
|
||||
framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)
|
||||
})
|
||||
|
||||
// A configMap object is defined for the pod. If a key is specified which is not present in the configMap,
// the volume setup will error during pod creation unless the key is marked optional.
// Slow (~5 mins)
|
||||
ginkgo.It("Should fail non-optional pod creation due to the key in the configMap object does not exist [Slow]", func() {
|
||||
volumeMountPath := "/etc/configmap-volumes"
|
||||
pod, err := createNonOptionalConfigMapPodWithConfig(f, volumeMountPath)
|
||||
framework.ExpectError(err, "created pod %q with non-optional configMap in namespace %q", pod.Name, f.Namespace.Name)
|
||||
})
|
||||
})
|
||||
|
||||
func doProjectedConfigMapE2EWithoutMappings(f *framework.Framework, asUser bool, fsGroup int64, defaultMode *int32) {
|
||||
groupID := int64(fsGroup)
|
||||
|
||||
var (
|
||||
name = "projected-configmap-test-volume-" + string(uuid.NewUUID())
|
||||
volumeName = "projected-configmap-volume"
|
||||
volumeMountPath = "/etc/projected-configmap-volume"
|
||||
configMap = newConfigMap(f, name)
|
||||
)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
|
||||
var err error
|
||||
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
|
||||
}
|
||||
|
||||
pod := createProjectedConfigMapMounttestPod(f.Namespace.Name, volumeName, name, volumeMountPath,
|
||||
"--file_content=/etc/projected-configmap-volume/data-1", "--file_mode=/etc/projected-configmap-volume/data-1")
|
||||
|
||||
if asUser {
|
||||
setPodNonRootUser(pod)
|
||||
}
|
||||
|
||||
if groupID != 0 {
|
||||
pod.Spec.SecurityContext.FSGroup = &groupID
|
||||
}
|
||||
|
||||
if defaultMode != nil {
|
||||
//pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].ConfigMap.DefaultMode = defaultMode
|
||||
pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = defaultMode
|
||||
}
|
||||
|
||||
fileModeRegexp := getFileModeRegex("/etc/projected-configmap-volume/data-1", defaultMode)
|
||||
output := []string{
|
||||
"content of file \"/etc/projected-configmap-volume/data-1\": value-1",
|
||||
fileModeRegexp,
|
||||
}
|
||||
f.TestContainerOutputRegexp("consume configMaps", pod, 0, output)
|
||||
}
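// Illustrative call (hypothetical, based only on the signature above): exercise
// this helper with a non-default volume mode, no non-root user and no fsGroup:
//
//	defaultMode := int32(0400)
//	doProjectedConfigMapE2EWithoutMappings(f, false, 0, &defaultMode)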
|
||||
|
||||
func doProjectedConfigMapE2EWithMappings(f *framework.Framework, asUser bool, fsGroup int64, itemMode *int32) {
|
||||
groupID := int64(fsGroup)
|
||||
|
||||
var (
|
||||
name = "projected-configmap-test-volume-map-" + string(uuid.NewUUID())
|
||||
volumeName = "projected-configmap-volume"
|
||||
volumeMountPath = "/etc/projected-configmap-volume"
|
||||
configMap = newConfigMap(f, name)
|
||||
)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
|
||||
|
||||
var err error
|
||||
if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), configMap, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
|
||||
}
|
||||
|
||||
pod := createProjectedConfigMapMounttestPod(f.Namespace.Name, volumeName, name, volumeMountPath,
|
||||
"--file_content=/etc/projected-configmap-volume/path/to/data-2", "--file_mode=/etc/projected-configmap-volume/path/to/data-2")
|
||||
pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].ConfigMap.Items = []v1.KeyToPath{
|
||||
{
|
||||
Key: "data-2",
|
||||
Path: "path/to/data-2",
|
||||
},
|
||||
}
|
||||
|
||||
if asUser {
|
||||
setPodNonRootUser(pod)
|
||||
}
|
||||
|
||||
if groupID != 0 {
|
||||
pod.Spec.SecurityContext.FSGroup = &groupID
|
||||
}
|
||||
|
||||
if itemMode != nil {
|
||||
//pod.Spec.Volumes[0].VolumeSource.ConfigMap.Items[0].Mode = itemMode
|
||||
pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = itemMode
|
||||
}
|
||||
|
||||
// Just check file mode if fsGroup is not set. If fsGroup is set, the
|
||||
// final mode is adjusted and we are not testing that case.
|
||||
output := []string{
|
||||
"content of file \"/etc/projected-configmap-volume/path/to/data-2\": value-2",
|
||||
}
|
||||
if fsGroup == 0 {
|
||||
fileModeRegexp := getFileModeRegex("/etc/projected-configmap-volume/path/to/data-2", itemMode)
|
||||
output = append(output, fileModeRegexp)
|
||||
}
|
||||
f.TestContainerOutputRegexp("consume configMaps", pod, 0, output)
|
||||
}
|
||||
|
||||
func createProjectedConfigMapMounttestPod(namespace, volumeName, referenceName, mountPath string, mounttestArgs ...string) *v1.Pod {
volumes := []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{
ConfigMap: &v1.ConfigMapProjection{
LocalObjectReference: v1.LocalObjectReference{
Name: referenceName,
},
},
},
},
},
},
},
}
podName := "pod-projected-configmaps-" + string(uuid.NewUUID())
mounttestArgs = append([]string{"mounttest"}, mounttestArgs...)
pod := e2epod.NewAgnhostPod(namespace, podName, volumes, createMounts(volumeName, mountPath, true), nil, mounttestArgs...)
pod.Spec.RestartPolicy = v1.RestartPolicyNever
return pod
}
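// Illustrative call (mirroring the mounttest invocations earlier in this file):
// build a pod that prints the content of a single projected configMap key:
//
//	pod := createProjectedConfigMapMounttestPod(f.Namespace.Name, "projected-configmap-volume",
//		configMap.Name, "/etc/projected-configmap-volume",
//		"--file_content=/etc/projected-configmap-volume/data-1")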
|
||||
401
test/e2e/common/storage/projected_downwardapi.go
Normal file
@@ -0,0 +1,401 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import (
"fmt"
"time"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"

"github.com/onsi/ginkgo"
"github.com/onsi/gomega"
)
|
||||
|
||||
var _ = SIGDescribe("Projected downwardAPI", func() {
|
||||
f := framework.NewDefaultFramework("projected")
|
||||
|
||||
// How long to wait for pod logs to be displayed
|
||||
const podLogTimeout = 2 * time.Minute
|
||||
var podClient *framework.PodClient
|
||||
ginkgo.BeforeEach(func() {
|
||||
podClient = f.PodClient()
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, DownwardAPI, pod name
|
||||
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should provide podname only [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
fmt.Sprintf("%s\n", podName),
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, DownwardAPI, volume mode 0400
|
||||
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The default mode for the volume mount is set to 0400. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles and the volume mode must be -r--------.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should set DefaultMode on files [LinuxOnly] [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
defaultMode := int32(0400)
|
||||
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &defaultMode)
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
"mode of file \"/etc/podinfo/podname\": -r--------",
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, DownwardAPI, volume mode 0400
|
||||
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The default mode for the volume mount is set to 0400. Pod MUST be able to read the pod name from the mounted DownwardAPIVolumeFiles and the volume mode must be -r--------.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should set mode on item file [LinuxOnly] [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
mode := int32(0400)
|
||||
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
"mode of file \"/etc/podinfo/podname\": -r--------",
|
||||
})
|
||||
})
|
||||
|
||||
ginkgo.It("should provide podname as non-root with fsgroup [LinuxOnly] [NodeFeature:FSGroup]", func() {
|
||||
// Windows does not support RunAsUser / FSGroup SecurityContext options.
|
||||
e2eskipper.SkipIfNodeOSDistroIs("windows")
|
||||
podName := "metadata-volume-" + string(uuid.NewUUID())
|
||||
gid := int64(1234)
|
||||
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podinfo/podname")
|
||||
pod.Spec.SecurityContext = &v1.PodSecurityContext{
|
||||
FSGroup: &gid,
|
||||
}
|
||||
setPodNonRootUser(pod)
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
fmt.Sprintf("%s\n", podName),
|
||||
})
|
||||
})
|
||||
|
||||
ginkgo.It("should provide podname as non-root with fsgroup and defaultMode [LinuxOnly] [NodeFeature:FSGroup]", func() {
|
||||
// Windows does not support RunAsUser / FSGroup SecurityContext options, and it does not support setting file permissions.
|
||||
e2eskipper.SkipIfNodeOSDistroIs("windows")
|
||||
podName := "metadata-volume-" + string(uuid.NewUUID())
|
||||
gid := int64(1234)
|
||||
mode := int32(0440) /* setting fsGroup sets mode to at least 440 */
|
||||
pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
|
||||
pod.Spec.SecurityContext = &v1.PodSecurityContext{
|
||||
FSGroup: &gid,
|
||||
}
|
||||
setPodNonRootUser(pod)
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
"mode of file \"/etc/podinfo/podname\": -r--r-----",
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, DownwardAPI, update labels
|
||||
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests and label items. Pod MUST be able to read the labels from the mounted DownwardAPIVolumeFiles. Labels are then updated. Pod MUST be able to read the updated values for the Labels.
|
||||
*/
|
||||
framework.ConformanceIt("should update labels on modification [NodeConformance]", func() {
|
||||
labels := map[string]string{}
|
||||
labels["key1"] = "value1"
|
||||
labels["key2"] = "value2"
|
||||
|
||||
podName := "labelsupdate" + string(uuid.NewUUID())
|
||||
pod := projectedDownwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/podinfo/labels")
|
||||
containerName := "client-container"
|
||||
ginkgo.By("Creating the pod")
|
||||
podClient.CreateSync(pod)
|
||||
|
||||
gomega.Eventually(func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName)
|
||||
},
|
||||
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key1=\"value1\"\n"))
|
||||
|
||||
//modify labels
|
||||
podClient.Update(podName, func(pod *v1.Pod) {
|
||||
pod.Labels["key3"] = "value3"
|
||||
})
|
||||
|
||||
gomega.Eventually(func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
|
||||
},
|
||||
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("key3=\"value3\"\n"))
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, DownwardAPI, update annotation
|
||||
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests and annotation items. Pod MUST be able to read the annotations from the mounted DownwardAPIVolumeFiles. Annotations are then updated. Pod MUST be able to read the updated values for the Annotations.
|
||||
*/
|
||||
framework.ConformanceIt("should update annotations on modification [NodeConformance]", func() {
|
||||
annotations := map[string]string{}
|
||||
annotations["builder"] = "bar"
|
||||
podName := "annotationupdate" + string(uuid.NewUUID())
|
||||
pod := projectedDownwardAPIVolumePodForUpdateTest(podName, map[string]string{}, annotations, "/etc/podinfo/annotations")
|
||||
|
||||
containerName := "client-container"
|
||||
ginkgo.By("Creating the pod")
|
||||
pod = podClient.CreateSync(pod)
|
||||
|
||||
gomega.Eventually(func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
|
||||
},
|
||||
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"bar\"\n"))
|
||||
|
||||
//modify annotations
|
||||
podClient.Update(podName, func(pod *v1.Pod) {
|
||||
pod.Annotations["builder"] = "foo"
|
||||
})
|
||||
|
||||
gomega.Eventually(func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
|
||||
},
|
||||
podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("builder=\"foo\"\n"))
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, DownwardAPI, CPU limits
|
||||
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the cpu limits from the mounted DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's cpu limit [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_limit")
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
fmt.Sprintf("2\n"),
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, DownwardAPI, memory limits
|
||||
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the memory limits from the mounted DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's memory limit [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_limit")
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
fmt.Sprintf("67108864\n"),
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, DownwardAPI, CPU request
|
||||
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the cpu request from the mounted DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's cpu request [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/cpu_request")
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
fmt.Sprintf("1\n"),
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, DownwardAPI, memory request
|
||||
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. Pod MUST be able to read the memory request from the mounted DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should provide container's memory request [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForContainerResources(podName, "/etc/podinfo/memory_request")
|
||||
|
||||
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
|
||||
fmt.Sprintf("33554432\n"),
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, DownwardAPI, CPU limit, node allocatable
|
||||
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The CPU and memory resources for requests and limits are NOT specified for the container. Pod MUST be able to read the default cpu limits from the mounted DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should provide node allocatable (cpu) as default cpu limit if the limit is not set [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/cpu_limit")
|
||||
|
||||
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, DownwardAPI, memory limit, node allocatable
|
||||
Description: A Pod is created with a projected volume source for downwardAPI with pod name, cpu and memory limits and cpu and memory requests. The CPU and memory resources for requests and limits are NOT specified for the container. Pod MUST be able to read the default memory limits from the mounted DownwardAPIVolumeFiles.
|
||||
*/
|
||||
framework.ConformanceIt("should provide node allocatable (memory) as default memory limit if the limit is not set [NodeConformance]", func() {
|
||||
podName := "downwardapi-volume-" + string(uuid.NewUUID())
|
||||
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/podinfo/memory_limit")
|
||||
|
||||
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
|
||||
})
|
||||
})
|
||||
|
||||
func projectedDownwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMode *int32) *v1.Pod {
pod := projectedDownwardAPIVolumeBasePod(name, nil, nil)

pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: imageutils.GetE2EImage(imageutils.Agnhost),
Args: []string{"mounttest", "--file_mode=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc/podinfo",
},
},
},
}
if itemMode != nil {
pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].DownwardAPI.Items[0].Mode = itemMode
}
if defaultMode != nil {
pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = defaultMode
}

return pod
}
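// Illustrative calls (mirroring the ConformanceIt blocks above): the tests set
// either the per-item mode or the volume-wide default mode, not both at once:
//
//	mode := int32(0400)
//	pod := projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", &mode, nil)
//	pod = projectedDownwardAPIVolumePodForModeTest(podName, "/etc/podinfo/podname", nil, &mode)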
|
||||
|
||||
func projectedDownwardAPIVolumePodForUpdateTest(name string, labels, annotations map[string]string, filePath string) *v1.Pod {
|
||||
pod := projectedDownwardAPIVolumeBasePod(name, labels, annotations)
|
||||
|
||||
pod.Spec.Containers = []v1.Container{
|
||||
{
|
||||
Name: "client-container",
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{"mounttest", "--break_on_expected_content=false", "--retry_time=1200", "--file_content_in_loop=" + filePath},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: "podinfo",
|
||||
MountPath: "/etc/podinfo",
|
||||
ReadOnly: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
applyLabelsAndAnnotationsToProjectedDownwardAPIPod(labels, annotations, pod)
|
||||
return pod
|
||||
}
|
||||
|
||||
func projectedDownwardAPIVolumeBasePod(name string, labels, annotations map[string]string) *v1.Pod {
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: labels,
|
||||
Annotations: annotations,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: "podinfo",
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{
|
||||
DownwardAPI: &v1.DownwardAPIProjection{
|
||||
Items: []v1.DownwardAPIVolumeFile{
|
||||
{
|
||||
Path: "podname",
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "metadata.name",
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "cpu_limit",
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
ContainerName: "client-container",
|
||||
Resource: "limits.cpu",
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "cpu_request",
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
ContainerName: "client-container",
|
||||
Resource: "requests.cpu",
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "memory_limit",
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
ContainerName: "client-container",
|
||||
Resource: "limits.memory",
|
||||
},
|
||||
},
|
||||
{
|
||||
Path: "memory_request",
|
||||
ResourceFieldRef: &v1.ResourceFieldSelector{
|
||||
ContainerName: "client-container",
|
||||
Resource: "requests.memory",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
return pod
|
||||
}
|
||||
|
||||
func applyLabelsAndAnnotationsToProjectedDownwardAPIPod(labels, annotations map[string]string, pod *v1.Pod) {
|
||||
if len(labels) > 0 {
|
||||
pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{
|
||||
Path: "labels",
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "metadata.labels",
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
if len(annotations) > 0 {
|
||||
pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{
|
||||
Path: "annotations",
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
APIVersion: "v1",
|
||||
FieldPath: "metadata.annotations",
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
583
test/e2e/common/storage/projected_secret.go
Normal file
@@ -0,0 +1,583 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("Projected secret", func() {
|
||||
f := framework.NewDefaultFramework("projected")
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, Secrets, volume mode default
|
||||
Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with default permission mode. Pod MUST be able to read the content of the key successfully and the mode MUST be -rw-r--r-- by default.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
|
||||
doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, Secrets, volume mode 0400
|
||||
Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with permission mode set to 0400 on the Pod. Pod MUST be able to read the content of the key successfully and the mode MUST be -r--------.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func() {
|
||||
defaultMode := int32(0400)
|
||||
doProjectedSecretE2EWithoutMapping(f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), nil, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, Secrets, non-root, custom fsGroup
|
||||
Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key. The volume has permission mode set to 0440, fsgroup set to 1001 and user set to non-root uid of 1000. Pod MUST be able to read the content of the key successfully and the mode MUST be -r--r-----.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func() {
|
||||
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
|
||||
fsGroup := int64(1001)
|
||||
doProjectedSecretE2EWithoutMapping(f, &defaultMode, "projected-secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, Secrets, mapped
|
||||
Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with default permission mode. The secret is also mapped to a custom path. Pod MUST be able to read the content of the key successfully and the mode MUST be -r-------- on the mapped volume.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
|
||||
doProjectedSecretE2EWithMapping(f, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, Secrets, mapped, volume mode 0400
|
||||
Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key with permission mode set to 0400. The secret is also mapped to a specific name. Pod MUST be able to read the content of the key successfully and the mode MUST be -r-------- on the mapped volume.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func() {
|
||||
mode := int32(0400)
|
||||
doProjectedSecretE2EWithMapping(f, &mode)
|
||||
})
|
||||
|
||||
ginkgo.It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func() {
|
||||
var (
|
||||
namespace2 *v1.Namespace
|
||||
err error
|
||||
secret2Name = "projected-secret-test-" + string(uuid.NewUUID())
|
||||
)
|
||||
|
||||
if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil {
|
||||
framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err)
|
||||
}
|
||||
|
||||
secret2 := secretForTest(namespace2.Name, secret2Name)
|
||||
secret2.Data = map[string][]byte{
|
||||
"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
|
||||
}
|
||||
if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(context.TODO(), secret2, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
|
||||
}
|
||||
doProjectedSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, Secrets, mapped, multiple paths
|
||||
Description: A Pod is created with a projected volume source 'secret' to store a secret with a specified key. The secret is mapped to two different volume mounts. Pod MUST be able to read the content of the key successfully from the two volume mounts and the mode MUST be -r-------- on the mapped volumes.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func() {
|
||||
// This test ensures that the same secret can be mounted in multiple
|
||||
// volumes in the same pod. This test case exists to prevent
|
||||
// regressions that break this use-case.
|
||||
var (
|
||||
name = "projected-secret-test-" + string(uuid.NewUUID())
|
||||
volumeName = "projected-secret-volume"
|
||||
volumeMountPath = "/etc/projected-secret-volume"
|
||||
volumeName2 = "projected-secret-volume-2"
|
||||
volumeMountPath2 = "/etc/projected-secret-volume-2"
|
||||
secret = secretForTest(f.Namespace.Name, name)
|
||||
)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
|
||||
var err error
|
||||
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-projected-secrets-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{
|
||||
Secret: &v1.SecretProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: name,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: volumeName2,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{
|
||||
Secret: &v1.SecretProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: name,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "secret-volume-test",
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{
|
||||
"mounttest",
|
||||
"--file_content=/etc/projected-secret-volume/data-1",
|
||||
"--file_mode=/etc/projected-secret-volume/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: volumeMountPath,
|
||||
ReadOnly: true,
|
||||
},
|
||||
{
|
||||
Name: volumeName2,
|
||||
MountPath: volumeMountPath2,
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
fileModeRegexp := getFileModeRegex("/etc/projected-secret-volume/data-1", nil)
|
||||
f.TestContainerOutputRegexp("consume secrets", pod, 0, []string{
|
||||
"content of file \"/etc/projected-secret-volume/data-1\": value-1",
|
||||
fileModeRegexp,
|
||||
})
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Projected Volume, Secrets, create, update delete
|
||||
Description: Create a Pod with three containers with secrets, namely a create, an update and a delete container. The create container, when started, MUST NOT have a secret; the update and delete containers MUST be created with a secret value. Create the secret for the create container: the Pod MUST be able to read the secret from the create container. Update the secret for the update container: the Pod MUST be able to read the updated secret value. Delete the secret for the delete container: the Pod MUST fail to read the secret from the delete container.
|
||||
*/
|
||||
framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
|
||||
podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
|
||||
containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
|
||||
trueVal := true
|
||||
volumeMountPath := "/etc/projected-secret-volumes"
|
||||
|
||||
deleteName := "s-test-opt-del-" + string(uuid.NewUUID())
|
||||
deleteContainerName := "dels-volume-test"
|
||||
deleteVolumeName := "deletes-volume"
|
||||
deleteSecret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: deleteName,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"data-1": []byte("value-1"),
|
||||
},
|
||||
}
|
||||
|
||||
updateName := "s-test-opt-upd-" + string(uuid.NewUUID())
|
||||
updateContainerName := "upds-volume-test"
|
||||
updateVolumeName := "updates-volume"
|
||||
updateSecret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: updateName,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"data-1": []byte("value-1"),
|
||||
},
|
||||
}
|
||||
|
||||
createName := "s-test-opt-create-" + string(uuid.NewUUID())
|
||||
createContainerName := "creates-volume-test"
|
||||
createVolumeName := "creates-volume"
|
||||
createSecret := &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: f.Namespace.Name,
|
||||
Name: createName,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"data-1": []byte("value-1"),
|
||||
},
|
||||
}
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
|
||||
var err error
|
||||
if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), deleteSecret, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
|
||||
}
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
|
||||
if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), updateSecret, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-projected-secrets-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: deleteVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{
|
||||
Secret: &v1.SecretProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: deleteName,
|
||||
},
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: updateVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{
|
||||
Secret: &v1.SecretProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: updateName,
|
||||
},
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: createVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{
|
||||
Secret: &v1.SecretProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: createName,
|
||||
},
|
||||
Optional: &trueVal,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: deleteContainerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{"mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-secret-volumes/delete/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: deleteVolumeName,
|
||||
MountPath: path.Join(volumeMountPath, "delete"),
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: updateContainerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{"mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-secret-volumes/update/data-3"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: updateVolumeName,
|
||||
MountPath: path.Join(volumeMountPath, "update"),
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: createContainerName,
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{"mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/projected-secret-volumes/create/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: createVolumeName,
|
||||
MountPath: path.Join(volumeMountPath, "create"),
|
||||
ReadOnly: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
ginkgo.By("Creating the pod")
|
||||
f.PodClient().CreateSync(pod)
|
||||
|
||||
pollCreateLogs := func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
|
||||
}
|
||||
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/create/data-1"))
|
||||
|
||||
pollUpdateLogs := func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
|
||||
}
|
||||
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/update/data-3"))
|
||||
|
||||
pollDeleteLogs := func() (string, error) {
|
||||
return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
|
||||
}
|
||||
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
|
||||
err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), deleteSecret.Name, metav1.DeleteOptions{})
|
||||
framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
|
||||
updateSecret.ResourceVersion = "" // to force update
|
||||
delete(updateSecret.Data, "data-1")
|
||||
updateSecret.Data["data-3"] = []byte("value-3")
|
||||
_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), updateSecret, metav1.UpdateOptions{})
|
||||
framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
|
||||
if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), createSecret, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
|
||||
}
|
||||
|
||||
ginkgo.By("waiting to observe update in volume")
|
||||
|
||||
gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
|
||||
gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
|
||||
gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/projected-secret-volumes/delete/data-1"))
|
||||
})
|
||||
|
||||
// The pod stays in the Pending phase during volume creation until the secret objects are available,
// or until mounting the secret volume times out. There is no secret object defined for the pod,
// so the mount should time out unless the volume is marked optional.
// Slow (~5 mins)
|
||||
ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func() {
|
||||
volumeMountPath := "/etc/projected-secret-volumes"
|
||||
podName := "pod-secrets-" + string(uuid.NewUUID())
|
||||
err := createNonOptionalSecretPod(f, volumeMountPath, podName)
|
||||
framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
|
||||
})
|
||||
|
||||
// A secret object is defined for the pod. If a key is specified which is not present in the secret,
// the volume setup will error during pod creation unless the key is marked optional.
// Slow (~5 mins)
|
||||
ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func() {
|
||||
volumeMountPath := "/etc/secret-volumes"
|
||||
podName := "pod-secrets-" + string(uuid.NewUUID())
|
||||
err := createNonOptionalSecretPodWithSecret(f, volumeMountPath, podName)
|
||||
framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
|
||||
})
|
||||
})
|
||||
|
||||
func doProjectedSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32,
|
||||
secretName string, fsGroup *int64, uid *int64) {
|
||||
var (
|
||||
volumeName = "projected-secret-volume"
|
||||
volumeMountPath = "/etc/projected-secret-volume"
|
||||
secret = secretForTest(f.Namespace.Name, secretName)
|
||||
)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
|
||||
var err error
|
||||
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-projected-secrets-" + string(uuid.NewUUID()),
|
||||
Namespace: f.Namespace.Name,
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{
|
||||
Secret: &v1.SecretProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: secretName,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "projected-secret-volume-test",
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{
|
||||
"mounttest",
|
||||
"--file_content=/etc/projected-secret-volume/data-1",
|
||||
"--file_mode=/etc/projected-secret-volume/data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: volumeMountPath,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
if defaultMode != nil {
|
||||
//pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].Secret.DefaultMode = defaultMode
|
||||
pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = defaultMode
|
||||
}
|
||||
|
||||
if fsGroup != nil || uid != nil {
|
||||
pod.Spec.SecurityContext = &v1.PodSecurityContext{
|
||||
FSGroup: fsGroup,
|
||||
RunAsUser: uid,
|
||||
}
|
||||
}
|
||||
|
||||
fileModeRegexp := getFileModeRegex("/etc/projected-secret-volume/data-1", defaultMode)
|
||||
expectedOutput := []string{
|
||||
"content of file \"/etc/projected-secret-volume/data-1\": value-1",
|
||||
fileModeRegexp,
|
||||
}
|
||||
|
||||
f.TestContainerOutputRegexp("consume secrets", pod, 0, expectedOutput)
|
||||
}
|
||||
|
||||
func doProjectedSecretE2EWithMapping(f *framework.Framework, mode *int32) {
|
||||
var (
|
||||
name = "projected-secret-test-map-" + string(uuid.NewUUID())
|
||||
volumeName = "projected-secret-volume"
|
||||
volumeMountPath = "/etc/projected-secret-volume"
|
||||
secret = secretForTest(f.Namespace.Name, name)
|
||||
)
|
||||
|
||||
ginkgo.By(fmt.Sprintf("Creating projection with secret that has name %s", secret.Name))
|
||||
var err error
|
||||
if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
|
||||
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
|
||||
}
|
||||
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "pod-projected-secrets-" + string(uuid.NewUUID()),
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
Volumes: []v1.Volume{
|
||||
{
|
||||
Name: volumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
Projected: &v1.ProjectedVolumeSource{
|
||||
Sources: []v1.VolumeProjection{
|
||||
{
|
||||
Secret: &v1.SecretProjection{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: name,
|
||||
},
|
||||
Items: []v1.KeyToPath{
|
||||
{
|
||||
Key: "data-1",
|
||||
Path: "new-path-data-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "projected-secret-volume-test",
|
||||
Image: imageutils.GetE2EImage(imageutils.Agnhost),
|
||||
Args: []string{
|
||||
"mounttest",
|
||||
"--file_content=/etc/projected-secret-volume/new-path-data-1",
|
||||
"--file_mode=/etc/projected-secret-volume/new-path-data-1"},
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
{
|
||||
Name: volumeName,
|
||||
MountPath: volumeMountPath,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: v1.RestartPolicyNever,
|
||||
},
|
||||
}
|
||||
|
||||
if mode != nil {
|
||||
//pod.Spec.Volumes[0].VolumeSource.Projected.Sources[0].Secret.Items[0].Mode = mode
|
||||
pod.Spec.Volumes[0].VolumeSource.Projected.DefaultMode = mode
|
||||
}
|
||||
|
||||
fileModeRegexp := getFileModeRegex("/etc/projected-secret-volume/new-path-data-1", mode)
|
||||
expectedOutput := []string{
|
||||
"content of file \"/etc/projected-secret-volume/new-path-data-1\": value-1",
|
||||
fileModeRegexp,
|
||||
}
|
||||
|
||||
f.TestContainerOutputRegexp("consume secrets", pod, 0, expectedOutput)
|
||||
}
|
||||
707
test/e2e/common/storage/secrets_volume.go
Normal file
@@ -0,0 +1,707 @@
|
||||
/*
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/uuid"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
imageutils "k8s.io/kubernetes/test/utils/image"
|
||||
|
||||
"github.com/onsi/ginkgo"
|
||||
"github.com/onsi/gomega"
|
||||
)
|
||||
|
||||
var _ = SIGDescribe("Secrets", func() {
|
||||
f := framework.NewDefaultFramework("secrets")
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Secrets Volume, default
|
||||
Description: Create a secret. Create a Pod with secret volume source configured into the container. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -rw-r--r-- by default.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume [NodeConformance]", func() {
|
||||
doSecretE2EWithoutMapping(f, nil /* default mode */, "secret-test-"+string(uuid.NewUUID()), nil, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Secrets Volume, volume mode 0400
|
||||
Description: Create a secret. Create a Pod with secret volume source configured into the container with file mode set to 0400. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -r--------.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with defaultMode set [LinuxOnly] [NodeConformance]", func() {
|
||||
defaultMode := int32(0400)
|
||||
doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), nil, nil)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Secrets Volume, volume mode 0440, fsGroup 1001 and uid 1000
|
||||
Description: Create a secret. Create a Pod with secret volume source configured into the container with file mode set to 0440 as a non-root user with uid 1000 and fsGroup id 1001. Pod MUST be able to read the secret from the mounted volume from the container runtime and the file mode of the secret MUST be -r--r----- by default.
|
||||
This test is marked LinuxOnly since Windows does not support setting specific file permissions, or running as UID / GID.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [LinuxOnly] [NodeConformance]", func() {
|
||||
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
|
||||
fsGroup := int64(1001)
|
||||
doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), &fsGroup, &nonRootTestUserID)
|
||||
})
|
||||
|
||||
/*
|
||||
Release: v1.9
|
||||
Testname: Secrets Volume, mapping
|
||||
Description: Create a secret. Create a Pod with secret volume source configured into the container with a custom path. Pod MUST be able to read the secret from the mounted volume from the specified custom path. The file mode of the secret MUST be -rw-r--r-- by default.
|
||||
*/
|
||||
framework.ConformanceIt("should be consumable from pods in volume with mappings [NodeConformance]", func() {
|
||||
doSecretE2EWithMapping(f, nil)
|
||||
})
|
||||
|
||||
	/*
		Release: v1.9
		Testname: Secrets Volume, mapping, volume mode 0400
		Description: Create a secret. Create a Pod with secret volume source configured into the container with a custom path and item file mode set to 0400. Pod MUST be able to read the secret from the mounted volume at the specified custom path. The file mode of the secret MUST be -r--------.
		This test is marked LinuxOnly since Windows does not support setting specific file permissions.
	*/
	framework.ConformanceIt("should be consumable from pods in volume with mappings and Item Mode set [LinuxOnly] [NodeConformance]", func() {
		mode := int32(0400)
		doSecretE2EWithMapping(f, &mode)
	})

	/*
		Release: v1.12
		Testname: Secrets Volume, volume mode default, secret with same name in different namespace
		Description: Create secrets with the same name in two different namespaces. Create a Pod with secret volume source configured into the container. The Pod MUST be able to read the secret from the mounted volume, and MUST only see the secret associated with the namespace in which the Pod is created. The file mode of the secret MUST be -rw-r--r-- by default.
	*/
	framework.ConformanceIt("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [NodeConformance]", func() {
		var (
			namespace2  *v1.Namespace
			err         error
			secret2Name = "secret-test-" + string(uuid.NewUUID())
		)

		if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil {
			framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err)
		}

		secret2 := secretForTest(namespace2.Name, secret2Name)
		secret2.Data = map[string][]byte{
			"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
		}
		if secret2, err = f.ClientSet.CoreV1().Secrets(namespace2.Name).Create(context.TODO(), secret2, metav1.CreateOptions{}); err != nil {
			framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
		}
		doSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil)
	})

	/*
		Release: v1.9
		Testname: Secrets Volume, mapping multiple volume paths
		Description: Create a secret. Create a Pod with two secret volume sources configured into the container at two different custom paths. Pod MUST be able to read the secret from both mounted volumes at the two specified custom paths.
	*/
	framework.ConformanceIt("should be consumable in multiple volumes in a pod [NodeConformance]", func() {
		// This test ensures that the same secret can be mounted in multiple
		// volumes in the same pod. This test case exists to prevent
		// regressions that break this use-case.
		var (
			name             = "secret-test-" + string(uuid.NewUUID())
			volumeName       = "secret-volume"
			volumeMountPath  = "/etc/secret-volume"
			volumeName2      = "secret-volume-2"
			volumeMountPath2 = "/etc/secret-volume-2"
			secret           = secretForTest(f.Namespace.Name, name)
		)

		ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
		var err error
		if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
			framework.Failf("unable to create test secret %s: %v", secret.Name, err)
		}

		pod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod-secrets-" + string(uuid.NewUUID()),
			},
			Spec: v1.PodSpec{
				Volumes: []v1.Volume{
					{
						Name: volumeName,
						VolumeSource: v1.VolumeSource{
							Secret: &v1.SecretVolumeSource{
								SecretName: name,
							},
						},
					},
					{
						Name: volumeName2,
						VolumeSource: v1.VolumeSource{
							Secret: &v1.SecretVolumeSource{
								SecretName: name,
							},
						},
					},
				},
				Containers: []v1.Container{
					{
						Name:  "secret-volume-test",
						Image: imageutils.GetE2EImage(imageutils.Agnhost),
						Args: []string{
							"mounttest",
							"--file_content=/etc/secret-volume/data-1",
							"--file_mode=/etc/secret-volume/data-1"},
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      volumeName,
								MountPath: volumeMountPath,
								ReadOnly:  true,
							},
							{
								Name:      volumeName2,
								MountPath: volumeMountPath2,
								ReadOnly:  true,
							},
						},
					},
				},
				RestartPolicy: v1.RestartPolicyNever,
			},
		}

		fileModeRegexp := getFileModeRegex("/etc/secret-volume/data-1", nil)
		f.TestContainerOutputRegexp("consume secrets", pod, 0, []string{
			"content of file \"/etc/secret-volume/data-1\": value-1",
			fileModeRegexp,
		})
	})

	/*
		Release: v1.9
		Testname: Secrets Volume, create, update and delete
		Description: Create a Pod with three containers mounting secret volume sources, namely a create, an update and a delete container. When started, the create container MUST NOT have its secret available, while the update and delete containers MUST see an existing secret value. Create the secret referenced by the create container; the Pod MUST then be able to read the secret from the create container. Update the secret referenced by the update container; the Pod MUST be able to read the updated secret value. Delete the secret referenced by the delete container; the Pod MUST then fail to read the secret from the delete container.
	*/
	framework.ConformanceIt("optional updates should be reflected in volume [NodeConformance]", func() {
		podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
		containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
		trueVal := true
		volumeMountPath := "/etc/secret-volumes"

		deleteName := "s-test-opt-del-" + string(uuid.NewUUID())
		deleteContainerName := "dels-volume-test"
		deleteVolumeName := "deletes-volume"
		deleteSecret := &v1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: f.Namespace.Name,
				Name:      deleteName,
			},
			Data: map[string][]byte{
				"data-1": []byte("value-1"),
			},
		}

		updateName := "s-test-opt-upd-" + string(uuid.NewUUID())
		updateContainerName := "upds-volume-test"
		updateVolumeName := "updates-volume"
		updateSecret := &v1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: f.Namespace.Name,
				Name:      updateName,
			},
			Data: map[string][]byte{
				"data-1": []byte("value-1"),
			},
		}

		createName := "s-test-opt-create-" + string(uuid.NewUUID())
		createContainerName := "creates-volume-test"
		createVolumeName := "creates-volume"
		createSecret := &v1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: f.Namespace.Name,
				Name:      createName,
			},
			Data: map[string][]byte{
				"data-1": []byte("value-1"),
			},
		}

		ginkgo.By(fmt.Sprintf("Creating secret with name %s", deleteSecret.Name))
		var err error
		if deleteSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), deleteSecret, metav1.CreateOptions{}); err != nil {
			framework.Failf("unable to create test secret %s: %v", deleteSecret.Name, err)
		}

		ginkgo.By(fmt.Sprintf("Creating secret with name %s", updateSecret.Name))
		if updateSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), updateSecret, metav1.CreateOptions{}); err != nil {
			framework.Failf("unable to create test secret %s: %v", updateSecret.Name, err)
		}

		pod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod-secrets-" + string(uuid.NewUUID()),
			},
			Spec: v1.PodSpec{
				Volumes: []v1.Volume{
					{
						Name: deleteVolumeName,
						VolumeSource: v1.VolumeSource{
							Secret: &v1.SecretVolumeSource{
								SecretName: deleteName,
								Optional:   &trueVal,
							},
						},
					},
					{
						Name: updateVolumeName,
						VolumeSource: v1.VolumeSource{
							Secret: &v1.SecretVolumeSource{
								SecretName: updateName,
								Optional:   &trueVal,
							},
						},
					},
					{
						Name: createVolumeName,
						VolumeSource: v1.VolumeSource{
							Secret: &v1.SecretVolumeSource{
								SecretName: createName,
								Optional:   &trueVal,
							},
						},
					},
				},
				Containers: []v1.Container{
					{
						Name:  deleteContainerName,
						Image: imageutils.GetE2EImage(imageutils.Agnhost),
						Args:  []string{"mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/delete/data-1"},
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      deleteVolumeName,
								MountPath: path.Join(volumeMountPath, "delete"),
								ReadOnly:  true,
							},
						},
					},
					{
						Name:  updateContainerName,
						Image: imageutils.GetE2EImage(imageutils.Agnhost),
						Args:  []string{"mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/update/data-3"},
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      updateVolumeName,
								MountPath: path.Join(volumeMountPath, "update"),
								ReadOnly:  true,
							},
						},
					},
					{
						Name:  createContainerName,
						Image: imageutils.GetE2EImage(imageutils.Agnhost),
						Args:  []string{"mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/create/data-1"},
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      createVolumeName,
								MountPath: path.Join(volumeMountPath, "create"),
								ReadOnly:  true,
							},
						},
					},
				},
				RestartPolicy: v1.RestartPolicyNever,
			},
		}
		ginkgo.By("Creating the pod")
		f.PodClient().CreateSync(pod)

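		// Verify the initial state before mutating the secrets: the "create"
		// secret does not exist yet, the "update" secret has no data-3 key,
		// and the "delete" secret still serves data-1.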
		pollCreateLogs := func() (string, error) {
			return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, createContainerName)
		}
		gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/create/data-1"))

		pollUpdateLogs := func() (string, error) {
			return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, updateContainerName)
		}
		gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/update/data-3"))

		pollDeleteLogs := func() (string, error) {
			return e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, deleteContainerName)
		}
		gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))

		ginkgo.By(fmt.Sprintf("Deleting secret %v", deleteSecret.Name))
		err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), deleteSecret.Name, metav1.DeleteOptions{})
		framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", deleteSecret.Name, f.Namespace.Name)

		ginkgo.By(fmt.Sprintf("Updating secret %v", updateSecret.Name))
		updateSecret.ResourceVersion = "" // to force update
		delete(updateSecret.Data, "data-1")
		updateSecret.Data["data-3"] = []byte("value-3")
		_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), updateSecret, metav1.UpdateOptions{})
		framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", updateSecret.Name, f.Namespace.Name)

		ginkgo.By(fmt.Sprintf("Creating secret with name %s", createSecret.Name))
		if createSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), createSecret, metav1.CreateOptions{}); err != nil {
			framework.Failf("unable to create test secret %s: %v", createSecret.Name, err)
		}

		ginkgo.By("waiting to observe update in volume")

		gomega.Eventually(pollCreateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-1"))
		gomega.Eventually(pollUpdateLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("value-3"))
		gomega.Eventually(pollDeleteLogs, podLogTimeout, framework.Poll).Should(gomega.ContainSubstring("Error reading file /etc/secret-volumes/delete/data-1"))
	})

	/*
		Release: v1.21
		Testname: Secrets Volume, immutability
		Description: Create a secret. Update its data field; the update MUST succeed.
		Mark the secret as immutable; the update MUST succeed. Try to update its data; the update MUST fail.
		Try to mark the secret back as not immutable; the update MUST fail.
		Try to update the secret's metadata (labels); the update MUST succeed.
		Try to delete the secret; the deletion MUST succeed.
	*/
	framework.ConformanceIt("should be immutable if `immutable` field is set", func() {
		name := "immutable"
		secret := secretForTest(f.Namespace.Name, name)

		currentSecret, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{})
		framework.ExpectNoError(err, "Failed to create secret %q in namespace %q", secret.Name, secret.Namespace)

		currentSecret.Data["data-4"] = []byte("value-4\n")
		currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{})
		framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", secret.Name, secret.Namespace)

		// Mark secret as immutable.
		trueVal := true
		currentSecret.Immutable = &trueVal
		currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{})
		framework.ExpectNoError(err, "Failed to mark secret %q in namespace %q as immutable", secret.Name, secret.Namespace)

		// Ensure data can't be changed now.
		currentSecret.Data["data-5"] = []byte("value-5\n")
		_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{})
		framework.ExpectEqual(apierrors.IsInvalid(err), true)

		// Ensure secret can't be switched from immutable to mutable.
		currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{})
		framework.ExpectNoError(err, "Failed to get secret %q in namespace %q", secret.Name, secret.Namespace)
		framework.ExpectEqual(*currentSecret.Immutable, true)

		falseVal := false
		currentSecret.Immutable = &falseVal
		_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{})
		framework.ExpectEqual(apierrors.IsInvalid(err), true)

		// Ensure that metadata can be changed.
		currentSecret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(context.TODO(), name, metav1.GetOptions{})
		framework.ExpectNoError(err, "Failed to get secret %q in namespace %q", secret.Name, secret.Namespace)
		currentSecret.Labels = map[string]string{"label1": "value1"}
		_, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Update(context.TODO(), currentSecret, metav1.UpdateOptions{})
		framework.ExpectNoError(err, "Failed to update secret %q in namespace %q", secret.Name, secret.Namespace)

		// Ensure that immutable secret can be deleted.
		err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(context.TODO(), name, metav1.DeleteOptions{})
		framework.ExpectNoError(err, "Failed to delete secret %q in namespace %q", secret.Name, secret.Namespace)
	})

	// Pod creation stays pending during volume setup until the referenced secret
	// object is available, or until mounting the secret volume times out. No secret
	// object is defined for this pod, so pod startup should time out unless the
	// secret is marked optional.
	// Slow (~5 mins)
	ginkgo.It("Should fail non-optional pod creation due to secret object does not exist [Slow]", func() {
		volumeMountPath := "/etc/secret-volumes"
		podName := "pod-secrets-" + string(uuid.NewUUID())
		err := createNonOptionalSecretPod(f, volumeMountPath, podName)
		framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
	})

	// A secret object is defined for the pod, but the volume references a key that
	// is not present in the secret, so volume setup during pod creation will fail
	// unless the secret is marked optional.
	// Slow (~5 mins)
	ginkgo.It("Should fail non-optional pod creation due to the key in the secret object does not exist [Slow]", func() {
		volumeMountPath := "/etc/secret-volumes"
		podName := "pod-secrets-" + string(uuid.NewUUID())
		err := createNonOptionalSecretPodWithSecret(f, volumeMountPath, podName)
		framework.ExpectError(err, "created pod %q with non-optional secret in namespace %q", podName, f.Namespace.Name)
	})
})

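// secretForTest returns a Secret fixture in the given namespace with three
// predictable data keys (data-1 through data-3) used by the volume tests above.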
func secretForTest(namespace, name string) *v1.Secret {
	return &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
			Name:      name,
		},
		Data: map[string][]byte{
			"data-1": []byte("value-1\n"),
			"data-2": []byte("value-2\n"),
			"data-3": []byte("value-3\n"),
		},
	}
}

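// doSecretE2EWithoutMapping creates a secret and a pod that mounts it at
// /etc/secret-volume without remapping any keys, then verifies the mounted
// file content and mode. The optional defaultMode, fsGroup and uid parameters
// exercise the corresponding volume and security-context settings.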
func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secretName string,
	fsGroup *int64, uid *int64) {
	var (
		volumeName      = "secret-volume"
		volumeMountPath = "/etc/secret-volume"
		secret          = secretForTest(f.Namespace.Name, secretName)
	)

	ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
	var err error
	if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
		framework.Failf("unable to create test secret %s: %v", secret.Name, err)
	}

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "pod-secrets-" + string(uuid.NewUUID()),
			Namespace: f.Namespace.Name,
		},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					Name: volumeName,
					VolumeSource: v1.VolumeSource{
						Secret: &v1.SecretVolumeSource{
							SecretName: secretName,
						},
					},
				},
			},
			Containers: []v1.Container{
				{
					Name:  "secret-volume-test",
					Image: imageutils.GetE2EImage(imageutils.Agnhost),
					Args: []string{
						"mounttest",
						"--file_content=/etc/secret-volume/data-1",
						"--file_mode=/etc/secret-volume/data-1"},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      volumeName,
							MountPath: volumeMountPath,
						},
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}

	if defaultMode != nil {
		pod.Spec.Volumes[0].VolumeSource.Secret.DefaultMode = defaultMode
	}

	if fsGroup != nil || uid != nil {
		pod.Spec.SecurityContext = &v1.PodSecurityContext{
			FSGroup:   fsGroup,
			RunAsUser: uid,
		}
	}

	fileModeRegexp := getFileModeRegex("/etc/secret-volume/data-1", defaultMode)
	expectedOutput := []string{
		"content of file \"/etc/secret-volume/data-1\": value-1",
		fileModeRegexp,
	}

	f.TestContainerOutputRegexp("consume secrets", pod, 0, expectedOutput)
}

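// doSecretE2EWithMapping creates a secret and a pod that projects the "data-1"
// key to a custom path (new-path-data-1), optionally with an explicit item
// mode, then verifies the mounted file content and mode.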
func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
	var (
		name            = "secret-test-map-" + string(uuid.NewUUID())
		volumeName      = "secret-volume"
		volumeMountPath = "/etc/secret-volume"
		secret          = secretForTest(f.Namespace.Name, name)
	)

	ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
	var err error
	if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
		framework.Failf("unable to create test secret %s: %v", secret.Name, err)
	}

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pod-secrets-" + string(uuid.NewUUID()),
		},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					Name: volumeName,
					VolumeSource: v1.VolumeSource{
						Secret: &v1.SecretVolumeSource{
							SecretName: name,
							Items: []v1.KeyToPath{
								{
									Key:  "data-1",
									Path: "new-path-data-1",
								},
							},
						},
					},
				},
			},
			Containers: []v1.Container{
				{
					Name:  "secret-volume-test",
					Image: imageutils.GetE2EImage(imageutils.Agnhost),
					Args: []string{
						"mounttest",
						"--file_content=/etc/secret-volume/new-path-data-1",
						"--file_mode=/etc/secret-volume/new-path-data-1"},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      volumeName,
							MountPath: volumeMountPath,
						},
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}

	if mode != nil {
		pod.Spec.Volumes[0].VolumeSource.Secret.Items[0].Mode = mode
	}

	fileModeRegexp := getFileModeRegex("/etc/secret-volume/new-path-data-1", mode)
	expectedOutput := []string{
		"content of file \"/etc/secret-volume/new-path-data-1\": value-1",
		fileModeRegexp,
	}

	f.TestContainerOutputRegexp("consume secrets", pod, 0, expectedOutput)
}

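// createNonOptionalSecretPod creates a pod that mounts a non-optional secret
// volume whose referenced secret does not exist, then waits for the pod to
// reach Running. The returned error is expected to be a startup timeout.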
func createNonOptionalSecretPod(f *framework.Framework, volumeMountPath, podName string) error {
	podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
	containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
	falseValue := false

	createName := "s-test-opt-create-" + string(uuid.NewUUID())
	createContainerName := "creates-volume-test"
	createVolumeName := "creates-volume"

	// Create a pod whose secret volume source references a secret that has not been created.
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
		},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					Name: createVolumeName,
					VolumeSource: v1.VolumeSource{
						Secret: &v1.SecretVolumeSource{
							SecretName: createName,
							Optional:   &falseValue,
						},
					},
				},
			},
			Containers: []v1.Container{
				{
					Name:  createContainerName,
					Image: imageutils.GetE2EImage(imageutils.Agnhost),
					Args:  []string{"mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/create/data-1"},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      createVolumeName,
							MountPath: path.Join(volumeMountPath, "create"),
							ReadOnly:  true,
						},
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
	ginkgo.By("Creating the pod")
	pod = f.PodClient().Create(pod)
	return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
}

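// createNonOptionalSecretPodWithSecret creates a secret, then creates a pod
// whose non-optional secret volume references a key that is not present in
// that secret, and waits for the pod to reach Running. The returned error is
// expected to be a startup timeout.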
func createNonOptionalSecretPodWithSecret(f *framework.Framework, volumeMountPath, podName string) error {
	podLogTimeout := e2epod.GetPodSecretUpdateTimeout(f.ClientSet)
	containerTimeoutArg := fmt.Sprintf("--retry_time=%v", int(podLogTimeout.Seconds()))
	falseValue := false

	createName := "s-test-opt-create-" + string(uuid.NewUUID())
	createContainerName := "creates-volume-test"
	createVolumeName := "creates-volume"

	secret := secretForTest(f.Namespace.Name, createName)

	ginkgo.By(fmt.Sprintf("Creating secret with name %s", secret.Name))
	var err error
	if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(context.TODO(), secret, metav1.CreateOptions{}); err != nil {
		framework.Failf("unable to create test secret %s: %v", secret.Name, err)
	}
	// Create a pod whose secret volume references a key that is not present in the secret object.
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
		},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					Name: createVolumeName,
					VolumeSource: v1.VolumeSource{
						Secret: &v1.SecretVolumeSource{
							SecretName: createName,
							Items: []v1.KeyToPath{
								{
									Key:  "data_4",
									Path: "value-4\n",
								},
							},
							Optional: &falseValue,
						},
					},
				},
			},
			Containers: []v1.Container{
				{
					Name:  createContainerName,
					Image: imageutils.GetE2EImage(imageutils.Agnhost),
					Args:  []string{"mounttest", "--break_on_expected_content=false", containerTimeoutArg, "--file_content_in_loop=/etc/secret-volumes/create/data-1"},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      createVolumeName,
							MountPath: path.Join(volumeMountPath, "create"),
							ReadOnly:  true,
						},
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
	ginkgo.By("Creating the pod")
	pod = f.PodClient().Create(pod)
	return e2epod.WaitForPodNameRunningInNamespace(f.ClientSet, pod.Name, f.Namespace.Name)
}
74
test/e2e/common/storage/util.go
Normal file
74
test/e2e/common/storage/util.go
Normal file
@@ -0,0 +1,74 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"fmt"
	"os"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

var (
	// non-Administrator Windows user used in tests. This is the Windows equivalent of the Linux non-root UID usage.
	nonAdminTestUserName = "ContainerUser"
	// non-root UID used in tests.
	nonRootTestUserID = int64(1000)
)

// setPodNonRootUser configures the Pod to run as a non-root user.
// For Windows, it sets the RunAsUserName field to ContainerUser, and for Linux, it sets the RunAsUser field to 1000.
func setPodNonRootUser(pod *v1.Pod) {
	if framework.NodeOSDistroIs("windows") {
		pod.Spec.SecurityContext.WindowsOptions = &v1.WindowsSecurityContextOptions{RunAsUserName: &nonAdminTestUserName}
	} else {
		pod.Spec.SecurityContext.RunAsUser = &nonRootTestUserID
	}
}

// getFileModeRegex returns a file mode related regex which should be matched by the mounttest pods' output.
// If the given mask is nil, then the regex will contain the default OS file modes, which are 0644 for Linux and 0775 for Windows.
func getFileModeRegex(filePath string, mask *int32) string {
	var (
		linuxMask   int32
		windowsMask int32
	)
	if mask == nil {
		linuxMask = int32(0644)
		windowsMask = int32(0775)
	} else {
		linuxMask = *mask
		windowsMask = *mask
	}

	linuxOutput := fmt.Sprintf("mode of file \"%s\": %v", filePath, os.FileMode(linuxMask))
	windowsOutput := fmt.Sprintf("mode of Windows file \"%v\": %s", filePath, os.FileMode(windowsMask))

	return fmt.Sprintf("(%s|%s)", linuxOutput, windowsOutput)
}

// createMounts creates a v1.VolumeMount list with a single element.
func createMounts(volumeName, volumeMountPath string, readOnly bool) []v1.VolumeMount {
	return []v1.VolumeMount{
		{
			Name:      volumeName,
			MountPath: volumeMountPath,
			ReadOnly:  readOnly,
		},
	}
}