GenericEphemeralVolume: feature gate, API, documentation
As explained in https://github.com/kubernetes/enhancements/tree/master/keps/sig-storage/1698-generic-ephemeral-volumes, CSI inline volumes are not suitable for more "normal" kinds of storage systems. For those a new approach is needed: "generic ephemeral inline volumes".
parent 896da2253c
commit c05c8e915b
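For orientation, here is a minimal sketch (not part of this commit) of what a pod using a generic ephemeral inline volume looks like with the API added below. All names and the storage class are illustrative; per the rules introduced in this commit, the PVC created for it would be named "my-frontend-scratch".

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var storageClassName = "scratch-storage-class" // hypothetical storage class

// examplePod declares one generic ephemeral volume named "scratch".
var examplePod = v1.Pod{
	ObjectMeta: metav1.ObjectMeta{Name: "my-frontend", Namespace: "ns"},
	Spec: v1.PodSpec{
		Containers: []v1.Container{{Name: "app", Image: "busybox"}},
		Volumes: []v1.Volume{{
			Name: "scratch",
			VolumeSource: v1.VolumeSource{
				Ephemeral: &v1.EphemeralVolumeSource{
					VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
						Spec: v1.PersistentVolumeClaimSpec{
							AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
							StorageClassName: &storageClassName,
							Resources: v1.ResourceRequirements{
								Requests: v1.ResourceList{
									v1.ResourceStorage: resource.MustParse("1Gi"),
								},
							},
						},
					},
				},
			},
		}},
	},
}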
@@ -431,6 +431,7 @@ func dropDisabledFields(
	dropDisabledProcMountField(podSpec, oldPodSpec)

	dropDisabledCSIVolumeSourceAlphaFields(podSpec, oldPodSpec)
	dropDisabledEphemeralVolumeSourceAlphaFields(podSpec, oldPodSpec)

	if !utilfeature.DefaultFeatureGate.Enabled(features.NonPreemptingPriority) &&
		!podPriorityInUse(oldPodSpec) {
@@ -499,6 +500,16 @@ func dropDisabledCSIVolumeSourceAlphaFields(podSpec, oldPodSpec *api.PodSpec) {
	}
}

// dropDisabledEphemeralVolumeSourceAlphaFields removes disabled alpha fields from []EphemeralVolumeSource.
// This should be called from PrepareForCreate/PrepareForUpdate for all pod spec resources containing an EphemeralVolumeSource.
func dropDisabledEphemeralVolumeSourceAlphaFields(podSpec, oldPodSpec *api.PodSpec) {
	if !utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume) && !csiInUse(oldPodSpec) {
		for i := range podSpec.Volumes {
			podSpec.Volumes[i].Ephemeral = nil
		}
	}
}

func ephemeralContainersInUse(podSpec *api.PodSpec) bool {
	if podSpec == nil {
		return false
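A quick sketch (not from the commit) of how the new drop helper behaves; it assumes the GenericEphemeralVolume feature gate is disabled and uses an illustrative pod spec literal with the same `api` package alias as above:

// Sketch: with the GenericEphemeralVolume gate disabled and no pre-existing
// use of the field in the old object, the Ephemeral source is cleared on write.
podSpec := &api.PodSpec{
	Volumes: []api.Volume{{
		Name: "scratch",
		VolumeSource: api.VolumeSource{
			Ephemeral: &api.EphemeralVolumeSource{ /* template omitted for brevity */ },
		},
	}},
}
dropDisabledEphemeralVolumeSourceAlphaFields(podSpec, nil /* no old spec */)
// Expected when the gate is off: podSpec.Volumes[0].Ephemeral == nil.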
@@ -159,7 +159,7 @@ func TestCompatibility_v1_PodSecurityContext(t *testing.T) {
	}

	validator := func(obj runtime.Object) field.ErrorList {
-		return validation.ValidatePodSpec(&(obj.(*api.Pod).Spec), field.NewPath("spec"))
+		return validation.ValidatePodSpec(&(obj.(*api.Pod).Spec), &(obj.(*api.Pod).ObjectMeta), field.NewPath("spec"))
	}

	for _, tc := range cases {
@@ -263,6 +263,14 @@ var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
			i.ISCSIInterface = "default"
		}
	},
	func(i *core.PersistentVolumeClaimSpec, c fuzz.Continue) {
		// Match defaulting in pkg/apis/core/v1/defaults.go.
		volumeMode := core.PersistentVolumeMode(c.RandString())
		if volumeMode == "" {
			volumeMode = core.PersistentVolumeFilesystem
		}
		i.VolumeMode = &volumeMode
	},
	func(d *core.DNSPolicy, c fuzz.Continue) {
		policies := []core.DNSPolicy{core.DNSClusterFirst, core.DNSDefault}
		*d = policies[c.Rand.Intn(len(policies))]
@@ -157,6 +157,33 @@ type VolumeSource struct {
	// CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).
	// +optional
	CSI *CSIVolumeSource
	// Ephemeral represents a volume that is handled by a cluster storage driver (Alpha feature).
	// The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
	// and deleted when the pod is removed.
	//
	// Use this if:
	// a) the volume is only needed while the pod runs,
	// b) features of normal volumes like restoring from snapshot or capacity
	//    tracking are needed,
	// c) the storage driver is specified through a storage class, and
	// d) the storage driver supports dynamic volume provisioning through
	//    a PersistentVolumeClaim (see EphemeralVolumeSource for more
	//    information on the connection between this volume type
	//    and PersistentVolumeClaim).
	//
	// Use PersistentVolumeClaim or one of the vendor-specific
	// APIs for volumes that persist for longer than the lifecycle
	// of an individual pod.
	//
	// Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
	// be used that way - see the documentation of the driver for
	// more information.
	//
	// A pod can use both types of ephemeral volumes and
	// persistent volumes at the same time.
	//
	// +optional
	Ephemeral *EphemeralVolumeSource
}

// PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs.
@@ -1670,6 +1697,53 @@ type CSIVolumeSource struct {
	NodePublishSecretRef *LocalObjectReference
}

// EphemeralVolumeSource represents an ephemeral volume that is handled by a normal storage driver.
type EphemeralVolumeSource struct {
	// VolumeClaimTemplate will be used to create a stand-alone PVC to provision the volume.
	// The pod in which this EphemeralVolumeSource is embedded will be the
	// owner of the PVC, i.e. the PVC will be deleted together with the
	// pod. The name of the PVC will be `<pod name>-<volume name>` where
	// `<volume name>` is the name from the `PodSpec.Volumes` array
	// entry. Pod validation will reject the pod if the concatenated name
	// is not valid for a PVC (for example, too long).
	//
	// An existing PVC with that name that is not owned by the pod
	// will *not* be used for the pod to avoid using an unrelated
	// volume by mistake. Starting the pod is then blocked until
	// the unrelated PVC is removed. If such a pre-created PVC is
	// meant to be used by the pod, the PVC has to be updated with an
	// owner reference to the pod once the pod exists. Normally
	// this should not be necessary, but it may be useful when
	// manually reconstructing a broken cluster.
	//
	// This field is read-only and no changes will be made by Kubernetes
	// to the PVC after it has been created.
	//
	// Required, must not be nil.
	VolumeClaimTemplate *PersistentVolumeClaimTemplate

	// ReadOnly specifies a read-only configuration for the volume.
	// Defaults to false (read/write).
	// +optional
	ReadOnly bool
}

// PersistentVolumeClaimTemplate is used to produce
// PersistentVolumeClaim objects as part of an EphemeralVolumeSource.
type PersistentVolumeClaimTemplate struct {
	// ObjectMeta may contain labels and annotations that will be copied into the PVC
	// when creating it. No other fields are allowed and will be rejected during
	// validation.
	// +optional
	metav1.ObjectMeta

	// Spec for the PersistentVolumeClaim. The entire content is
	// copied unchanged into the PVC that gets created from this
	// template. The same fields as in a PersistentVolumeClaim
	// are also valid here.
	Spec PersistentVolumeClaimSpec
}

// ContainerPort represents a network port in a single container
type ContainerPort struct {
	// Optional: If specified, this must be an IANA_SVC_NAME Each named port
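To make the ownership and naming rules above concrete, here is a rough sketch (not part of this commit) of how a controller could materialize the PVC from the template; the helper name is hypothetical and the actual controller lives outside this change:

// buildEphemeralPVC is a hypothetical helper illustrating the documented rules:
// name = "<pod name>-<volume name>", labels/annotations copied from the template,
// and the pod set as owner so the PVC is garbage-collected together with the pod.
func buildEphemeralPVC(pod *core.Pod, vol core.Volume) *core.PersistentVolumeClaim {
	tmpl := vol.Ephemeral.VolumeClaimTemplate
	isTrue := true
	return &core.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name:        pod.Name + "-" + vol.Name,
			Namespace:   pod.Namespace,
			Labels:      tmpl.Labels,
			Annotations: tmpl.Annotations,
			OwnerReferences: []metav1.OwnerReference{{
				APIVersion:         "v1",
				Kind:               "Pod",
				Name:               pod.Name,
				UID:                pod.UID,
				Controller:         &isTrue,
				BlockOwnerDeletion: &isTrue,
			}},
		},
		Spec: tmpl.Spec, // copied unchanged, per the comment above
	}
}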
@@ -285,9 +285,11 @@ func SetDefaults_PersistentVolumeClaim(obj *v1.PersistentVolumeClaim) {
	if obj.Status.Phase == "" {
		obj.Status.Phase = v1.ClaimPending
	}
	if obj.Spec.VolumeMode == nil {
		obj.Spec.VolumeMode = new(v1.PersistentVolumeMode)
		*obj.Spec.VolumeMode = v1.PersistentVolumeFilesystem
	}
func SetDefaults_PersistentVolumeClaimSpec(obj *v1.PersistentVolumeClaimSpec) {
	if obj.VolumeMode == nil {
		obj.VolumeMode = new(v1.PersistentVolumeMode)
		*obj.VolumeMode = v1.PersistentVolumeFilesystem
	}
}
func SetDefaults_ISCSIVolumeSource(obj *v1.ISCSIVolumeSource) {
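Splitting the VolumeMode defaulting into SetDefaults_PersistentVolumeClaimSpec lets the same logic apply wherever a claim spec is embedded, which the defaults tests below exercise for the ephemeral volume's claim template. A minimal sketch of the effect (not from the commit):

// Sketch: the extracted helper also covers a spec taken from
// pod.Spec.Volumes[i].Ephemeral.VolumeClaimTemplate.Spec.
spec := &v1.PersistentVolumeClaimSpec{}
SetDefaults_PersistentVolumeClaimSpec(spec)
// spec.VolumeMode now points at v1.PersistentVolumeFilesystem unless it was already set.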
@@ -142,6 +142,7 @@ func TestWorkloadDefaults(t *testing.T) {
		".Spec.Volumes[0].VolumeSource.DownwardAPI.DefaultMode": `420`,
		".Spec.Volumes[0].VolumeSource.DownwardAPI.Items[0].FieldRef.APIVersion": `"v1"`,
		".Spec.Volumes[0].VolumeSource.EmptyDir": `{}`,
		".Spec.Volumes[0].VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.VolumeMode": `"Filesystem"`,
		".Spec.Volumes[0].VolumeSource.HostPath.Type": `""`,
		".Spec.Volumes[0].VolumeSource.ISCSI.ISCSIInterface": `"default"`,
		".Spec.Volumes[0].VolumeSource.Projected.DefaultMode": `420`,
@@ -265,6 +266,7 @@ func TestPodDefaults(t *testing.T) {
		".Spec.Volumes[0].VolumeSource.DownwardAPI.DefaultMode": `420`,
		".Spec.Volumes[0].VolumeSource.DownwardAPI.Items[0].FieldRef.APIVersion": `"v1"`,
		".Spec.Volumes[0].VolumeSource.EmptyDir": `{}`,
		".Spec.Volumes[0].VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.VolumeMode": `"Filesystem"`,
		".Spec.Volumes[0].VolumeSource.HostPath.Type": `""`,
		".Spec.Volumes[0].VolumeSource.ISCSI.ISCSIInterface": `"default"`,
		".Spec.Volumes[0].VolumeSource.Projected.DefaultMode": `420`,
@@ -1375,6 +1377,58 @@ func TestSetDefaultPersistentVolumeClaim(t *testing.T) {
	}
}

func TestSetDefaultEphemeral(t *testing.T) {
	fsMode := v1.PersistentVolumeFilesystem
	blockMode := v1.PersistentVolumeBlock

	tests := []struct {
		name               string
		volumeMode         *v1.PersistentVolumeMode
		expectedVolumeMode v1.PersistentVolumeMode
	}{
		{
			name:               "volume mode nil",
			volumeMode:         nil,
			expectedVolumeMode: v1.PersistentVolumeFilesystem,
		},
		{
			name:               "volume mode filesystem",
			volumeMode:         &fsMode,
			expectedVolumeMode: v1.PersistentVolumeFilesystem,
		},
		{
			name:               "volume mode block",
			volumeMode:         &blockMode,
			expectedVolumeMode: v1.PersistentVolumeBlock,
		},
	}

	for _, test := range tests {
		pod := &v1.Pod{
			Spec: v1.PodSpec{
				Volumes: []v1.Volume{
					{
						VolumeSource: v1.VolumeSource{
							Ephemeral: &v1.EphemeralVolumeSource{
								VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
									Spec: v1.PersistentVolumeClaimSpec{
										VolumeMode: test.volumeMode,
									},
								},
							},
						},
					},
				},
			},
		}
		obj1 := roundTrip(t, runtime.Object(pod))
		pod1 := obj1.(*v1.Pod)
		if *pod1.Spec.Volumes[0].VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.VolumeMode != test.expectedVolumeMode {
			t.Errorf("Test %s failed, Expected VolumeMode: %v, but got %v", test.name, test.volumeMode, *pod1.Spec.Volumes[0].VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.VolumeMode)
		}
	}
}

func TestSetDefaultEndpointsProtocol(t *testing.T) {
	in := &v1.Endpoints{Subsets: []v1.EndpointSubset{
		{Ports: []v1.EndpointPort{{}, {Protocol: "UDP"}, {}}},
@@ -37,6 +37,7 @@ import (
	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
	v1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/diff"
	"k8s.io/apimachinery/pkg/util/intstr"
@@ -349,15 +350,26 @@ func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *fiel
	return allErrs
}

-func ValidateVolumes(volumes []core.Volume, fldPath *field.Path) (map[string]core.VolumeSource, field.ErrorList) {
+func ValidateVolumes(volumes []core.Volume, podMeta *metav1.ObjectMeta, fldPath *field.Path) (map[string]core.VolumeSource, field.ErrorList) {
	allErrs := field.ErrorList{}

	allNames := sets.String{}
	allCreatedPVCs := sets.String{}
	// Determine which PVCs will be created for this pod. We need
	// the exact name of the pod for this. Without it, this sanity
	// check has to be skipped.
	if podMeta != nil && podMeta.Name != "" {
		for _, vol := range volumes {
			if vol.VolumeSource.Ephemeral != nil {
				allCreatedPVCs.Insert(podMeta.Name + "-" + vol.Name)
			}
		}
	}
	vols := make(map[string]core.VolumeSource)
	for i, vol := range volumes {
		idxPath := fldPath.Index(i)
		namePath := idxPath.Child("name")
-		el := validateVolumeSource(&vol.VolumeSource, idxPath, vol.Name)
+		el := validateVolumeSource(&vol.VolumeSource, idxPath, vol.Name, podMeta)
		if len(vol.Name) == 0 {
			el = append(el, field.Required(namePath, ""))
		} else {
@@ -372,8 +384,14 @@ func ValidateVolumes(volumes []core.Volume, fldPath *field.Path) (map[string]cor
		} else {
			allErrs = append(allErrs, el...)
		}

		// A PersistentVolumeClaimSource should not reference a created PVC. That doesn't
		// make sense.
		if vol.PersistentVolumeClaim != nil && allCreatedPVCs.Has(vol.PersistentVolumeClaim.ClaimName) {
			allErrs = append(allErrs, field.Invalid(idxPath.Child("persistentVolumeClaim").Child("claimName"), vol.PersistentVolumeClaim.ClaimName,
				"must not reference a PVC that gets created for an ephemeral volume"))
		}
	}

	return vols, allErrs
}
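A sketch (not from the commit) of the situation this cross-check rejects, with illustrative names: the pod "mypod" defines an ephemeral volume "scratch", so the PVC "mypod-scratch" is reserved for it and must not also be referenced directly:

// Illustrative volumes for a pod named "mypod": validation is expected to fail
// because the second volume points at the PVC that will be created for the first.
volumes := []core.Volume{
	{
		Name: "scratch",
		VolumeSource: core.VolumeSource{
			Ephemeral: &core.EphemeralVolumeSource{
				VolumeClaimTemplate: &core.PersistentVolumeClaimTemplate{ /* spec omitted for brevity */ },
			},
		},
	},
	{
		Name: "data",
		VolumeSource: core.VolumeSource{
			PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{
				ClaimName: "mypod-scratch", // flagged: reserved for the ephemeral volume above
			},
		},
	},
}
_, errs := ValidateVolumes(volumes, &metav1.ObjectMeta{Name: "mypod", Namespace: "ns"}, field.NewPath("spec", "volumes"))
// errs is expected to include a field.Invalid for spec.volumes[1].persistentVolumeClaim.claimName.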
@@ -428,7 +446,7 @@ func devicePathAlreadyExists(devicePath string, mounts map[string]string) bool {
	return false
}

-func validateVolumeSource(source *core.VolumeSource, fldPath *field.Path, volName string) field.ErrorList {
+func validateVolumeSource(source *core.VolumeSource, fldPath *field.Path, volName string, podMeta *metav1.ObjectMeta) field.ErrorList {
	numVolumes := 0
	allErrs := field.ErrorList{}
	if source.EmptyDir != nil {
@@ -659,6 +677,23 @@ func validateVolumeSource(source *core.VolumeSource, fldPath *field.Path, volNam
			allErrs = append(allErrs, validateCSIVolumeSource(source.CSI, fldPath.Child("csi"))...)
		}
	}
	if source.Ephemeral != nil {
		if numVolumes > 0 {
			allErrs = append(allErrs, field.Forbidden(fldPath.Child("ephemeral"), "may not specify more than 1 volume type"))
		} else {
			numVolumes++
			allErrs = append(allErrs, validateEphemeralVolumeSource(source.Ephemeral, fldPath.Child("ephemeral"))...)
			// Check the expected name for the PVC. This gets skipped if information is missing,
			// because that already gets flagged as a problem elsewhere. For example,
			// ValidateObjectMeta as called by validatePodMetadataAndSpec checks that the name is set.
			if podMeta != nil && podMeta.Name != "" && volName != "" {
				pvcName := podMeta.Name + "-" + volName
				for _, msg := range ValidatePersistentVolumeName(pvcName, false) {
					allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), volName, fmt.Sprintf("PVC name %q: %v", pvcName, msg)))
				}
			}
		}
	}

	if numVolumes == 0 {
		allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type"))
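To illustrate the name check above (not part of the commit, assuming the usual strings/fmt imports): the generated claim name must itself be a valid DNS subdomain, so a pod name and volume name that are individually valid can still produce a claim name that is too long. The 200/60 lengths mirror the test data used later in this commit.

// Hypothetical names used only to show the failure mode.
podName := strings.Repeat("a", 200) // valid pod name on its own
volName := strings.Repeat("b", 60)  // valid volume name on its own
pvcName := podName + "-" + volName  // 261 characters - too long for a PVC name
for _, msg := range ValidatePersistentVolumeName(pvcName, false) {
	fmt.Println(msg) // expected: a "must be no more than 253 characters"-style message
}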
@@ -1552,6 +1587,41 @@ func validateCSIVolumeSource(csi *core.CSIVolumeSource, fldPath *field.Path) fie
	return allErrs
}

func validateEphemeralVolumeSource(ephemeral *core.EphemeralVolumeSource, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	if ephemeral.VolumeClaimTemplate == nil {
		allErrs = append(allErrs, field.Required(fldPath.Child("volumeClaimTemplate"), ""))
	} else {
		allErrs = append(allErrs, ValidatePersistentVolumeClaimTemplate(ephemeral.VolumeClaimTemplate, fldPath.Child("volumeClaimTemplate"))...)
	}
	return allErrs
}

// ValidatePersistentVolumeClaimTemplate verifies that the embedded object meta and spec are valid.
// Checking of the object data is very minimal because only labels and annotations are used.
func ValidatePersistentVolumeClaimTemplate(claimTemplate *core.PersistentVolumeClaimTemplate, fldPath *field.Path) field.ErrorList {
	allErrs := validatePersistentVolumeClaimTemplateObjectMeta(&claimTemplate.ObjectMeta, fldPath.Child("metadata"))
	allErrs = append(allErrs, ValidatePersistentVolumeClaimSpec(&claimTemplate.Spec, fldPath.Child("spec"))...)
	return allErrs
}

func validatePersistentVolumeClaimTemplateObjectMeta(objMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {
	allErrs := apimachineryvalidation.ValidateAnnotations(objMeta.Annotations, fldPath.Child("annotations"))
	allErrs = append(allErrs, v1validation.ValidateLabels(objMeta.Labels, fldPath.Child("labels"))...)
	// All other fields are not supported and thus must not be set
	// to avoid confusion. We could reject individual fields,
	// but then adding a new one to ObjectMeta wouldn't be checked
	// unless this code gets updated. Instead, we ensure that
	// only allowed fields are set via reflection.
	allErrs = append(allErrs, validateFieldAllowList(*objMeta, allowedPVCTemplateObjectMetaFields, "cannot be set for an ephemeral volume", fldPath)...)
	return allErrs
}

var allowedPVCTemplateObjectMetaFields = map[string]bool{
	"Annotations": true,
	"Labels":      true,
}

// ValidatePersistentVolumeName checks that a name is appropriate for a
// PersistentVolumeName object.
var ValidatePersistentVolumeName = apimachineryvalidation.NameIsDNSSubdomain
@@ -2647,21 +2717,31 @@ func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer,
		}

		// Ephemeral Containers should not be relied upon for fundamental pod services, so fields such as
-		// Lifecycle, probes, resources and ports should be disallowed. This is implemented as a whitelist
-		// so that new fields will be given consideration prior to inclusion in Ephemeral Containers.
-		specType, specValue := reflect.TypeOf(ec.EphemeralContainerCommon), reflect.ValueOf(ec.EphemeralContainerCommon)
-		for i := 0; i < specType.NumField(); i++ {
-			f := specType.Field(i)
-			if allowedEphemeralContainerFields[f.Name] {
-				continue
-			}
+		// Lifecycle, probes, resources and ports should be disallowed. This is implemented as a list
+		// of allowed fields so that new fields will be given consideration prior to inclusion in Ephemeral Containers.
+		allErrs = append(allErrs, validateFieldAllowList(ec.EphemeralContainerCommon, allowedEphemeralContainerFields, "cannot be set for an Ephemeral Container", idxPath)...)
	}

-			// Compare the value of this field to its zero value to determine if it has been set
-			if !reflect.DeepEqual(specValue.Field(i).Interface(), reflect.Zero(f.Type).Interface()) {
-				r, n := utf8.DecodeRuneInString(f.Name)
-				lcName := string(unicode.ToLower(r)) + f.Name[n:]
-				allErrs = append(allErrs, field.Forbidden(idxPath.Child(lcName), "cannot be set for an Ephemeral Container"))
-			}
	return allErrs
}

+// validateFieldAllowList checks that only allowed fields are set.
+// The value must be a struct (not a pointer to a struct!).
+func validateFieldAllowList(value interface{}, allowedFields map[string]bool, errorText string, fldPath *field.Path) field.ErrorList {
+	var allErrs field.ErrorList
+
+	reflectType, reflectValue := reflect.TypeOf(value), reflect.ValueOf(value)
+	for i := 0; i < reflectType.NumField(); i++ {
+		f := reflectType.Field(i)
+		if allowedFields[f.Name] {
+			continue
+		}
+
+		// Compare the value of this field to its zero value to determine if it has been set
+		if !reflect.DeepEqual(reflectValue.Field(i).Interface(), reflect.Zero(f.Type).Interface()) {
+			r, n := utf8.DecodeRuneInString(f.Name)
+			lcName := string(unicode.ToLower(r)) + f.Name[n:]
+			allErrs = append(allErrs, field.Forbidden(fldPath.Child(lcName), errorText))
+		}
+	}
+
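For intuition (not part of the commit), the same allow-list helper drives the PVC template metadata check shown earlier; any disallowed field produces a Forbidden error under the lower-cased field name. The metadata literal below is illustrative:

// Sketch: GenerateName is not in allowedPVCTemplateObjectMetaFields, so it is rejected.
objMeta := metav1.ObjectMeta{
	Labels:       map[string]string{"app": "frontend"}, // allowed
	GenerateName: "pvc-",                               // not allowed for an ephemeral volume's template
}
errs := validateFieldAllowList(objMeta, allowedPVCTemplateObjectMetaFields,
	"cannot be set for an ephemeral volume", field.NewPath("metadata"))
// errs is expected to contain a Forbidden error for metadata.generateName.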
@@ -3119,7 +3199,7 @@ func validatePodMetadataAndSpec(pod *core.Pod, opts PodValidationOptions) field.
	fldPath := field.NewPath("metadata")
	allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, fldPath)
	allErrs = append(allErrs, ValidatePodSpecificAnnotations(pod.ObjectMeta.Annotations, &pod.Spec, fldPath.Child("annotations"))...)
-	allErrs = append(allErrs, ValidatePodSpec(&pod.Spec, field.NewPath("spec"))...)
+	allErrs = append(allErrs, ValidatePodSpec(&pod.Spec, &pod.ObjectMeta, field.NewPath("spec"))...)

	// we do additional validation only pertinent for pods and not pod templates
	// this was done to preserve backwards compatibility
@@ -3198,10 +3278,12 @@ func validatePodIPs(pod *core.Pod) field.ErrorList {
// This includes checking formatting and uniqueness. It also canonicalizes the
// structure by setting default values and implementing any backwards-compatibility
// tricks.
-func ValidatePodSpec(spec *core.PodSpec, fldPath *field.Path) field.ErrorList {
+// The pod metadata is needed to validate generic ephemeral volumes. It is optional
+// and should be left empty unless the spec is from a real pod object.
+func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}

-	vols, vErrs := ValidateVolumes(spec.Volumes, fldPath.Child("volumes"))
+	vols, vErrs := ValidateVolumes(spec.Volumes, podMeta, fldPath.Child("volumes"))
	allErrs = append(allErrs, vErrs...)
	allErrs = append(allErrs, validateContainers(spec.Containers, false, vols, fldPath.Child("containers"))...)
	allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, fldPath.Child("initContainers"))...)
@@ -4493,7 +4575,7 @@ func ValidatePodTemplateSpec(spec *core.PodTemplateSpec, fldPath *field.Path) fi
	allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.Labels, fldPath.Child("labels"))...)
	allErrs = append(allErrs, ValidateAnnotations(spec.Annotations, fldPath.Child("annotations"))...)
	allErrs = append(allErrs, ValidatePodSpecificAnnotations(spec.Annotations, &spec.Spec, fldPath.Child("annotations"))...)
-	allErrs = append(allErrs, ValidatePodSpec(&spec.Spec, fldPath.Child("spec"))...)
+	allErrs = append(allErrs, ValidatePodSpec(&spec.Spec, nil, fldPath.Child("spec"))...)
	allErrs = append(allErrs, validateSeccompAnnotationsAndFields(spec.ObjectMeta, &spec.Spec, fldPath.Child("spec"))...)

	if len(spec.Spec.EphemeralContainers) > 0 {
@ -932,42 +932,195 @@ func testVolumeClaimStorageClassInAnnotationAndSpec(name, namespace, scNameInAnn
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidatePersistentVolumeClaim(t *testing.T) {
|
||||
func testValidatePVC(t *testing.T, ephemeral bool) {
|
||||
invalidClassName := "-invalid-"
|
||||
validClassName := "valid"
|
||||
invalidMode := core.PersistentVolumeMode("fakeVolumeMode")
|
||||
validMode := core.PersistentVolumeFilesystem
|
||||
goodName := "foo"
|
||||
goodNS := "ns"
|
||||
if ephemeral {
|
||||
// Must be empty for ephemeral inline volumes.
|
||||
goodName = ""
|
||||
goodNS = ""
|
||||
}
|
||||
goodClaimSpec := core.PersistentVolumeClaimSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "key2",
|
||||
Operator: "Exists",
|
||||
},
|
||||
},
|
||||
},
|
||||
AccessModes: []core.PersistentVolumeAccessMode{
|
||||
core.ReadWriteOnce,
|
||||
core.ReadOnlyMany,
|
||||
},
|
||||
Resources: core.ResourceRequirements{
|
||||
Requests: core.ResourceList{
|
||||
core.ResourceName(core.ResourceStorage): resource.MustParse("10G"),
|
||||
},
|
||||
},
|
||||
StorageClassName: &validClassName,
|
||||
VolumeMode: &validMode,
|
||||
}
|
||||
now := metav1.Now()
|
||||
ten := int64(10)
|
||||
|
||||
scenarios := map[string]struct {
|
||||
isExpectedFailure bool
|
||||
claim *core.PersistentVolumeClaim
|
||||
}{
|
||||
"good-claim": {
|
||||
isExpectedFailure: false,
|
||||
claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
Key: "key2",
|
||||
Operator: "Exists",
|
||||
},
|
||||
claim: testVolumeClaim(goodName, goodNS, goodClaimSpec),
|
||||
},
|
||||
"missing-name": {
|
||||
isExpectedFailure: !ephemeral,
|
||||
claim: testVolumeClaim("", goodNS, goodClaimSpec),
|
||||
},
|
||||
"missing-namespace": {
|
||||
isExpectedFailure: !ephemeral,
|
||||
claim: testVolumeClaim(goodName, "", goodClaimSpec),
|
||||
},
|
||||
"with-generate-name": {
|
||||
isExpectedFailure: ephemeral,
|
||||
claim: func() *core.PersistentVolumeClaim {
|
||||
claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.GenerateName = "pvc-"
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"with-uid": {
|
||||
isExpectedFailure: ephemeral,
|
||||
claim: func() *core.PersistentVolumeClaim {
|
||||
claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"with-resource-version": {
|
||||
isExpectedFailure: ephemeral,
|
||||
claim: func() *core.PersistentVolumeClaim {
|
||||
claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.ResourceVersion = "1"
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"with-generation": {
|
||||
isExpectedFailure: ephemeral,
|
||||
claim: func() *core.PersistentVolumeClaim {
|
||||
claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.Generation = 100
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"with-creation-timestamp": {
|
||||
isExpectedFailure: ephemeral,
|
||||
claim: func() *core.PersistentVolumeClaim {
|
||||
claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.CreationTimestamp = now
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"with-deletion-grace-period-seconds": {
|
||||
isExpectedFailure: ephemeral,
|
||||
claim: func() *core.PersistentVolumeClaim {
|
||||
claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.DeletionGracePeriodSeconds = &ten
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"with-owner-references": {
|
||||
isExpectedFailure: ephemeral,
|
||||
claim: func() *core.PersistentVolumeClaim {
|
||||
claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.OwnerReferences = []metav1.OwnerReference{
|
||||
{
|
||||
APIVersion: "v1",
|
||||
Kind: "pod",
|
||||
Name: "foo",
|
||||
UID: "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d",
|
||||
},
|
||||
},
|
||||
AccessModes: []core.PersistentVolumeAccessMode{
|
||||
core.ReadWriteOnce,
|
||||
core.ReadOnlyMany,
|
||||
},
|
||||
Resources: core.ResourceRequirements{
|
||||
Requests: core.ResourceList{
|
||||
core.ResourceName(core.ResourceStorage): resource.MustParse("10G"),
|
||||
}
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"with-finalizers": {
|
||||
isExpectedFailure: ephemeral,
|
||||
claim: func() *core.PersistentVolumeClaim {
|
||||
claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.Finalizers = []string{
|
||||
"example.com/foo",
|
||||
}
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"with-cluster-name": {
|
||||
isExpectedFailure: ephemeral,
|
||||
claim: func() *core.PersistentVolumeClaim {
|
||||
claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.ClusterName = "foo"
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"with-managed-fields": {
|
||||
isExpectedFailure: ephemeral,
|
||||
claim: func() *core.PersistentVolumeClaim {
|
||||
claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.ManagedFields = []metav1.ManagedFieldsEntry{
|
||||
{
|
||||
FieldsType: "FieldsV1",
|
||||
Operation: "Apply",
|
||||
APIVersion: "apps/v1",
|
||||
Manager: "foo",
|
||||
},
|
||||
},
|
||||
StorageClassName: &validClassName,
|
||||
VolumeMode: &validMode,
|
||||
}),
|
||||
}
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"with-good-labels": {
|
||||
claim: func() *core.PersistentVolumeClaim {
|
||||
claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.Labels = map[string]string{
|
||||
"apps.kubernetes.io/name": "test",
|
||||
}
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"with-bad-labels": {
|
||||
isExpectedFailure: true,
|
||||
claim: func() *core.PersistentVolumeClaim {
|
||||
claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.Labels = map[string]string{
|
||||
"hello-world": "hyphen not allowed",
|
||||
}
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"with-good-annotations": {
|
||||
claim: func() *core.PersistentVolumeClaim {
|
||||
claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.Labels = map[string]string{
|
||||
"foo": "bar",
|
||||
}
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"with-bad-annotations": {
|
||||
isExpectedFailure: true,
|
||||
claim: func() *core.PersistentVolumeClaim {
|
||||
claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
|
||||
claim.Labels = map[string]string{
|
||||
"hello-world": "hyphen not allowed",
|
||||
}
|
||||
return claim
|
||||
}(),
|
||||
},
|
||||
"invalid-claim-zero-capacity": {
|
||||
isExpectedFailure: true,
|
||||
claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
|
||||
claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
@ -990,7 +1143,7 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
|
||||
},
|
||||
"invalid-label-selector": {
|
||||
isExpectedFailure: true,
|
||||
claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
|
||||
claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
@ -1013,7 +1166,7 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
|
||||
},
|
||||
"invalid-accessmode": {
|
||||
isExpectedFailure: true,
|
||||
claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
|
||||
claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
|
||||
AccessModes: []core.PersistentVolumeAccessMode{"fakemode"},
|
||||
Resources: core.ResourceRequirements{
|
||||
Requests: core.ResourceList{
|
||||
@ -1022,23 +1175,9 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
|
||||
},
|
||||
}),
|
||||
},
|
||||
"missing-namespace": {
|
||||
isExpectedFailure: true,
|
||||
claim: testVolumeClaim("foo", "", core.PersistentVolumeClaimSpec{
|
||||
AccessModes: []core.PersistentVolumeAccessMode{
|
||||
core.ReadWriteOnce,
|
||||
core.ReadOnlyMany,
|
||||
},
|
||||
Resources: core.ResourceRequirements{
|
||||
Requests: core.ResourceList{
|
||||
core.ResourceName(core.ResourceStorage): resource.MustParse("10G"),
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
"no-access-modes": {
|
||||
isExpectedFailure: true,
|
||||
claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
|
||||
claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
|
||||
Resources: core.ResourceRequirements{
|
||||
Requests: core.ResourceList{
|
||||
core.ResourceName(core.ResourceStorage): resource.MustParse("10G"),
|
||||
@ -1048,7 +1187,7 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
|
||||
},
|
||||
"no-resource-requests": {
|
||||
isExpectedFailure: true,
|
||||
claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
|
||||
claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
|
||||
AccessModes: []core.PersistentVolumeAccessMode{
|
||||
core.ReadWriteOnce,
|
||||
},
|
||||
@ -1056,7 +1195,7 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
|
||||
},
|
||||
"invalid-resource-requests": {
|
||||
isExpectedFailure: true,
|
||||
claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
|
||||
claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
|
||||
AccessModes: []core.PersistentVolumeAccessMode{
|
||||
core.ReadWriteOnce,
|
||||
},
|
||||
@ -1069,7 +1208,7 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
|
||||
},
|
||||
"negative-storage-request": {
|
||||
isExpectedFailure: true,
|
||||
claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
|
||||
claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
@ -1091,7 +1230,7 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
|
||||
},
|
||||
"zero-storage-request": {
|
||||
isExpectedFailure: true,
|
||||
claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
|
||||
claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
@ -1113,7 +1252,7 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
|
||||
},
|
||||
"invalid-storage-class-name": {
|
||||
isExpectedFailure: true,
|
||||
claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
|
||||
claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
|
||||
Selector: &metav1.LabelSelector{
|
||||
MatchExpressions: []metav1.LabelSelectorRequirement{
|
||||
{
|
||||
@ -1136,7 +1275,7 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
|
||||
},
|
||||
"invalid-volume-mode": {
|
||||
isExpectedFailure: true,
|
||||
claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
|
||||
claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
|
||||
AccessModes: []core.PersistentVolumeAccessMode{
|
||||
core.ReadWriteOnce,
|
||||
core.ReadOnlyMany,
|
||||
@ -1153,17 +1292,43 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
|
||||
|
||||
for name, scenario := range scenarios {
|
||||
t.Run(name, func(t *testing.T) {
|
||||
errs := ValidatePersistentVolumeClaim(scenario.claim)
|
||||
var errs field.ErrorList
|
||||
if ephemeral {
|
||||
volumes := []core.Volume{
|
||||
{
|
||||
Name: "foo",
|
||||
VolumeSource: core.VolumeSource{
|
||||
Ephemeral: &core.EphemeralVolumeSource{
|
||||
VolumeClaimTemplate: &core.PersistentVolumeClaimTemplate{
|
||||
ObjectMeta: scenario.claim.ObjectMeta,
|
||||
Spec: scenario.claim.Spec,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
_, errs = ValidateVolumes(volumes, nil, field.NewPath(""))
|
||||
} else {
|
||||
errs = ValidatePersistentVolumeClaim(scenario.claim)
|
||||
}
|
||||
if len(errs) == 0 && scenario.isExpectedFailure {
|
||||
t.Errorf("Unexpected success for scenario: %s", name)
|
||||
t.Error("Unexpected success for scenario")
|
||||
}
|
||||
if len(errs) > 0 && !scenario.isExpectedFailure {
|
||||
t.Errorf("Unexpected failure for scenario: %s - %+v", name, errs)
|
||||
t.Errorf("Unexpected failure: %+v", errs)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestValidatePersistentVolumeClaim(t *testing.T) {
|
||||
testValidatePVC(t, false)
|
||||
}
|
||||
|
||||
func TestValidateEphemeralVolume(t *testing.T) {
|
||||
testValidatePVC(t, true)
|
||||
}
|
||||
|
||||
func TestAlphaPVVolumeModeUpdate(t *testing.T) {
|
||||
block := core.PersistentVolumeBlock
|
||||
file := core.PersistentVolumeFilesystem
|
||||
@ -3825,7 +3990,7 @@ func TestValidateVolumes(t *testing.T) {
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
names, errs := ValidateVolumes([]core.Volume{tc.vol}, field.NewPath("field"))
|
||||
names, errs := ValidateVolumes([]core.Volume{tc.vol}, nil, field.NewPath("field"))
|
||||
if len(errs) != len(tc.errs) {
|
||||
t.Fatalf("unexpected error(s): got %d, want %d: %v", len(tc.errs), len(errs), errs)
|
||||
}
|
||||
@ -3851,7 +4016,7 @@ func TestValidateVolumes(t *testing.T) {
|
||||
{Name: "abc", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}},
|
||||
{Name: "abc", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}},
|
||||
}
|
||||
_, errs := ValidateVolumes(dupsCase, field.NewPath("field"))
|
||||
_, errs := ValidateVolumes(dupsCase, nil, field.NewPath("field"))
|
||||
if len(errs) == 0 {
|
||||
t.Errorf("expected error")
|
||||
} else if len(errs) != 1 {
|
||||
@ -3864,7 +4029,7 @@ func TestValidateVolumes(t *testing.T) {
|
||||
hugePagesCase := core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{Medium: core.StorageMediumHugePages}}
|
||||
|
||||
// Enable HugePages
|
||||
if errs := validateVolumeSource(&hugePagesCase, field.NewPath("field").Index(0), "working"); len(errs) != 0 {
|
||||
if errs := validateVolumeSource(&hugePagesCase, field.NewPath("field").Index(0), "working", nil); len(errs) != 0 {
|
||||
t.Errorf("Unexpected error when HugePages feature is enabled.")
|
||||
}
|
||||
|
||||
@ -4194,7 +4359,7 @@ func TestAlphaLocalStorageCapacityIsolation(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
if errs := validateVolumeSource(&tc, field.NewPath("spec"), "tmpvol"); len(errs) != 0 {
|
||||
if errs := validateVolumeSource(&tc, field.NewPath("spec"), "tmpvol", nil); len(errs) != 0 {
|
||||
t.Errorf("expected success: %v", errs)
|
||||
}
|
||||
}
|
||||
@ -4937,7 +5102,7 @@ func TestValidateVolumeMounts(t *testing.T) {
|
||||
{Name: "abc-123", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "testclaim2"}}},
|
||||
{Name: "123", VolumeSource: core.VolumeSource{HostPath: &core.HostPathVolumeSource{Path: "/foo/baz", Type: newHostPathType(string(core.HostPathUnset))}}},
|
||||
}
|
||||
vols, v1err := ValidateVolumes(volumes, field.NewPath("field"))
|
||||
vols, v1err := ValidateVolumes(volumes, nil, field.NewPath("field"))
|
||||
if len(v1err) > 0 {
|
||||
t.Errorf("Invalid test volume - expected success %v", v1err)
|
||||
return
|
||||
@ -5000,7 +5165,7 @@ func TestValidateDisabledSubpath(t *testing.T) {
|
||||
{Name: "abc-123", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "testclaim2"}}},
|
||||
{Name: "123", VolumeSource: core.VolumeSource{HostPath: &core.HostPathVolumeSource{Path: "/foo/baz", Type: newHostPathType(string(core.HostPathUnset))}}},
|
||||
}
|
||||
vols, v1err := ValidateVolumes(volumes, field.NewPath("field"))
|
||||
vols, v1err := ValidateVolumes(volumes, nil, field.NewPath("field"))
|
||||
if len(v1err) > 0 {
|
||||
t.Errorf("Invalid test volume - expected success %v", v1err)
|
||||
return
|
||||
@ -5062,7 +5227,7 @@ func TestValidateSubpathMutuallyExclusive(t *testing.T) {
|
||||
{Name: "abc-123", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "testclaim2"}}},
|
||||
{Name: "123", VolumeSource: core.VolumeSource{HostPath: &core.HostPathVolumeSource{Path: "/foo/baz", Type: newHostPathType(string(core.HostPathUnset))}}},
|
||||
}
|
||||
vols, v1err := ValidateVolumes(volumes, field.NewPath("field"))
|
||||
vols, v1err := ValidateVolumes(volumes, nil, field.NewPath("field"))
|
||||
if len(v1err) > 0 {
|
||||
t.Errorf("Invalid test volume - expected success %v", v1err)
|
||||
return
|
||||
@ -5143,7 +5308,7 @@ func TestValidateDisabledSubpathExpr(t *testing.T) {
|
||||
{Name: "abc-123", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "testclaim2"}}},
|
||||
{Name: "123", VolumeSource: core.VolumeSource{HostPath: &core.HostPathVolumeSource{Path: "/foo/baz", Type: newHostPathType(string(core.HostPathUnset))}}},
|
||||
}
|
||||
vols, v1err := ValidateVolumes(volumes, field.NewPath("field"))
|
||||
vols, v1err := ValidateVolumes(volumes, nil, field.NewPath("field"))
|
||||
if len(v1err) > 0 {
|
||||
t.Errorf("Invalid test volume - expected success %v", v1err)
|
||||
return
|
||||
@ -5337,7 +5502,7 @@ func TestValidateMountPropagation(t *testing.T) {
|
||||
volumes := []core.Volume{
|
||||
{Name: "foo", VolumeSource: core.VolumeSource{HostPath: &core.HostPathVolumeSource{Path: "/foo/baz", Type: newHostPathType(string(core.HostPathUnset))}}},
|
||||
}
|
||||
vols2, v2err := ValidateVolumes(volumes, field.NewPath("field"))
|
||||
vols2, v2err := ValidateVolumes(volumes, nil, field.NewPath("field"))
|
||||
if len(v2err) > 0 {
|
||||
t.Errorf("Invalid test volume - expected success %v", v2err)
|
||||
return
|
||||
@ -5360,7 +5525,7 @@ func TestAlphaValidateVolumeDevices(t *testing.T) {
|
||||
{Name: "def", VolumeSource: core.VolumeSource{HostPath: &core.HostPathVolumeSource{Path: "/foo/baz", Type: newHostPathType(string(core.HostPathUnset))}}},
|
||||
}
|
||||
|
||||
vols, v1err := ValidateVolumes(volumes, field.NewPath("field"))
|
||||
vols, v1err := ValidateVolumes(volumes, nil, field.NewPath("field"))
|
||||
if len(v1err) > 0 {
|
||||
t.Errorf("Invalid test volumes - expected success %v", v1err)
|
||||
return
|
||||
@ -6560,14 +6725,14 @@ func TestValidatePodSpec(t *testing.T) {
|
||||
badfsGroupChangePolicy1 := core.PodFSGroupChangePolicy("invalid")
|
||||
badfsGroupChangePolicy2 := core.PodFSGroupChangePolicy("")
|
||||
|
||||
successCases := []core.PodSpec{
|
||||
{ // Populate basic fields, leave defaults for most.
|
||||
successCases := map[string]core.PodSpec{
|
||||
"populate basic fields, leave defaults for most": {
|
||||
Volumes: []core.Volume{{Name: "vol", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}}},
|
||||
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
|
||||
RestartPolicy: core.RestartPolicyAlways,
|
||||
DNSPolicy: core.DNSClusterFirst,
|
||||
},
|
||||
{ // Populate all fields.
|
||||
"populate all fields": {
|
||||
Volumes: []core.Volume{
|
||||
{Name: "vol", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}},
|
||||
},
|
||||
@ -6582,7 +6747,7 @@ func TestValidatePodSpec(t *testing.T) {
|
||||
ActiveDeadlineSeconds: &activeDeadlineSeconds,
|
||||
ServiceAccountName: "acct",
|
||||
},
|
||||
{ // Populate all fields with larger active deadline.
|
||||
"populate all fields with larger active deadline": {
|
||||
Volumes: []core.Volume{
|
||||
{Name: "vol", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}},
|
||||
},
|
||||
@ -6597,7 +6762,7 @@ func TestValidatePodSpec(t *testing.T) {
|
||||
ActiveDeadlineSeconds: &activeDeadlineSecondsMax,
|
||||
ServiceAccountName: "acct",
|
||||
},
|
||||
{ // Populate HostNetwork.
|
||||
"populate HostNetwork": {
|
||||
Containers: []core.Container{
|
||||
{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File",
|
||||
Ports: []core.ContainerPort{
|
||||
@ -6610,7 +6775,7 @@ func TestValidatePodSpec(t *testing.T) {
|
||||
RestartPolicy: core.RestartPolicyAlways,
|
||||
DNSPolicy: core.DNSClusterFirst,
|
||||
},
|
||||
{ // Populate RunAsUser SupplementalGroups FSGroup with minID 0
|
||||
"populate RunAsUser SupplementalGroups FSGroup with minID 0": {
|
||||
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
|
||||
SecurityContext: &core.PodSecurityContext{
|
||||
SupplementalGroups: []int64{minGroupID},
|
||||
@ -6620,7 +6785,7 @@ func TestValidatePodSpec(t *testing.T) {
|
||||
RestartPolicy: core.RestartPolicyAlways,
|
||||
DNSPolicy: core.DNSClusterFirst,
|
||||
},
|
||||
{ // Populate RunAsUser SupplementalGroups FSGroup with maxID 2147483647
|
||||
"populate RunAsUser SupplementalGroups FSGroup with maxID 2147483647": {
|
||||
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
|
||||
SecurityContext: &core.PodSecurityContext{
|
||||
SupplementalGroups: []int64{maxGroupID},
|
||||
@ -6630,7 +6795,7 @@ func TestValidatePodSpec(t *testing.T) {
|
||||
RestartPolicy: core.RestartPolicyAlways,
|
||||
DNSPolicy: core.DNSClusterFirst,
|
||||
},
|
||||
{ // Populate HostIPC.
|
||||
"populate HostIPC": {
|
||||
SecurityContext: &core.PodSecurityContext{
|
||||
HostIPC: true,
|
||||
},
|
||||
@ -6639,7 +6804,7 @@ func TestValidatePodSpec(t *testing.T) {
|
||||
RestartPolicy: core.RestartPolicyAlways,
|
||||
DNSPolicy: core.DNSClusterFirst,
|
||||
},
|
||||
{ // Populate HostPID.
|
||||
"populate HostPID": {
|
||||
SecurityContext: &core.PodSecurityContext{
|
||||
HostPID: true,
|
||||
},
|
||||
@ -6648,27 +6813,27 @@ func TestValidatePodSpec(t *testing.T) {
|
||||
RestartPolicy: core.RestartPolicyAlways,
|
||||
DNSPolicy: core.DNSClusterFirst,
|
||||
},
|
||||
{ // Populate Affinity.
|
||||
"populate Affinity": {
|
||||
Volumes: []core.Volume{{Name: "vol", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}}},
|
||||
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
|
||||
RestartPolicy: core.RestartPolicyAlways,
|
||||
DNSPolicy: core.DNSClusterFirst,
|
||||
},
|
||||
{ // Populate HostAliases.
|
||||
"populate HostAliases": {
|
||||
HostAliases: []core.HostAlias{{IP: "12.34.56.78", Hostnames: []string{"host1", "host2"}}},
|
||||
Volumes: []core.Volume{{Name: "vol", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}}},
|
||||
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
|
||||
RestartPolicy: core.RestartPolicyAlways,
|
||||
DNSPolicy: core.DNSClusterFirst,
|
||||
},
|
||||
{ // Populate HostAliases with `foo.bar` hostnames.
|
||||
"populate HostAliases with `foo.bar` hostnames": {
|
||||
HostAliases: []core.HostAlias{{IP: "12.34.56.78", Hostnames: []string{"host1.foo", "host2.bar"}}},
|
||||
Volumes: []core.Volume{{Name: "vol", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}}},
|
||||
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
|
||||
RestartPolicy: core.RestartPolicyAlways,
|
||||
DNSPolicy: core.DNSClusterFirst,
|
||||
},
|
||||
{ // Populate HostAliases with HostNetwork.
|
||||
"populate HostAliases with HostNetwork": {
|
||||
HostAliases: []core.HostAlias{{IP: "12.34.56.78", Hostnames: []string{"host1.foo", "host2.bar"}}},
|
||||
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
|
||||
SecurityContext: &core.PodSecurityContext{
|
||||
@ -6677,14 +6842,14 @@ func TestValidatePodSpec(t *testing.T) {
|
||||
RestartPolicy: core.RestartPolicyAlways,
|
||||
DNSPolicy: core.DNSClusterFirst,
|
||||
},
|
||||
{ // Populate PriorityClassName.
|
||||
"populate PriorityClassName": {
|
||||
Volumes: []core.Volume{{Name: "vol", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}}},
|
||||
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
|
||||
RestartPolicy: core.RestartPolicyAlways,
|
||||
DNSPolicy: core.DNSClusterFirst,
|
||||
PriorityClassName: "valid-name",
|
||||
},
|
||||
{ // Populate ShareProcessNamespace
|
||||
"populate ShareProcessNamespace": {
|
||||
Volumes: []core.Volume{{Name: "vol", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}}},
|
||||
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
|
||||
RestartPolicy: core.RestartPolicyAlways,
|
||||
@ -6693,20 +6858,20 @@ func TestValidatePodSpec(t *testing.T) {
|
||||
ShareProcessNamespace: &[]bool{true}[0],
|
||||
},
|
||||
},
|
||||
{ // Populate RuntimeClassName
|
||||
"populate RuntimeClassName": {
|
||||
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
|
||||
RestartPolicy: core.RestartPolicyAlways,
|
||||
DNSPolicy: core.DNSClusterFirst,
|
||||
RuntimeClassName: utilpointer.StringPtr("valid-sandbox"),
|
||||
},
|
||||
{ // Populate Overhead
|
||||
"populate Overhead": {
|
||||
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
|
||||
RestartPolicy: core.RestartPolicyAlways,
|
||||
DNSPolicy: core.DNSClusterFirst,
|
||||
RuntimeClassName: utilpointer.StringPtr("valid-sandbox"),
|
||||
Overhead: core.ResourceList{},
|
||||
},
|
||||
{
|
||||
"populate DNSPolicy": {
|
||||
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
|
||||
SecurityContext: &core.PodSecurityContext{
|
||||
FSGroupChangePolicy: &goodfsGroupChangePolicy,
|
||||
@ -6715,10 +6880,12 @@ func TestValidatePodSpec(t *testing.T) {
|
||||
DNSPolicy: core.DNSClusterFirst,
|
||||
},
|
||||
}
|
||||
for i := range successCases {
|
||||
if errs := ValidatePodSpec(&successCases[i], field.NewPath("field")); len(errs) != 0 {
|
||||
t.Errorf("expected success: %v", errs)
|
||||
}
|
||||
for k, v := range successCases {
|
||||
t.Run(k, func(t *testing.T) {
|
||||
if errs := ValidatePodSpec(&v, nil, field.NewPath("field")); len(errs) != 0 {
|
||||
t.Errorf("expected success: %v", errs)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
activeDeadlineSeconds = int64(0)
|
||||
@ -6919,7 +7086,7 @@ func TestValidatePodSpec(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for k, v := range failureCases {
|
||||
if errs := ValidatePodSpec(&v, field.NewPath("field")); len(errs) == 0 {
|
||||
if errs := ValidatePodSpec(&v, nil, field.NewPath("field")); len(errs) == 0 {
|
||||
t.Errorf("expected failure for %q", k)
|
||||
}
|
||||
}
|
||||
@ -6946,9 +7113,24 @@ func TestValidatePod(t *testing.T) {
|
||||
}
|
||||
return spec
|
||||
}
|
||||
validPVCSpec := core.PersistentVolumeClaimSpec{
|
||||
AccessModes: []core.PersistentVolumeAccessMode{
|
||||
core.ReadWriteOnce,
|
||||
},
|
||||
Resources: core.ResourceRequirements{
|
||||
Requests: core.ResourceList{
|
||||
core.ResourceName(core.ResourceStorage): resource.MustParse("10G"),
|
||||
},
|
||||
},
|
||||
}
|
||||
validPVCTemplate := core.PersistentVolumeClaimTemplate{
|
||||
Spec: validPVCSpec,
|
||||
}
|
||||
longPodName := strings.Repeat("a", 200)
|
||||
longVolName := strings.Repeat("b", 60)
|
||||
|
||||
successCases := []core.Pod{
|
||||
{ // Basic fields.
|
||||
successCases := map[string]core.Pod{
|
||||
"basic fields": {
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"},
|
||||
Spec: core.PodSpec{
|
||||
Volumes: []core.Volume{{Name: "vol", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}}},
|
||||
@ -6957,7 +7139,7 @@ func TestValidatePod(t *testing.T) {
|
||||
DNSPolicy: core.DNSClusterFirst,
|
||||
},
|
||||
},
|
||||
{ // Just about everything.
|
||||
"just about everything": {
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "abc.123.do-re-mi", Namespace: "ns"},
|
||||
Spec: core.PodSpec{
|
||||
Volumes: []core.Volume{
|
||||
@ -6972,7 +7154,7 @@ func TestValidatePod(t *testing.T) {
|
||||
NodeName: "foobar",
|
||||
},
|
||||
},
|
||||
{ // Serialized node affinity requirements.
|
||||
"serialized node affinity requirements": {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "123",
|
||||
Namespace: "ns",
|
||||
@ -7032,7 +7214,7 @@ func TestValidatePod(t *testing.T) {
|
||||
},
|
||||
),
|
||||
},
|
||||
{ // Serialized node affinity requirements.
|
||||
"serialized node affinity requirements, II": {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "123",
|
||||
Namespace: "ns",
|
||||
@ -7073,7 +7255,7 @@ func TestValidatePod(t *testing.T) {
|
||||
},
|
||||
),
|
||||
},
|
||||
{ // Serialized pod affinity in affinity requirements in annotations.
|
||||
"serialized pod affinity in affinity requirements in annotations": {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "123",
|
||||
Namespace: "ns",
|
||||
@ -7129,7 +7311,7 @@ func TestValidatePod(t *testing.T) {
|
||||
},
|
||||
}),
|
||||
},
|
||||
{ // Serialized pod anti affinity with different Label Operators in affinity requirements in annotations.
|
||||
"serialized pod anti affinity with different Label Operators in affinity requirements in annotations": {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "123",
|
||||
Namespace: "ns",
|
||||
@ -7183,63 +7365,63 @@ func TestValidatePod(t *testing.T) {
|
||||
},
|
||||
}),
|
||||
},
|
||||
{ // populate forgiveness tolerations with exists operator in annotations.
|
||||
"populate forgiveness tolerations with exists operator in annotations.": {
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "123",
|
||||
Namespace: "ns",
|
||||
},
|
||||
Spec: extendPodSpecwithTolerations(validPodSpec(nil), []core.Toleration{{Key: "foo", Operator: "Exists", Value: "", Effect: "NoExecute", TolerationSeconds: &[]int64{60}[0]}}),
|
||||
},
|
||||
{ // populate forgiveness tolerations with equal operator in annotations.
|
||||
"populate forgiveness tolerations with equal operator in annotations.": {
|
||||
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
},
Spec: extendPodSpecwithTolerations(validPodSpec(nil), []core.Toleration{{Key: "foo", Operator: "Equal", Value: "bar", Effect: "NoExecute", TolerationSeconds: &[]int64{60}[0]}}),
},
{ // populate tolerations equal operator in annotations.
"populate tolerations equal operator in annotations.": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
},
Spec: extendPodSpecwithTolerations(validPodSpec(nil), []core.Toleration{{Key: "foo", Operator: "Equal", Value: "bar", Effect: "NoSchedule"}}),
},
{ // populate tolerations exists operator in annotations.
"populate tolerations exists operator in annotations.": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
},
Spec: validPodSpec(nil),
},
{ // empty key with Exists operator is OK for toleration, empty toleration key means match all taint keys.
"empty key with Exists operator is OK for toleration, empty toleration key means match all taint keys.": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
},
Spec: extendPodSpecwithTolerations(validPodSpec(nil), []core.Toleration{{Operator: "Exists", Effect: "NoSchedule"}}),
},
{ // empty operator is OK for toleration, defaults to Equal.
"empty operator is OK for toleration, defaults to Equal.": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
},
Spec: extendPodSpecwithTolerations(validPodSpec(nil), []core.Toleration{{Key: "foo", Value: "bar", Effect: "NoSchedule"}}),
},
{ // empty effect is OK for toleration, empty toleration effect means match all taint effects.
"empty effect is OK for toleration, empty toleration effect means match all taint effects.": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
},
Spec: extendPodSpecwithTolerations(validPodSpec(nil), []core.Toleration{{Key: "foo", Operator: "Equal", Value: "bar"}}),
},
{ // negative tolerationSeconds is OK for toleration.
"negative tolerationSeconds is OK for toleration.": {
ObjectMeta: metav1.ObjectMeta{
Name: "pod-forgiveness-invalid",
Namespace: "ns",
},
Spec: extendPodSpecwithTolerations(validPodSpec(nil), []core.Toleration{{Key: "node.kubernetes.io/not-ready", Operator: "Exists", Effect: "NoExecute", TolerationSeconds: &[]int64{-2}[0]}}),
},
{ // runtime default seccomp profile
"runtime default seccomp profile": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
@ -7249,7 +7431,7 @@ func TestValidatePod(t *testing.T) {
},
Spec: validPodSpec(nil),
},
{ // docker default seccomp profile
"docker default seccomp profile": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
@ -7259,7 +7441,7 @@ func TestValidatePod(t *testing.T) {
},
Spec: validPodSpec(nil),
},
{ // unconfined seccomp profile
"unconfined seccomp profile": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
@ -7269,7 +7451,7 @@ func TestValidatePod(t *testing.T) {
},
Spec: validPodSpec(nil),
},
{ // localhost seccomp profile
"localhost seccomp profile": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
@ -7279,7 +7461,7 @@ func TestValidatePod(t *testing.T) {
},
Spec: validPodSpec(nil),
},
{ // localhost seccomp profile for a container
"localhost seccomp profile for a container": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
@ -7289,7 +7471,7 @@ func TestValidatePod(t *testing.T) {
},
Spec: validPodSpec(nil),
},
{ // runtime default seccomp profile for a pod
"runtime default seccomp profile for a pod": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
@ -7305,7 +7487,7 @@ func TestValidatePod(t *testing.T) {
},
},
},
{ // runtime default seccomp profile for a container
"runtime default seccomp profile for a container": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
@ -7322,7 +7504,7 @@ func TestValidatePod(t *testing.T) {
DNSPolicy: core.DNSDefault,
},
},
{ // unconfined seccomp profile for a pod
"unconfined seccomp profile for a pod": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
@ -7338,7 +7520,7 @@ func TestValidatePod(t *testing.T) {
},
},
},
{ // unconfined seccomp profile for a container
"unconfined seccomp profile for a container": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
@ -7355,7 +7537,7 @@ func TestValidatePod(t *testing.T) {
DNSPolicy: core.DNSDefault,
},
},
{ // localhost seccomp profile for a pod
"localhost seccomp profile for a pod": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
@ -7372,7 +7554,7 @@ func TestValidatePod(t *testing.T) {
},
},
},
{ // localhost seccomp profile for a container
"localhost seccomp profile for a container, II": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
@ -7390,7 +7572,7 @@ func TestValidatePod(t *testing.T) {
DNSPolicy: core.DNSDefault,
},
},
{ // default AppArmor profile for a container
"default AppArmor profile for a container": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
@ -7400,7 +7582,7 @@ func TestValidatePod(t *testing.T) {
},
Spec: validPodSpec(nil),
},
{ // default AppArmor profile for an init container
"default AppArmor profile for an init container": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
@ -7415,7 +7597,7 @@ func TestValidatePod(t *testing.T) {
DNSPolicy: core.DNSClusterFirst,
},
},
{ // localhost AppArmor profile for a container
"localhost AppArmor profile for a container": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
@ -7425,7 +7607,7 @@ func TestValidatePod(t *testing.T) {
},
Spec: validPodSpec(nil),
},
{ // syntactically valid sysctls
"syntactically valid sysctls": {
ObjectMeta: metav1.ObjectMeta{
Name: "123",
Namespace: "ns",
@ -7452,7 +7634,7 @@ func TestValidatePod(t *testing.T) {
},
},
},
{ // valid extended resources for init container
"valid extended resources for init container": {
ObjectMeta: metav1.ObjectMeta{Name: "valid-extended", Namespace: "ns"},
Spec: core.PodSpec{
InitContainers: []core.Container{
@ -7476,7 +7658,7 @@ func TestValidatePod(t *testing.T) {
DNSPolicy: core.DNSClusterFirst,
},
},
{ // valid extended resources for regular container
"valid extended resources for regular container": {
ObjectMeta: metav1.ObjectMeta{Name: "valid-extended", Namespace: "ns"},
Spec: core.PodSpec{
InitContainers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
@ -7500,7 +7682,7 @@ func TestValidatePod(t *testing.T) {
DNSPolicy: core.DNSClusterFirst,
},
},
{ // valid serviceaccount token projected volume with serviceaccount name specified
"valid serviceaccount token projected volume with serviceaccount name specified": {
ObjectMeta: metav1.ObjectMeta{Name: "valid-extended", Namespace: "ns"},
Spec: core.PodSpec{
ServiceAccountName: "some-service-account",
@ -7527,11 +7709,25 @@ func TestValidatePod(t *testing.T) {
},
},
},
"ephemeral volume + PVC, no conflict between them": {
ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"},
Spec: core.PodSpec{
Volumes: []core.Volume{
{Name: "pvc", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "my-pvc"}}},
{Name: "ephemeral", VolumeSource: core.VolumeSource{Ephemeral: &core.EphemeralVolumeSource{VolumeClaimTemplate: &validPVCTemplate}}},
},
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
RestartPolicy: core.RestartPolicyAlways,
DNSPolicy: core.DNSClusterFirst,
},
},
}
for _, pod := range successCases {
if errs := ValidatePodCreate(&pod, PodValidationOptions{}); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
for k, v := range successCases {
t.Run(k, func(t *testing.T) {
if errs := ValidatePodCreate(&v, PodValidationOptions{}); len(errs) != 0 {
t.Errorf("expected success: %v", errs)
}
})
}

errorCases := map[string]struct {
@ -8421,15 +8617,47 @@ func TestValidatePod(t *testing.T) {
},
},
},
"final PVC name for ephemeral volume must be valid": {
expectedError: "spec.volumes[1].name: Invalid value: \"" + longVolName + "\": PVC name \"" + longPodName + "-" + longVolName + "\": must be no more than 253 characters",
spec: core.Pod{
ObjectMeta: metav1.ObjectMeta{Name: longPodName, Namespace: "ns"},
Spec: core.PodSpec{
Volumes: []core.Volume{
{Name: "pvc", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "my-pvc"}}},
{Name: longVolName, VolumeSource: core.VolumeSource{Ephemeral: &core.EphemeralVolumeSource{VolumeClaimTemplate: &validPVCTemplate}}},
},
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
RestartPolicy: core.RestartPolicyAlways,
DNSPolicy: core.DNSClusterFirst,
},
},
},
"PersistentVolumeClaimVolumeSource must not reference a generated PVC": {
expectedError: "spec.volumes[0].persistentVolumeClaim.claimName: Invalid value: \"123-ephemeral-volume\": must not reference a PVC that gets created for an ephemeral volume",
spec: core.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"},
Spec: core.PodSpec{
Volumes: []core.Volume{
{Name: "pvc-volume", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "123-ephemeral-volume"}}},
{Name: "ephemeral-volume", VolumeSource: core.VolumeSource{Ephemeral: &core.EphemeralVolumeSource{VolumeClaimTemplate: &validPVCTemplate}}},
},
Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
RestartPolicy: core.RestartPolicyAlways,
DNSPolicy: core.DNSClusterFirst,
},
},
},
}
for k, v := range errorCases {
if errs := ValidatePodCreate(&v.spec, PodValidationOptions{}); len(errs) == 0 {
t.Errorf("expected failure for %q", k)
} else if v.expectedError == "" {
t.Errorf("missing expectedError for %q, got %q", k, errs.ToAggregate().Error())
} else if actualError := errs.ToAggregate().Error(); !strings.Contains(actualError, v.expectedError) {
t.Errorf("expected error for %q to contain %q, got %q", k, v.expectedError, actualError)
}
t.Run(k, func(t *testing.T) {
if errs := ValidatePodCreate(&v.spec, PodValidationOptions{}); len(errs) == 0 {
t.Errorf("expected failure")
} else if v.expectedError == "" {
t.Errorf("missing expectedError, got %q", errs.ToAggregate().Error())
} else if actualError := errs.ToAggregate().Error(); !strings.Contains(actualError, v.expectedError) {
t.Errorf("expected error to contain %q, got %q", v.expectedError, actualError)
}
})
}
}
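The two new error cases above both come down to the name of the PVC that gets generated for a generic ephemeral volume: the pod name and the volume name are joined with a dash, pod validation rejects the pod when that concatenation is not a valid PVC name, and a persistentVolumeClaim volume may not reference such a generated name directly. A minimal sketch of the length check, with helper names made up for illustration (the real validation also runs full DNS-subdomain name validation):

package example

import "fmt"

// ephemeralPVCName mirrors the naming rule for generic ephemeral volumes:
// the backing PVC is called "<pod name>-<volume name>".
func ephemeralPVCName(podName, volumeName string) string {
	return podName + "-" + volumeName
}

// checkEphemeralPVCName is a simplified stand-in for the real check; it only
// enforces the 253-character limit that the test case above expects.
func checkEphemeralPVCName(podName, volumeName string) error {
	name := ephemeralPVCName(podName, volumeName)
	if len(name) > 253 {
		return fmt.Errorf("PVC name %q: must be no more than 253 characters", name)
	}
	return nil
}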
@ -44,7 +44,7 @@ func ValidatePodPresetSpec(spec *settings.PodPresetSpec, fldPath *field.Path) fi
allErrs = append(allErrs, field.Required(fldPath.Child("volumes", "env", "envFrom", "volumeMounts"), "must specify at least one"))
}

vols, vErrs := apivalidation.ValidateVolumes(spec.Volumes, fldPath.Child("volumes"))
vols, vErrs := apivalidation.ValidateVolumes(spec.Volumes, nil, fldPath.Child("volumes"))
allErrs = append(allErrs, vErrs...)
allErrs = append(allErrs, apivalidation.ValidateEnv(spec.Env, fldPath.Child("env"))...)
allErrs = append(allErrs, apivalidation.ValidateEnvFrom(spec.EnvFrom, fldPath.Child("envFrom"))...)

@ -306,6 +306,12 @@ const (
// spreading and disables legacy SelectorSpread plugin.
DefaultPodTopologySpread featuregate.Feature = "DefaultPodTopologySpread"

// owner: @pohly
// alpha: v1.19
//
// Enables generic ephemeral inline volume support for pods
GenericEphemeralVolume featuregate.Feature = "GenericEphemeralVolume"

// owner: @tallclair
// alpha: v1.12
// beta: v1.14
@ -671,6 +677,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
CSIBlockVolume: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
CSIInlineVolume: {Default: true, PreRelease: featuregate.Beta},
CSIStorageCapacity: {Default: false, PreRelease: featuregate.Alpha},
GenericEphemeralVolume: {Default: false, PreRelease: featuregate.Alpha},
RuntimeClass: {Default: true, PreRelease: featuregate.Beta},
NodeLease: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
SCTPSupport: {Default: true, PreRelease: featuregate.Beta},
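Because the gate ships as alpha and defaults to false, any code path or test that exercises the new volume source has to enable it explicitly. A minimal sketch of that pattern, using the same helpers as the PSP and admission tests further down (the test name here is invented):

package example

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

// TestWithGenericEphemeralVolume flips the alpha gate on for the duration of
// a single test and restores the previous value afterwards.
func TestWithGenericEphemeralVolume(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, true)()

	if !utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume) {
		t.Fatal("expected GenericEphemeralVolume to be enabled")
	}
}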
@ -346,6 +346,18 @@ func TestValidatePodFailures(t *testing.T) {
},
}

failGenericEphemeralPod := defaultPod()
failGenericEphemeralPod.Spec.Volumes = []api.Volume{
{
Name: "generic ephemeral volume",
VolumeSource: api.VolumeSource{
Ephemeral: &api.EphemeralVolumeSource{
VolumeClaimTemplate: &api.PersistentVolumeClaimTemplate{},
},
},
},
}

errorCases := map[string]struct {
pod *api.Pod
psp *policy.PodSecurityPolicy
@ -485,6 +497,11 @@ func TestValidatePodFailures(t *testing.T) {
psp: defaultPSP(),
expectedError: "csi volumes are not allowed to be used",
},
"generic ephemeral volumes without proper policy set": {
pod: failGenericEphemeralPod,
psp: defaultPSP(),
expectedError: "ephemeral volumes are not allowed to be used",
},
}
for name, test := range errorCases {
t.Run(name, func(t *testing.T) {
@ -888,6 +905,18 @@ func TestValidatePodSuccess(t *testing.T) {
},
}

genericEphemeralPod := defaultPod()
genericEphemeralPod.Spec.Volumes = []api.Volume{
{
Name: "generic ephemeral volume",
VolumeSource: api.VolumeSource{
Ephemeral: &api.EphemeralVolumeSource{
VolumeClaimTemplate: &api.PersistentVolumeClaimTemplate{},
},
},
},
}

successCases := map[string]struct {
pod *api.Pod
psp *policy.PodSecurityPolicy
@ -995,6 +1024,22 @@ func TestValidatePodSuccess(t *testing.T) {
return psp
}(),
},
"generic ephemeral volume policy with generic ephemeral volume used": {
pod: genericEphemeralPod,
psp: func() *policy.PodSecurityPolicy {
psp := defaultPSP()
psp.Spec.Volumes = []policy.FSType{policy.Ephemeral}
return psp
}(),
},
"policy.All with generic ephemeral volume used": {
pod: genericEphemeralPod,
psp: func() *policy.PodSecurityPolicy {
psp := defaultPSP()
psp.Spec.Volumes = []policy.FSType{policy.All}
return psp
}(),
},
}

for name, test := range successCases {
@ -1328,6 +1373,7 @@ func defaultV1Pod() *v1.Pod {
// the FSTypeAll wildcard.
func TestValidateAllowedVolumes(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, true)()

val := reflect.ValueOf(api.VolumeSource{})

@ -68,6 +68,7 @@ func GetAllFSTypesAsSet() sets.String {
string(policy.PortworxVolume),
string(policy.ScaleIO),
string(policy.CSI),
string(policy.Ephemeral),
)
return fstypes
}
@ -131,6 +132,8 @@ func GetVolumeFSType(v api.Volume) (policy.FSType, error) {
return policy.ScaleIO, nil
case v.CSI != nil:
return policy.CSI, nil
case v.Ephemeral != nil:
return policy.Ephemeral, nil
}

return "", fmt.Errorf("unknown volume type for volume: %#v", v)
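For PodSecurityPolicy purposes a generic ephemeral volume is its own volume type, so a policy has to list ephemeral (or the * wildcard) in spec.volumes before such a pod is admitted. A rough usage sketch of the mapping above, assuming the usual import paths in this repository:

package example

import (
	"fmt"

	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/policy"
	psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util"
)

// classifyScratchVolume shows how a generic ephemeral volume is categorized:
// GetVolumeFSType returns policy.Ephemeral, which the PSP must allow.
func classifyScratchVolume() {
	vol := api.Volume{
		Name: "scratch",
		VolumeSource: api.VolumeSource{
			Ephemeral: &api.EphemeralVolumeSource{
				VolumeClaimTemplate: &api.PersistentVolumeClaimTemplate{},
			},
		},
	}
	fsType, err := psputil.GetVolumeFSType(vol)
	fmt.Println(fsType == policy.Ephemeral, err) // true <nil>
}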
@ -630,6 +630,7 @@ func TestAdmitCaps(t *testing.T) {

func TestAdmitVolumes(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, true)()

val := reflect.ValueOf(kapi.VolumeSource{})

@ -156,6 +156,33 @@ type VolumeSource struct {
// CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).
// +optional
CSI *CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,28,opt,name=csi"`
// Ephemeral represents a volume that is handled by a cluster storage driver (Alpha feature).
// The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
// and deleted when the pod is removed.
//
// Use this if:
// a) the volume is only needed while the pod runs,
// b) features of normal volumes like restoring from snapshot or capacity
// tracking are needed,
// c) the storage driver is specified through a storage class, and
// d) the storage driver supports dynamic volume provisioning through
// a PersistentVolumeClaim (see EphemeralVolumeSource for more
// information on the connection between this volume type
// and PersistentVolumeClaim).
//
// Use PersistentVolumeClaim or one of the vendor-specific
// APIs for volumes that persist for longer than the lifecycle
// of an individual pod.
//
// Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
// be used that way - see the documentation of the driver for
// more information.
//
// A pod can use both types of ephemeral volumes and
// persistent volumes at the same time.
//
// +optional
Ephemeral *EphemeralVolumeSource `json:"ephemeral,omitempty" protobuf:"bytes,29,opt,name=ephemeral"`
}

// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
@ -1746,6 +1773,54 @@ type CSIVolumeSource struct {
NodePublishSecretRef *LocalObjectReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,5,opt,name=nodePublishSecretRef"`
}

// Represents an ephemeral volume that is handled by a normal storage driver.
type EphemeralVolumeSource struct {
// Will be used to create a stand-alone PVC to provision the volume.
// The pod in which this EphemeralVolumeSource is embedded will be the
// owner of the PVC, i.e. the PVC will be deleted together with the
// pod. The name of the PVC will be `<pod name>-<volume name>` where
// `<volume name>` is the name from the `PodSpec.Volumes` array
// entry. Pod validation will reject the pod if the concatenated name
// is not valid for a PVC (for example, too long).
//
// An existing PVC with that name that is not owned by the pod
// will *not* be used for the pod to avoid using an unrelated
// volume by mistake. Starting the pod is then blocked until
// the unrelated PVC is removed. If such a pre-created PVC is
// meant to be used by the pod, the PVC has to be updated with an
// owner reference to the pod once the pod exists. Normally
// this should not be necessary, but it may be useful when
// manually reconstructing a broken cluster.
//
// This field is read-only and no changes will be made by Kubernetes
// to the PVC after it has been created.
//
// Required, must not be nil.
VolumeClaimTemplate *PersistentVolumeClaimTemplate `json:"volumeClaimTemplate,omitempty" protobuf:"bytes,1,opt,name=volumeClaimTemplate"`

// Specifies a read-only configuration for the volume.
// Defaults to false (read/write).
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
}

// PersistentVolumeClaimTemplate is used to produce
// PersistentVolumeClaim objects as part of an EphemeralVolumeSource.
type PersistentVolumeClaimTemplate struct {
// May contain labels and annotations that will be copied into the PVC
// when creating it. No other fields are allowed and will be rejected during
// validation.
//
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

// The specification for the PersistentVolumeClaim. The entire content is
// copied unchanged into the PVC that gets created from this
// template. The same fields as in a PersistentVolumeClaim
// are also valid here.
Spec PersistentVolumeClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"`
}

// ContainerPort represents a network port in a single container.
type ContainerPort struct {
// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each
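Putting the new fields together, a pod declares a generic ephemeral volume by embedding a PVC template directly in its volume list. The sketch below only illustrates the API surface added here: the "scratch" storage class and all names are invented; for this pod the control plane would create a PVC named "my-app-scratch-volume" that is owned by (and deleted with) the pod.

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// examplePod builds a pod with one generic ephemeral volume backed by a
// 1Gi PVC that is provisioned from the (hypothetical) "scratch" storage class.
func examplePod() *v1.Pod {
	storageClass := "scratch"
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "my-app"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{Name: "app", Image: "busybox"}},
			Volumes: []v1.Volume{{
				Name: "scratch-volume",
				VolumeSource: v1.VolumeSource{
					Ephemeral: &v1.EphemeralVolumeSource{
						VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
							Spec: v1.PersistentVolumeClaimSpec{
								AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
								StorageClassName: &storageClass,
								Resources: v1.ResourceRequirements{
									Requests: v1.ResourceList{
										v1.ResourceStorage: resource.MustParse("1Gi"),
									},
								},
							},
						},
					},
				},
			}},
		},
	}
}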
@ -313,6 +313,7 @@ const (
PortworxVolume FSType = "portworxVolume"
ScaleIO FSType = "scaleIO"
CSI FSType = "csi"
Ephemeral FSType = "ephemeral"
All FSType = "*"
)