Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-21 10:51:29 +00:00)

Merge pull request #92784 from pohly/generic-ephemeral-inline-volumes

generic ephemeral inline volumes

Commit 0cb7e320a5

api/openapi-spec/swagger.json (generated, 35 changes)
@@ -6278,6 +6278,20 @@
        ],
        "type": "object"
      },
      "io.k8s.api.core.v1.EphemeralVolumeSource": {
        "description": "Represents an ephemeral volume that is handled by a normal storage driver.",
        "properties": {
          "readOnly": {
            "description": "Specifies a read-only configuration for the volume. Defaults to false (read/write).",
            "type": "boolean"
          },
          "volumeClaimTemplate": {
            "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimTemplate",
            "description": "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `<pod name>-<volume name>` where `<volume name>` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to be updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil."
          }
        },
        "type": "object"
      },
      "io.k8s.api.core.v1.Event": {
        "description": "Event is a report of an event somewhere in the cluster.",
        "properties": {
@@ -7903,6 +7917,23 @@
        },
        "type": "object"
      },
      "io.k8s.api.core.v1.PersistentVolumeClaimTemplate": {
        "description": "PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.",
        "properties": {
          "metadata": {
            "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta",
            "description": "May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation."
          },
          "spec": {
            "$ref": "#/definitions/io.k8s.api.core.v1.PersistentVolumeClaimSpec",
            "description": "The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here."
          }
        },
        "required": [
          "spec"
        ],
        "type": "object"
      },
      "io.k8s.api.core.v1.PersistentVolumeClaimVolumeSource": {
        "description": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).",
        "properties": {
@@ -10313,6 +10344,10 @@
          "$ref": "#/definitions/io.k8s.api.core.v1.EmptyDirVolumeSource",
          "description": "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir"
        },
        "ephemeral": {
          "$ref": "#/definitions/io.k8s.api.core.v1.EphemeralVolumeSource",
          "description": "Ephemeral represents a volume that is handled by a cluster storage driver (Alpha feature). The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time."
        },
        "fc": {
          "$ref": "#/definitions/io.k8s.api.core.v1.FCVolumeSource",
          "description": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod."
        },
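For orientation, a minimal sketch of a pod using the new field, built against the k8s.io/api/core/v1 types that mirror these definitions; the image and StorageClass name are placeholder assumptions, not part of the commit:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    storageClass := "scratch-class" // placeholder StorageClass name
    pod := &corev1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: "my-app"},
        Spec: corev1.PodSpec{
            Containers: []corev1.Container{{
                Name:         "app",
                Image:        "example.com/app:latest", // placeholder image
                VolumeMounts: []corev1.VolumeMount{{Name: "scratch", MountPath: "/scratch"}},
            }},
            Volumes: []corev1.Volume{{
                Name: "scratch",
                VolumeSource: corev1.VolumeSource{
                    Ephemeral: &corev1.EphemeralVolumeSource{
                        VolumeClaimTemplate: &corev1.PersistentVolumeClaimTemplate{
                            // Only labels and annotations are allowed here.
                            ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"app": "my-app"}},
                            Spec: corev1.PersistentVolumeClaimSpec{
                                AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
                                StorageClassName: &storageClass,
                                Resources: corev1.ResourceRequirements{
                                    Requests: corev1.ResourceList{
                                        corev1.ResourceStorage: resource.MustParse("1Gi"),
                                    },
                                },
                            },
                        },
                    },
                },
            }},
        },
    }
    // The PVC created for this pod will be named "<pod name>-<volume name>".
    fmt.Println(pod.Name + "-" + pod.Spec.Volumes[0].Name) // my-app-scratch
}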
@@ -75,6 +75,7 @@ go_library(
        "//pkg/controller/ttl:go_default_library",
        "//pkg/controller/ttlafterfinished:go_default_library",
        "//pkg/controller/volume/attachdetach:go_default_library",
        "//pkg/controller/volume/ephemeral:go_default_library",
        "//pkg/controller/volume/expand:go_default_library",
        "//pkg/controller/volume/persistentvolume:go_default_library",
        "//pkg/controller/volume/persistentvolume/config:go_default_library",
@@ -424,6 +424,7 @@ func NewControllerInitializers(loopMode ControllerLoopMode) map[string]InitFunc
    controllers["pv-protection"] = startPVProtectionController
    controllers["ttl-after-finished"] = startTTLAfterFinishedController
    controllers["root-ca-cert-publisher"] = startRootCACertPublisher
    controllers["ephemeral-volume"] = startEphemeralVolumeController

    return controllers
}
@@ -57,6 +57,7 @@ import (
    ttlcontroller "k8s.io/kubernetes/pkg/controller/ttl"
    "k8s.io/kubernetes/pkg/controller/ttlafterfinished"
    "k8s.io/kubernetes/pkg/controller/volume/attachdetach"
    "k8s.io/kubernetes/pkg/controller/volume/ephemeral"
    "k8s.io/kubernetes/pkg/controller/volume/expand"
    persistentvolumecontroller "k8s.io/kubernetes/pkg/controller/volume/persistentvolume"
    "k8s.io/kubernetes/pkg/controller/volume/pvcprotection"
@@ -373,6 +374,22 @@ func startVolumeExpandController(ctx ControllerContext) (http.Handler, bool, err
    return nil, false, nil
}

func startEphemeralVolumeController(ctx ControllerContext) (http.Handler, bool, error) {
    if utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume) {
        ephemeralController, err := ephemeral.NewController(
            ctx.ClientBuilder.ClientOrDie("ephemeral-volume-controller"),
            ctx.InformerFactory.Core().V1().Pods(),
            ctx.InformerFactory.Core().V1().PersistentVolumeClaims())
        if err != nil {
            return nil, true, fmt.Errorf("failed to start ephemeral volume controller: %v", err)
        }
        // TODO (before beta at the latest): make this configurable similar to the EndpointController
        go ephemeralController.Run(1 /* int(ctx.ComponentConfig.EphemeralController.ConcurrentEphemeralVolumeSyncs) */, ctx.Stop)
        return nil, true, nil
    }
    return nil, false, nil
}

func startEndpointController(ctx ControllerContext) (http.Handler, bool, error) {
    go endpointcontroller.NewEndpointController(
        ctx.InformerFactory.Core().V1().Pods(),
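The wiring above only registers the controller; the interesting work happens in pkg/controller/volume/ephemeral. As a rough sketch of what that sync loop has to do per the API contract described in this commit (this is not the actual controller code): for every generic ephemeral volume of a pod, make sure a PVC named `<pod name>-<volume name>` exists and is owned by the pod.

package ephemeralsketch

import (
    "context"

    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// syncPod ensures that each generic ephemeral volume of the pod has its PVC.
// Ownership of pre-existing PVCs is checked separately (see the validation
// and PVC protection changes elsewhere in this commit).
func syncPod(ctx context.Context, client kubernetes.Interface, pod *corev1.Pod) error {
    for _, vol := range pod.Spec.Volumes {
        if vol.Ephemeral == nil || vol.Ephemeral.VolumeClaimTemplate == nil {
            continue
        }
        pvcName := pod.Name + "-" + vol.Name
        _, err := client.CoreV1().PersistentVolumeClaims(pod.Namespace).Get(ctx, pvcName, metav1.GetOptions{})
        if err == nil {
            continue // PVC already exists
        }
        if !apierrors.IsNotFound(err) {
            return err
        }
        isTrue := true
        tmpl := vol.Ephemeral.VolumeClaimTemplate
        pvc := &corev1.PersistentVolumeClaim{
            ObjectMeta: metav1.ObjectMeta{
                Name:        pvcName,
                Labels:      tmpl.Labels,
                Annotations: tmpl.Annotations,
                // The pod owns the PVC, so deleting the pod garbage-collects the PVC.
                OwnerReferences: []metav1.OwnerReference{{
                    APIVersion:         "v1",
                    Kind:               "Pod",
                    Name:               pod.Name,
                    UID:                pod.UID,
                    Controller:         &isTrue,
                    BlockOwnerDeletion: &isTrue,
                }},
            },
            Spec: tmpl.Spec, // copied unchanged, as the API docs require
        }
        if _, err := client.CoreV1().PersistentVolumeClaims(pod.Namespace).Create(ctx, pvc, metav1.CreateOptions{}); err != nil {
            return err
        }
    }
    return nil
}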
@@ -539,6 +556,7 @@ func startPVCProtectionController(ctx ControllerContext) (http.Handler, bool, er
        ctx.InformerFactory.Core().V1().Pods(),
        ctx.ClientBuilder.ClientOrDie("pvc-protection-controller"),
        utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection),
        utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume),
    )
    if err != nil {
        return nil, true, fmt.Errorf("failed to start the pvc protection controller: %v", err)
@@ -431,6 +431,7 @@ func dropDisabledFields(
    dropDisabledProcMountField(podSpec, oldPodSpec)

    dropDisabledCSIVolumeSourceAlphaFields(podSpec, oldPodSpec)
    dropDisabledEphemeralVolumeSourceAlphaFields(podSpec, oldPodSpec)

    if !utilfeature.DefaultFeatureGate.Enabled(features.NonPreemptingPriority) &&
        !podPriorityInUse(oldPodSpec) {

@@ -499,6 +500,16 @@ func dropDisabledCSIVolumeSourceAlphaFields(podSpec, oldPodSpec *api.PodSpec) {
    }
}

// dropDisabledEphemeralVolumeSourceAlphaFields removes disabled alpha fields from []EphemeralVolumeSource.
// This should be called from PrepareForCreate/PrepareForUpdate for all pod spec resources containing an EphemeralVolumeSource.
func dropDisabledEphemeralVolumeSourceAlphaFields(podSpec, oldPodSpec *api.PodSpec) {
    if !utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume) && !csiInUse(oldPodSpec) {
        for i := range podSpec.Volumes {
            podSpec.Volumes[i].Ephemeral = nil
        }
    }
}

func ephemeralContainersInUse(podSpec *api.PodSpec) bool {
    if podSpec == nil {
        return false
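This follows the standard "drop disabled fields" pattern: clear the gated field on create, but never on update if the old object was already using it, so updates cannot silently lose data. A standalone sketch of the pattern with hypothetical stand-in types (the committed guard calls csiInUse; the pattern itself calls for an "in use" check on the same field, as sketched here):

package main

import "fmt"

// Minimal stand-ins for api.PodSpec and api.EphemeralVolumeSource.
type ephemeralVolumeSource struct{}
type volume struct {
    Name      string
    Ephemeral *ephemeralVolumeSource
}
type podSpec struct {
    Volumes []volume
}

// dropDisabledEphemeral clears the gated field unless the feature gate is on
// or the old object already used it.
func dropDisabledEphemeral(spec, oldSpec *podSpec, gateEnabled bool) {
    if gateEnabled || ephemeralInUse(oldSpec) {
        return
    }
    for i := range spec.Volumes {
        spec.Volumes[i].Ephemeral = nil
    }
}

func ephemeralInUse(spec *podSpec) bool {
    if spec == nil {
        return false
    }
    for _, v := range spec.Volumes {
        if v.Ephemeral != nil {
            return true
        }
    }
    return false
}

func main() {
    newSpec := &podSpec{Volumes: []volume{{Name: "scratch", Ephemeral: &ephemeralVolumeSource{}}}}
    dropDisabledEphemeral(newSpec, nil, false) // gate off, fresh create
    fmt.Println(newSpec.Volumes[0].Ephemeral)  // <nil>: the field was dropped
}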
@@ -159,7 +159,7 @@ func TestCompatibility_v1_PodSecurityContext(t *testing.T) {
    }

    validator := func(obj runtime.Object) field.ErrorList {
-       return validation.ValidatePodSpec(&(obj.(*api.Pod).Spec), field.NewPath("spec"))
+       return validation.ValidatePodSpec(&(obj.(*api.Pod).Spec), &(obj.(*api.Pod).ObjectMeta), field.NewPath("spec"))
    }

    for _, tc := range cases {
pkg/apis/apps/v1/zz_generated.defaults.go (generated, 29 changes)

@@ -94,6 +94,13 @@ func SetObjectDefaults_DaemonSet(in *v1.DaemonSet) {
        if a.VolumeSource.ScaleIO != nil {
            corev1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                corev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Spec.Template.Spec.InitContainers {
        a := &in.Spec.Template.Spec.InitContainers[i]

@@ -301,6 +308,13 @@ func SetObjectDefaults_Deployment(in *v1.Deployment) {
        if a.VolumeSource.ScaleIO != nil {
            corev1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                corev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Spec.Template.Spec.InitContainers {
        a := &in.Spec.Template.Spec.InitContainers[i]

@@ -508,6 +522,13 @@ func SetObjectDefaults_ReplicaSet(in *v1.ReplicaSet) {
        if a.VolumeSource.ScaleIO != nil {
            corev1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                corev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Spec.Template.Spec.InitContainers {
        a := &in.Spec.Template.Spec.InitContainers[i]

@@ -715,6 +736,13 @@ func SetObjectDefaults_StatefulSet(in *v1.StatefulSet) {
        if a.VolumeSource.ScaleIO != nil {
            corev1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                corev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Spec.Template.Spec.InitContainers {
        a := &in.Spec.Template.Spec.InitContainers[i]

@@ -863,6 +891,7 @@ func SetObjectDefaults_StatefulSet(in *v1.StatefulSet) {
    for i := range in.Spec.VolumeClaimTemplates {
        a := &in.Spec.VolumeClaimTemplates[i]
        corev1.SetDefaults_PersistentVolumeClaim(a)
        corev1.SetDefaults_PersistentVolumeClaimSpec(&a.Spec)
        corev1.SetDefaults_ResourceList(&a.Spec.Resources.Limits)
        corev1.SetDefaults_ResourceList(&a.Spec.Resources.Requests)
        corev1.SetDefaults_ResourceList(&a.Status.Capacity)
pkg/apis/apps/v1beta1/zz_generated.defaults.go (generated, 15 changes)

@@ -90,6 +90,13 @@ func SetObjectDefaults_Deployment(in *v1beta1.Deployment) {
        if a.VolumeSource.ScaleIO != nil {
            v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                v1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Spec.Template.Spec.InitContainers {
        a := &in.Spec.Template.Spec.InitContainers[i]

@@ -297,6 +304,13 @@ func SetObjectDefaults_StatefulSet(in *v1beta1.StatefulSet) {
        if a.VolumeSource.ScaleIO != nil {
            v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                v1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Spec.Template.Spec.InitContainers {
        a := &in.Spec.Template.Spec.InitContainers[i]

@@ -445,6 +459,7 @@ func SetObjectDefaults_StatefulSet(in *v1beta1.StatefulSet) {
    for i := range in.Spec.VolumeClaimTemplates {
        a := &in.Spec.VolumeClaimTemplates[i]
        v1.SetDefaults_PersistentVolumeClaim(a)
        v1.SetDefaults_PersistentVolumeClaimSpec(&a.Spec)
        v1.SetDefaults_ResourceList(&a.Spec.Resources.Limits)
        v1.SetDefaults_ResourceList(&a.Spec.Resources.Requests)
        v1.SetDefaults_ResourceList(&a.Status.Capacity)
pkg/apis/apps/v1beta2/zz_generated.defaults.go (generated, 29 changes)

@@ -94,6 +94,13 @@ func SetObjectDefaults_DaemonSet(in *v1beta2.DaemonSet) {
        if a.VolumeSource.ScaleIO != nil {
            v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                v1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Spec.Template.Spec.InitContainers {
        a := &in.Spec.Template.Spec.InitContainers[i]

@@ -301,6 +308,13 @@ func SetObjectDefaults_Deployment(in *v1beta2.Deployment) {
        if a.VolumeSource.ScaleIO != nil {
            v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                v1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Spec.Template.Spec.InitContainers {
        a := &in.Spec.Template.Spec.InitContainers[i]

@@ -508,6 +522,13 @@ func SetObjectDefaults_ReplicaSet(in *v1beta2.ReplicaSet) {
        if a.VolumeSource.ScaleIO != nil {
            v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                v1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Spec.Template.Spec.InitContainers {
        a := &in.Spec.Template.Spec.InitContainers[i]

@@ -715,6 +736,13 @@ func SetObjectDefaults_StatefulSet(in *v1beta2.StatefulSet) {
        if a.VolumeSource.ScaleIO != nil {
            v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                v1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Spec.Template.Spec.InitContainers {
        a := &in.Spec.Template.Spec.InitContainers[i]

@@ -863,6 +891,7 @@ func SetObjectDefaults_StatefulSet(in *v1beta2.StatefulSet) {
    for i := range in.Spec.VolumeClaimTemplates {
        a := &in.Spec.VolumeClaimTemplates[i]
        v1.SetDefaults_PersistentVolumeClaim(a)
        v1.SetDefaults_PersistentVolumeClaimSpec(&a.Spec)
        v1.SetDefaults_ResourceList(&a.Spec.Resources.Limits)
        v1.SetDefaults_ResourceList(&a.Spec.Resources.Requests)
        v1.SetDefaults_ResourceList(&a.Status.Capacity)
pkg/apis/batch/v1/zz_generated.defaults.go (generated, 7 changes)

@@ -88,6 +88,13 @@ func SetObjectDefaults_Job(in *v1.Job) {
        if a.VolumeSource.ScaleIO != nil {
            corev1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                corev1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                corev1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Spec.Template.Spec.InitContainers {
        a := &in.Spec.Template.Spec.InitContainers[i]
pkg/apis/batch/v1beta1/zz_generated.defaults.go (generated, 14 changes)

@@ -89,6 +89,13 @@ func SetObjectDefaults_CronJob(in *v1beta1.CronJob) {
        if a.VolumeSource.ScaleIO != nil {
            v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                v1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Spec.JobTemplate.Spec.Template.Spec.InitContainers {
        a := &in.Spec.JobTemplate.Spec.Template.Spec.InitContainers[i]

@@ -295,6 +302,13 @@ func SetObjectDefaults_JobTemplate(in *v1beta1.JobTemplate) {
        if a.VolumeSource.ScaleIO != nil {
            v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                v1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Template.Spec.Template.Spec.InitContainers {
        a := &in.Template.Spec.Template.Spec.InitContainers[i]
pkg/apis/batch/v2alpha1/zz_generated.defaults.go (generated, 14 changes)

@@ -89,6 +89,13 @@ func SetObjectDefaults_CronJob(in *v2alpha1.CronJob) {
        if a.VolumeSource.ScaleIO != nil {
            v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                v1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Spec.JobTemplate.Spec.Template.Spec.InitContainers {
        a := &in.Spec.JobTemplate.Spec.Template.Spec.InitContainers[i]

@@ -295,6 +302,13 @@ func SetObjectDefaults_JobTemplate(in *v2alpha1.JobTemplate) {
        if a.VolumeSource.ScaleIO != nil {
            v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                v1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Template.Spec.Template.Spec.InitContainers {
        a := &in.Template.Spec.Template.Spec.InitContainers[i]
@@ -263,6 +263,14 @@ var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
            i.ISCSIInterface = "default"
        }
    },
    func(i *core.PersistentVolumeClaimSpec, c fuzz.Continue) {
        // Match defaulting in pkg/apis/core/v1/defaults.go.
        volumeMode := core.PersistentVolumeMode(c.RandString())
        if volumeMode == "" {
            volumeMode = core.PersistentVolumeFilesystem
        }
        i.VolumeMode = &volumeMode
    },
    func(d *core.DNSPolicy, c fuzz.Continue) {
        policies := []core.DNSPolicy{core.DNSClusterFirst, core.DNSDefault}
        *d = policies[c.Rand.Intn(len(policies))]
@@ -157,6 +157,33 @@ type VolumeSource struct {
    // CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).
    // +optional
    CSI *CSIVolumeSource
    // Ephemeral represents a volume that is handled by a cluster storage driver (Alpha feature).
    // The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
    // and deleted when the pod is removed.
    //
    // Use this if:
    // a) the volume is only needed while the pod runs,
    // b) features of normal volumes like restoring from snapshot or capacity
    //    tracking are needed,
    // c) the storage driver is specified through a storage class, and
    // d) the storage driver supports dynamic volume provisioning through
    //    a PersistentVolumeClaim (see EphemeralVolumeSource for more
    //    information on the connection between this volume type
    //    and PersistentVolumeClaim).
    //
    // Use PersistentVolumeClaim or one of the vendor-specific
    // APIs for volumes that persist for longer than the lifecycle
    // of an individual pod.
    //
    // Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
    // be used that way - see the documentation of the driver for
    // more information.
    //
    // A pod can use both types of ephemeral volumes and
    // persistent volumes at the same time.
    //
    // +optional
    Ephemeral *EphemeralVolumeSource
}

// PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs.

@@ -1670,6 +1697,53 @@ type CSIVolumeSource struct {
    NodePublishSecretRef *LocalObjectReference
}

// EphemeralVolumeSource represents an ephemeral volume that is handled by a normal storage driver.
type EphemeralVolumeSource struct {
    // VolumeClaimTemplate will be used to create a stand-alone PVC to provision the volume.
    // The pod in which this EphemeralVolumeSource is embedded will be the
    // owner of the PVC, i.e. the PVC will be deleted together with the
    // pod. The name of the PVC will be `<pod name>-<volume name>` where
    // `<volume name>` is the name from the `PodSpec.Volumes` array
    // entry. Pod validation will reject the pod if the concatenated name
    // is not valid for a PVC (for example, too long).
    //
    // An existing PVC with that name that is not owned by the pod
    // will *not* be used for the pod to avoid using an unrelated
    // volume by mistake. Starting the pod is then blocked until
    // the unrelated PVC is removed. If such a pre-created PVC is
    // meant to be used by the pod, the PVC has to be updated with an
    // owner reference to the pod once the pod exists. Normally
    // this should not be necessary, but it may be useful when
    // manually reconstructing a broken cluster.
    //
    // This field is read-only and no changes will be made by Kubernetes
    // to the PVC after it has been created.
    //
    // Required, must not be nil.
    VolumeClaimTemplate *PersistentVolumeClaimTemplate

    // ReadOnly specifies a read-only configuration for the volume.
    // Defaults to false (read/write).
    // +optional
    ReadOnly bool
}

// PersistentVolumeClaimTemplate is used to produce
// PersistentVolumeClaim objects as part of an EphemeralVolumeSource.
type PersistentVolumeClaimTemplate struct {
    // ObjectMeta may contain labels and annotations that will be copied into the PVC
    // when creating it. No other fields are allowed and will be rejected during
    // validation.
    // +optional
    metav1.ObjectMeta

    // Spec for the PersistentVolumeClaim. The entire content is
    // copied unchanged into the PVC that gets created from this
    // template. The same fields as in a PersistentVolumeClaim
    // are also valid here.
    Spec PersistentVolumeClaimSpec
}

// ContainerPort represents a network port in a single container
type ContainerPort struct {
    // Optional: If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port
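The "manually reconstructing a broken cluster" escape hatch in the comment above boils down to adding an owner reference by hand. A minimal sketch of that step (the helper name is an assumption, not part of the commit):

package pvcsketch

import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// adoptPVC marks an existing, pre-created PVC as owned by the pod so that
// the pod is allowed to use it as its generic ephemeral volume.
func adoptPVC(pvc *corev1.PersistentVolumeClaim, pod *corev1.Pod) {
    isTrue := true
    pvc.OwnerReferences = append(pvc.OwnerReferences, metav1.OwnerReference{
        APIVersion:         "v1",
        Kind:               "Pod",
        Name:               pod.Name,
        UID:                pod.UID,
        Controller:         &isTrue,
        BlockOwnerDeletion: &isTrue,
    })
}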
@@ -285,9 +285,11 @@ func SetDefaults_PersistentVolumeClaim(obj *v1.PersistentVolumeClaim) {
    if obj.Status.Phase == "" {
        obj.Status.Phase = v1.ClaimPending
    }
-   if obj.Spec.VolumeMode == nil {
-       obj.Spec.VolumeMode = new(v1.PersistentVolumeMode)
-       *obj.Spec.VolumeMode = v1.PersistentVolumeFilesystem
-   }
}

+func SetDefaults_PersistentVolumeClaimSpec(obj *v1.PersistentVolumeClaimSpec) {
+   if obj.VolumeMode == nil {
+       obj.VolumeMode = new(v1.PersistentVolumeMode)
+       *obj.VolumeMode = v1.PersistentVolumeFilesystem
+   }
+}
+
func SetDefaults_ISCSIVolumeSource(obj *v1.ISCSIVolumeSource) {
@@ -142,6 +142,7 @@ func TestWorkloadDefaults(t *testing.T) {
    ".Spec.Volumes[0].VolumeSource.DownwardAPI.DefaultMode":                       `420`,
    ".Spec.Volumes[0].VolumeSource.DownwardAPI.Items[0].FieldRef.APIVersion":      `"v1"`,
    ".Spec.Volumes[0].VolumeSource.EmptyDir":                                      `{}`,
    ".Spec.Volumes[0].VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.VolumeMode": `"Filesystem"`,
    ".Spec.Volumes[0].VolumeSource.HostPath.Type":                                 `""`,
    ".Spec.Volumes[0].VolumeSource.ISCSI.ISCSIInterface":                          `"default"`,
    ".Spec.Volumes[0].VolumeSource.Projected.DefaultMode":                         `420`,

@@ -265,6 +266,7 @@ func TestPodDefaults(t *testing.T) {
    ".Spec.Volumes[0].VolumeSource.DownwardAPI.DefaultMode":                       `420`,
    ".Spec.Volumes[0].VolumeSource.DownwardAPI.Items[0].FieldRef.APIVersion":      `"v1"`,
    ".Spec.Volumes[0].VolumeSource.EmptyDir":                                      `{}`,
    ".Spec.Volumes[0].VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.VolumeMode": `"Filesystem"`,
    ".Spec.Volumes[0].VolumeSource.HostPath.Type":                                 `""`,
    ".Spec.Volumes[0].VolumeSource.ISCSI.ISCSIInterface":                          `"default"`,
    ".Spec.Volumes[0].VolumeSource.Projected.DefaultMode":                         `420`,
@@ -1375,6 +1377,58 @@ func TestSetDefaultPersistentVolumeClaim(t *testing.T) {
    }
}

func TestSetDefaultEphemeral(t *testing.T) {
    fsMode := v1.PersistentVolumeFilesystem
    blockMode := v1.PersistentVolumeBlock

    tests := []struct {
        name               string
        volumeMode         *v1.PersistentVolumeMode
        expectedVolumeMode v1.PersistentVolumeMode
    }{
        {
            name:               "volume mode nil",
            volumeMode:         nil,
            expectedVolumeMode: v1.PersistentVolumeFilesystem,
        },
        {
            name:               "volume mode filesystem",
            volumeMode:         &fsMode,
            expectedVolumeMode: v1.PersistentVolumeFilesystem,
        },
        {
            name:               "volume mode block",
            volumeMode:         &blockMode,
            expectedVolumeMode: v1.PersistentVolumeBlock,
        },
    }

    for _, test := range tests {
        pod := &v1.Pod{
            Spec: v1.PodSpec{
                Volumes: []v1.Volume{
                    {
                        VolumeSource: v1.VolumeSource{
                            Ephemeral: &v1.EphemeralVolumeSource{
                                VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
                                    Spec: v1.PersistentVolumeClaimSpec{
                                        VolumeMode: test.volumeMode,
                                    },
                                },
                            },
                        },
                    },
                },
            },
        }
        obj1 := roundTrip(t, runtime.Object(pod))
        pod1 := obj1.(*v1.Pod)
        if *pod1.Spec.Volumes[0].VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.VolumeMode != test.expectedVolumeMode {
            t.Errorf("Test %s failed, Expected VolumeMode: %v, but got %v", test.name, test.volumeMode, *pod1.Spec.Volumes[0].VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.VolumeMode)
        }
    }
}

func TestSetDefaultEndpointsProtocol(t *testing.T) {
    in := &v1.Endpoints{Subsets: []v1.EndpointSubset{
        {Ports: []v1.EndpointPort{{}, {Protocol: "UDP"}, {}}},
pkg/apis/core/v1/zz_generated.conversion.go (generated, 70 changes)

@@ -541,6 +541,16 @@ func RegisterConversions(s *runtime.Scheme) error {
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*v1.EphemeralVolumeSource)(nil), (*core.EphemeralVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(a.(*v1.EphemeralVolumeSource), b.(*core.EphemeralVolumeSource), scope)
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*core.EphemeralVolumeSource)(nil), (*v1.EphemeralVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource(a.(*core.EphemeralVolumeSource), b.(*v1.EphemeralVolumeSource), scope)
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*v1.Event)(nil), (*core.Event)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_v1_Event_To_core_Event(a.(*v1.Event), b.(*core.Event), scope)
    }); err != nil {

@@ -1131,6 +1141,16 @@ func RegisterConversions(s *runtime.Scheme) error {
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeClaimTemplate)(nil), (*core.PersistentVolumeClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate(a.(*v1.PersistentVolumeClaimTemplate), b.(*core.PersistentVolumeClaimTemplate), scope)
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*core.PersistentVolumeClaimTemplate)(nil), (*v1.PersistentVolumeClaimTemplate)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate(a.(*core.PersistentVolumeClaimTemplate), b.(*v1.PersistentVolumeClaimTemplate), scope)
    }); err != nil {
        return err
    }
    if err := s.AddGeneratedConversionFunc((*v1.PersistentVolumeClaimVolumeSource)(nil), (*core.PersistentVolumeClaimVolumeSource)(nil), func(a, b interface{}, scope conversion.Scope) error {
        return Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(a.(*v1.PersistentVolumeClaimVolumeSource), b.(*core.PersistentVolumeClaimVolumeSource), scope)
    }); err != nil {

@@ -3514,6 +3534,28 @@ func Convert_core_EphemeralContainers_To_v1_EphemeralContainers(in *core.Ephemer
    return autoConvert_core_EphemeralContainers_To_v1_EphemeralContainers(in, out, s)
}

func autoConvert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(in *v1.EphemeralVolumeSource, out *core.EphemeralVolumeSource, s conversion.Scope) error {
    out.VolumeClaimTemplate = (*core.PersistentVolumeClaimTemplate)(unsafe.Pointer(in.VolumeClaimTemplate))
    out.ReadOnly = in.ReadOnly
    return nil
}

// Convert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource is an autogenerated conversion function.
func Convert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(in *v1.EphemeralVolumeSource, out *core.EphemeralVolumeSource, s conversion.Scope) error {
    return autoConvert_v1_EphemeralVolumeSource_To_core_EphemeralVolumeSource(in, out, s)
}

func autoConvert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource(in *core.EphemeralVolumeSource, out *v1.EphemeralVolumeSource, s conversion.Scope) error {
    out.VolumeClaimTemplate = (*v1.PersistentVolumeClaimTemplate)(unsafe.Pointer(in.VolumeClaimTemplate))
    out.ReadOnly = in.ReadOnly
    return nil
}

// Convert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource is an autogenerated conversion function.
func Convert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource(in *core.EphemeralVolumeSource, out *v1.EphemeralVolumeSource, s conversion.Scope) error {
    return autoConvert_core_EphemeralVolumeSource_To_v1_EphemeralVolumeSource(in, out, s)
}

func autoConvert_v1_Event_To_core_Event(in *v1.Event, out *core.Event, s conversion.Scope) error {
    out.ObjectMeta = in.ObjectMeta
    if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil {

@@ -5149,6 +5191,32 @@ func Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(
    return autoConvert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in, out, s)
}

func autoConvert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate(in *v1.PersistentVolumeClaimTemplate, out *core.PersistentVolumeClaimTemplate, s conversion.Scope) error {
    out.ObjectMeta = in.ObjectMeta
    if err := Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil {
        return err
    }
    return nil
}

// Convert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate is an autogenerated conversion function.
func Convert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate(in *v1.PersistentVolumeClaimTemplate, out *core.PersistentVolumeClaimTemplate, s conversion.Scope) error {
    return autoConvert_v1_PersistentVolumeClaimTemplate_To_core_PersistentVolumeClaimTemplate(in, out, s)
}

func autoConvert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate(in *core.PersistentVolumeClaimTemplate, out *v1.PersistentVolumeClaimTemplate, s conversion.Scope) error {
    out.ObjectMeta = in.ObjectMeta
    if err := Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil {
        return err
    }
    return nil
}

// Convert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate is an autogenerated conversion function.
func Convert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate(in *core.PersistentVolumeClaimTemplate, out *v1.PersistentVolumeClaimTemplate, s conversion.Scope) error {
    return autoConvert_core_PersistentVolumeClaimTemplate_To_v1_PersistentVolumeClaimTemplate(in, out, s)
}

func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *core.PersistentVolumeClaimVolumeSource, s conversion.Scope) error {
    out.ClaimName = in.ClaimName
    out.ReadOnly = in.ReadOnly

@@ -8015,6 +8083,7 @@ func autoConvert_v1_VolumeSource_To_core_VolumeSource(in *v1.VolumeSource, out *
    out.ScaleIO = (*core.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO))
    out.StorageOS = (*core.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS))
    out.CSI = (*core.CSIVolumeSource)(unsafe.Pointer(in.CSI))
    out.Ephemeral = (*core.EphemeralVolumeSource)(unsafe.Pointer(in.Ephemeral))
    return nil
}

@@ -8060,6 +8129,7 @@ func autoConvert_core_VolumeSource_To_v1_VolumeSource(in *core.VolumeSource, out
    out.ScaleIO = (*v1.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO))
    out.StorageOS = (*v1.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS))
    out.CSI = (*v1.CSIVolumeSource)(unsafe.Pointer(in.CSI))
    out.Ephemeral = (*v1.EphemeralVolumeSource)(unsafe.Pointer(in.Ephemeral))
    return nil
}
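A note on these generated converters: when the external and internal structs are layout-identical, the generator emits a direct unsafe.Pointer cast instead of a field-by-field copy. A standalone illustration of why that works, with hypothetical types rather than Kubernetes code:

package main

import (
    "fmt"
    "unsafe"
)

// Two distinct named types with identical memory layout, like
// v1.EphemeralVolumeSource and core.EphemeralVolumeSource above.
type v1Source struct {
    Name     string
    ReadOnly bool
}
type coreSource struct {
    Name     string
    ReadOnly bool
}

func main() {
    in := &v1Source{Name: "scratch", ReadOnly: true}
    // Zero-cost "conversion": reinterpret the pointer. This is safe only
    // because field types, order, and alignment match in both structs.
    out := (*coreSource)(unsafe.Pointer(in))
    fmt.Println(out.Name, out.ReadOnly) // scratch true
}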
pkg/apis/core/v1/zz_generated.defaults.go (generated, 22 changes)

@@ -200,6 +200,7 @@ func SetObjectDefaults_PersistentVolume(in *v1.PersistentVolume) {

func SetObjectDefaults_PersistentVolumeClaim(in *v1.PersistentVolumeClaim) {
    SetDefaults_PersistentVolumeClaim(in)
    SetDefaults_PersistentVolumeClaimSpec(&in.Spec)
    SetDefaults_ResourceList(&in.Spec.Resources.Limits)
    SetDefaults_ResourceList(&in.Spec.Resources.Requests)
    SetDefaults_ResourceList(&in.Status.Capacity)

@@ -272,6 +273,13 @@ func SetObjectDefaults_Pod(in *v1.Pod) {
        if a.VolumeSource.ScaleIO != nil {
            SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Spec.InitContainers {
        a := &in.Spec.InitContainers[i]

@@ -478,6 +486,13 @@ func SetObjectDefaults_PodTemplate(in *v1.PodTemplate) {
        if a.VolumeSource.ScaleIO != nil {
            SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Template.Spec.InitContainers {
        a := &in.Template.Spec.InitContainers[i]

@@ -686,6 +701,13 @@ func SetObjectDefaults_ReplicationController(in *v1.ReplicationController) {
        if a.VolumeSource.ScaleIO != nil {
            SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Spec.Template.Spec.InitContainers {
        a := &in.Spec.Template.Spec.InitContainers[i]
@@ -37,6 +37,7 @@ import (
    apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
    v1validation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/diff"
    "k8s.io/apimachinery/pkg/util/intstr"
@@ -349,15 +350,26 @@ func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *fiel
    return allErrs
}

-func ValidateVolumes(volumes []core.Volume, fldPath *field.Path) (map[string]core.VolumeSource, field.ErrorList) {
+func ValidateVolumes(volumes []core.Volume, podMeta *metav1.ObjectMeta, fldPath *field.Path) (map[string]core.VolumeSource, field.ErrorList) {
    allErrs := field.ErrorList{}

    allNames := sets.String{}
    allCreatedPVCs := sets.String{}
    // Determine which PVCs will be created for this pod. We need
    // the exact name of the pod for this. Without it, this sanity
    // check has to be skipped.
    if podMeta != nil && podMeta.Name != "" {
        for _, vol := range volumes {
            if vol.VolumeSource.Ephemeral != nil {
                allCreatedPVCs.Insert(podMeta.Name + "-" + vol.Name)
            }
        }
    }
    vols := make(map[string]core.VolumeSource)
    for i, vol := range volumes {
        idxPath := fldPath.Index(i)
        namePath := idxPath.Child("name")
-       el := validateVolumeSource(&vol.VolumeSource, idxPath, vol.Name)
+       el := validateVolumeSource(&vol.VolumeSource, idxPath, vol.Name, podMeta)
        if len(vol.Name) == 0 {
            el = append(el, field.Required(namePath, ""))
        } else {
@@ -372,8 +384,14 @@ func ValidateVolumes(volumes []core.Volume, fldPath *field.Path) (map[string]cor
        } else {
            allErrs = append(allErrs, el...)
        }

        // A PersistentVolumeClaimSource should not reference a created PVC. That doesn't
        // make sense.
        if vol.PersistentVolumeClaim != nil && allCreatedPVCs.Has(vol.PersistentVolumeClaim.ClaimName) {
            allErrs = append(allErrs, field.Invalid(idxPath.Child("persistentVolumeClaim").Child("claimName"), vol.PersistentVolumeClaim.ClaimName,
                "must not reference a PVC that gets created for an ephemeral volume"))
        }
    }

    return vols, allErrs
}
@@ -428,7 +446,7 @@ func devicePathAlreadyExists(devicePath string, mounts map[string]string) bool {
    return false
}

-func validateVolumeSource(source *core.VolumeSource, fldPath *field.Path, volName string) field.ErrorList {
+func validateVolumeSource(source *core.VolumeSource, fldPath *field.Path, volName string, podMeta *metav1.ObjectMeta) field.ErrorList {
    numVolumes := 0
    allErrs := field.ErrorList{}
    if source.EmptyDir != nil {
@@ -659,6 +677,23 @@ func validateVolumeSource(source *core.VolumeSource, fldPath *field.Path, volNam
            allErrs = append(allErrs, validateCSIVolumeSource(source.CSI, fldPath.Child("csi"))...)
        }
    }
    if source.Ephemeral != nil {
        if numVolumes > 0 {
            allErrs = append(allErrs, field.Forbidden(fldPath.Child("ephemeral"), "may not specify more than 1 volume type"))
        } else {
            numVolumes++
            allErrs = append(allErrs, validateEphemeralVolumeSource(source.Ephemeral, fldPath.Child("ephemeral"))...)
            // Check the expected name for the PVC. This gets skipped if information is missing,
            // because that already gets flagged as a problem elsewhere. For example,
            // ValidateObjectMeta as called by validatePodMetadataAndSpec checks that the name is set.
            if podMeta != nil && podMeta.Name != "" && volName != "" {
                pvcName := podMeta.Name + "-" + volName
                for _, msg := range ValidatePersistentVolumeName(pvcName, false) {
                    allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), volName, fmt.Sprintf("PVC name %q: %v", pvcName, msg)))
                }
            }
        }
    }

    if numVolumes == 0 {
        allErrs = append(allErrs, field.Required(fldPath, "must specify a volume type"))
@@ -1552,6 +1587,41 @@ func validateCSIVolumeSource(csi *core.CSIVolumeSource, fldPath *field.Path) fie
    return allErrs
}

func validateEphemeralVolumeSource(ephemeral *core.EphemeralVolumeSource, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}
    if ephemeral.VolumeClaimTemplate == nil {
        allErrs = append(allErrs, field.Required(fldPath.Child("volumeClaimTemplate"), ""))
    } else {
        allErrs = append(allErrs, ValidatePersistentVolumeClaimTemplate(ephemeral.VolumeClaimTemplate, fldPath.Child("volumeClaimTemplate"))...)
    }
    return allErrs
}

// ValidatePersistentVolumeClaimTemplate verifies that the embedded object meta and spec are valid.
// Checking of the object data is very minimal because only labels and annotations are used.
func ValidatePersistentVolumeClaimTemplate(claimTemplate *core.PersistentVolumeClaimTemplate, fldPath *field.Path) field.ErrorList {
    allErrs := validatePersistentVolumeClaimTemplateObjectMeta(&claimTemplate.ObjectMeta, fldPath.Child("metadata"))
    allErrs = append(allErrs, ValidatePersistentVolumeClaimSpec(&claimTemplate.Spec, fldPath.Child("spec"))...)
    return allErrs
}

func validatePersistentVolumeClaimTemplateObjectMeta(objMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {
    allErrs := apimachineryvalidation.ValidateAnnotations(objMeta.Annotations, fldPath.Child("annotations"))
    allErrs = append(allErrs, v1validation.ValidateLabels(objMeta.Labels, fldPath.Child("labels"))...)
    // All other fields are not supported and thus must not be set
    // to avoid confusion. We could reject individual fields,
    // but then adding a new one to ObjectMeta wouldn't be checked
    // unless this code gets updated. Instead, we ensure that
    // only allowed fields are set via reflection.
    allErrs = append(allErrs, validateFieldAllowList(*objMeta, allowedPVCTemplateObjectMetaFields, "cannot be set for an ephemeral volume", fldPath)...)
    return allErrs
}

var allowedPVCTemplateObjectMetaFields = map[string]bool{
    "Annotations": true,
    "Labels":      true,
}

// ValidatePersistentVolumeName checks that a name is appropriate for a
// PersistentVolumeName object.
var ValidatePersistentVolumeName = apimachineryvalidation.NameIsDNSSubdomain
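The practical consequence of the name check above: `<pod name>-<volume name>` must itself be a valid DNS subdomain (at most 253 characters), so a pod with a long name plus a long volume name is rejected even though each name is individually valid. A small sketch using the same apimachinery helper:

package main

import (
    "fmt"
    "strings"

    apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
)

func main() {
    // Each name is individually valid (well under 253 characters)...
    podName := strings.Repeat("a", 150)
    volName := strings.Repeat("b", 150)
    // ...but the derived PVC name is 301 characters and fails validation.
    pvcName := podName + "-" + volName
    msgs := apimachineryvalidation.NameIsDNSSubdomain(pvcName, false)
    fmt.Println(len(pvcName), len(msgs) > 0) // 301 true
}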
@@ -2647,21 +2717,31 @@ func validateEphemeralContainers(ephemeralContainers []core.EphemeralContainer,
    }

        // Ephemeral Containers should not be relied upon for fundamental pod services, so fields such as
-       // Lifecycle, probes, resources and ports should be disallowed. This is implemented as a whitelist
-       // so that new fields will be given consideration prior to inclusion in Ephemeral Containers.
-       specType, specValue := reflect.TypeOf(ec.EphemeralContainerCommon), reflect.ValueOf(ec.EphemeralContainerCommon)
-       for i := 0; i < specType.NumField(); i++ {
-           f := specType.Field(i)
-           if allowedEphemeralContainerFields[f.Name] {
-               continue
-           }
+       // Lifecycle, probes, resources and ports should be disallowed. This is implemented as a list
+       // of allowed fields so that new fields will be given consideration prior to inclusion in Ephemeral Containers.
+       allErrs = append(allErrs, validateFieldAllowList(ec.EphemeralContainerCommon, allowedEphemeralContainerFields, "cannot be set for an Ephemeral Container", idxPath)...)
    }

-           // Compare the value of this field to its zero value to determine if it has been set
-           if !reflect.DeepEqual(specValue.Field(i).Interface(), reflect.Zero(f.Type).Interface()) {
-               r, n := utf8.DecodeRuneInString(f.Name)
-               lcName := string(unicode.ToLower(r)) + f.Name[n:]
-               allErrs = append(allErrs, field.Forbidden(idxPath.Child(lcName), "cannot be set for an Ephemeral Container"))
-           }
    return allErrs
}

+// validateFieldAllowList checks that only allowed fields are set.
+// The value must be a struct (not a pointer to a struct!).
+func validateFieldAllowList(value interface{}, allowedFields map[string]bool, errorText string, fldPath *field.Path) field.ErrorList {
+   var allErrs field.ErrorList
+
+   reflectType, reflectValue := reflect.TypeOf(value), reflect.ValueOf(value)
+   for i := 0; i < reflectType.NumField(); i++ {
+       f := reflectType.Field(i)
+       if allowedFields[f.Name] {
+           continue
+       }
+
+       // Compare the value of this field to its zero value to determine if it has been set
+       if !reflect.DeepEqual(reflectValue.Field(i).Interface(), reflect.Zero(f.Type).Interface()) {
+           r, n := utf8.DecodeRuneInString(f.Name)
+           lcName := string(unicode.ToLower(r)) + f.Name[n:]
+           allErrs = append(allErrs, field.Forbidden(fldPath.Child(lcName), errorText))
+       }
+   }
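The reflection-based allow list generalizes the old inline loop: any non-zero field whose name is not in the map is rejected, so a new field added to ObjectMeta (or EphemeralContainerCommon) is automatically flagged until someone consciously allows it. A standalone version of the idea, not the Kubernetes helper itself:

package main

import (
    "fmt"
    "reflect"
)

// disallowedFields returns the names of struct fields that are set
// (non-zero) but not explicitly allowed.
func disallowedFields(value interface{}, allowed map[string]bool) []string {
    var bad []string
    t, v := reflect.TypeOf(value), reflect.ValueOf(value)
    for i := 0; i < t.NumField(); i++ {
        f := t.Field(i)
        if allowed[f.Name] {
            continue
        }
        // A field counts as "set" when it differs from its zero value.
        if !reflect.DeepEqual(v.Field(i).Interface(), reflect.Zero(f.Type).Interface()) {
            bad = append(bad, f.Name)
        }
    }
    return bad
}

type meta struct {
    Labels      map[string]string
    Annotations map[string]string
    Finalizers  []string
}

func main() {
    allowed := map[string]bool{"Labels": true, "Annotations": true}
    m := meta{Labels: map[string]string{"app": "demo"}, Finalizers: []string{"x"}}
    fmt.Println(disallowedFields(m, allowed)) // [Finalizers]
}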
@@ -3119,7 +3199,7 @@ func validatePodMetadataAndSpec(pod *core.Pod, opts PodValidationOptions) field.
    fldPath := field.NewPath("metadata")
    allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, fldPath)
    allErrs = append(allErrs, ValidatePodSpecificAnnotations(pod.ObjectMeta.Annotations, &pod.Spec, fldPath.Child("annotations"))...)
-   allErrs = append(allErrs, ValidatePodSpec(&pod.Spec, field.NewPath("spec"))...)
+   allErrs = append(allErrs, ValidatePodSpec(&pod.Spec, &pod.ObjectMeta, field.NewPath("spec"))...)

    // we do additional validation only pertinent for pods and not pod templates
    // this was done to preserve backwards compatibility
@@ -3198,10 +3278,12 @@ func validatePodIPs(pod *core.Pod) field.ErrorList {
 // This includes checking formatting and uniqueness. It also canonicalizes the
 // structure by setting default values and implementing any backwards-compatibility
 // tricks.
-func ValidatePodSpec(spec *core.PodSpec, fldPath *field.Path) field.ErrorList {
+// The pod metadata is needed to validate generic ephemeral volumes. It is optional
+// and should be left empty unless the spec is from a real pod object.
+func ValidatePodSpec(spec *core.PodSpec, podMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList {
    allErrs := field.ErrorList{}

-   vols, vErrs := ValidateVolumes(spec.Volumes, fldPath.Child("volumes"))
+   vols, vErrs := ValidateVolumes(spec.Volumes, podMeta, fldPath.Child("volumes"))
    allErrs = append(allErrs, vErrs...)
    allErrs = append(allErrs, validateContainers(spec.Containers, false, vols, fldPath.Child("containers"))...)
    allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, fldPath.Child("initContainers"))...)
@@ -4493,7 +4575,7 @@ func ValidatePodTemplateSpec(spec *core.PodTemplateSpec, fldPath *field.Path) fi
    allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.Labels, fldPath.Child("labels"))...)
    allErrs = append(allErrs, ValidateAnnotations(spec.Annotations, fldPath.Child("annotations"))...)
    allErrs = append(allErrs, ValidatePodSpecificAnnotations(spec.Annotations, &spec.Spec, fldPath.Child("annotations"))...)
-   allErrs = append(allErrs, ValidatePodSpec(&spec.Spec, fldPath.Child("spec"))...)
+   allErrs = append(allErrs, ValidatePodSpec(&spec.Spec, nil, fldPath.Child("spec"))...)
    allErrs = append(allErrs, validateSeccompAnnotationsAndFields(spec.ObjectMeta, &spec.Spec, fldPath.Child("spec"))...)

    if len(spec.Spec.EphemeralContainers) > 0 {
@ -932,42 +932,195 @@ func testVolumeClaimStorageClassInAnnotationAndSpec(name, namespace, scNameInAnn
    }
}

func TestValidatePersistentVolumeClaim(t *testing.T) {
func testValidatePVC(t *testing.T, ephemeral bool) {
    invalidClassName := "-invalid-"
    validClassName := "valid"
    invalidMode := core.PersistentVolumeMode("fakeVolumeMode")
    validMode := core.PersistentVolumeFilesystem
    goodName := "foo"
    goodNS := "ns"
    if ephemeral {
        // Must be empty for ephemeral inline volumes.
        goodName = ""
        goodNS = ""
    }
    goodClaimSpec := core.PersistentVolumeClaimSpec{
        Selector: &metav1.LabelSelector{
            MatchExpressions: []metav1.LabelSelectorRequirement{
                {
                    Key:      "key2",
                    Operator: "Exists",
                },
            },
        },
        AccessModes: []core.PersistentVolumeAccessMode{
            core.ReadWriteOnce,
            core.ReadOnlyMany,
        },
        Resources: core.ResourceRequirements{
            Requests: core.ResourceList{
                core.ResourceName(core.ResourceStorage): resource.MustParse("10G"),
            },
        },
        StorageClassName: &validClassName,
        VolumeMode:       &validMode,
    }
    now := metav1.Now()
    ten := int64(10)

    scenarios := map[string]struct {
        isExpectedFailure bool
        claim             *core.PersistentVolumeClaim
    }{
        "good-claim": {
            isExpectedFailure: false,
            claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
                Selector: &metav1.LabelSelector{
                    MatchExpressions: []metav1.LabelSelectorRequirement{
                        {
                            Key:      "key2",
                            Operator: "Exists",
                        },
            claim:             testVolumeClaim(goodName, goodNS, goodClaimSpec),
        },
        "missing-name": {
            isExpectedFailure: !ephemeral,
            claim:             testVolumeClaim("", goodNS, goodClaimSpec),
        },
        "missing-namespace": {
            isExpectedFailure: !ephemeral,
            claim:             testVolumeClaim(goodName, "", goodClaimSpec),
        },
        "with-generate-name": {
            isExpectedFailure: ephemeral,
            claim: func() *core.PersistentVolumeClaim {
                claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
                claim.GenerateName = "pvc-"
                return claim
            }(),
        },
        "with-uid": {
            isExpectedFailure: ephemeral,
            claim: func() *core.PersistentVolumeClaim {
                claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
                claim.UID = "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d"
                return claim
            }(),
        },
        "with-resource-version": {
            isExpectedFailure: ephemeral,
            claim: func() *core.PersistentVolumeClaim {
                claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
                claim.ResourceVersion = "1"
                return claim
            }(),
        },
        "with-generation": {
            isExpectedFailure: ephemeral,
            claim: func() *core.PersistentVolumeClaim {
                claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
                claim.Generation = 100
                return claim
            }(),
        },
        "with-creation-timestamp": {
            isExpectedFailure: ephemeral,
            claim: func() *core.PersistentVolumeClaim {
                claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
                claim.CreationTimestamp = now
                return claim
            }(),
        },
        "with-deletion-grace-period-seconds": {
            isExpectedFailure: ephemeral,
            claim: func() *core.PersistentVolumeClaim {
                claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
                claim.DeletionGracePeriodSeconds = &ten
                return claim
            }(),
        },
        "with-owner-references": {
            isExpectedFailure: ephemeral,
            claim: func() *core.PersistentVolumeClaim {
                claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
                claim.OwnerReferences = []metav1.OwnerReference{
                    {
                        APIVersion: "v1",
                        Kind:       "pod",
                        Name:       "foo",
                        UID:        "ac051fac-2ead-46d9-b8b4-4e0fbeb7455d",
                    },
                },
                AccessModes: []core.PersistentVolumeAccessMode{
                    core.ReadWriteOnce,
                    core.ReadOnlyMany,
                },
                Resources: core.ResourceRequirements{
                    Requests: core.ResourceList{
                        core.ResourceName(core.ResourceStorage): resource.MustParse("10G"),
                    }
                return claim
            }(),
        },
        "with-finalizers": {
            isExpectedFailure: ephemeral,
            claim: func() *core.PersistentVolumeClaim {
                claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
                claim.Finalizers = []string{
                    "example.com/foo",
                }
                return claim
            }(),
        },
        "with-cluster-name": {
            isExpectedFailure: ephemeral,
            claim: func() *core.PersistentVolumeClaim {
                claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
                claim.ClusterName = "foo"
                return claim
            }(),
        },
        "with-managed-fields": {
            isExpectedFailure: ephemeral,
            claim: func() *core.PersistentVolumeClaim {
                claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
                claim.ManagedFields = []metav1.ManagedFieldsEntry{
                    {
                        FieldsType: "FieldsV1",
                        Operation:  "Apply",
                        APIVersion: "apps/v1",
                        Manager:    "foo",
                    },
                },
                StorageClassName: &validClassName,
                VolumeMode:       &validMode,
            }),
        },
                }
                return claim
            }(),
        },
        "with-good-labels": {
            claim: func() *core.PersistentVolumeClaim {
                claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
                claim.Labels = map[string]string{
                    "apps.kubernetes.io/name": "test",
                }
                return claim
            }(),
        },
        "with-bad-labels": {
            isExpectedFailure: true,
            claim: func() *core.PersistentVolumeClaim {
                claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
                claim.Labels = map[string]string{
                    "hello-world": "hyphen not allowed",
                }
                return claim
            }(),
        },
        "with-good-annotations": {
            claim: func() *core.PersistentVolumeClaim {
                claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
                claim.Labels = map[string]string{
                    "foo": "bar",
                }
                return claim
            }(),
        },
        "with-bad-annotations": {
            isExpectedFailure: true,
            claim: func() *core.PersistentVolumeClaim {
                claim := testVolumeClaim(goodName, goodNS, goodClaimSpec)
                claim.Labels = map[string]string{
                    "hello-world": "hyphen not allowed",
                }
                return claim
            }(),
        },
        "invalid-claim-zero-capacity": {
            isExpectedFailure: true,
            claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
            claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
                Selector: &metav1.LabelSelector{
                    MatchExpressions: []metav1.LabelSelectorRequirement{
                        {
@ -990,7 +1143,7 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
        },
        "invalid-label-selector": {
            isExpectedFailure: true,
            claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
            claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
                Selector: &metav1.LabelSelector{
                    MatchExpressions: []metav1.LabelSelectorRequirement{
                        {
@ -1013,7 +1166,7 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
        },
        "invalid-accessmode": {
            isExpectedFailure: true,
            claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
            claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
                AccessModes: []core.PersistentVolumeAccessMode{"fakemode"},
                Resources: core.ResourceRequirements{
                    Requests: core.ResourceList{
@ -1022,23 +1175,9 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
        },
            }),
        },
        "missing-namespace": {
            isExpectedFailure: true,
            claim: testVolumeClaim("foo", "", core.PersistentVolumeClaimSpec{
                AccessModes: []core.PersistentVolumeAccessMode{
                    core.ReadWriteOnce,
                    core.ReadOnlyMany,
                },
                Resources: core.ResourceRequirements{
                    Requests: core.ResourceList{
                        core.ResourceName(core.ResourceStorage): resource.MustParse("10G"),
                    },
                },
            }),
        },
        "no-access-modes": {
            isExpectedFailure: true,
            claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
            claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
                Resources: core.ResourceRequirements{
                    Requests: core.ResourceList{
                        core.ResourceName(core.ResourceStorage): resource.MustParse("10G"),
@ -1048,7 +1187,7 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
        },
        "no-resource-requests": {
            isExpectedFailure: true,
            claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
            claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
                AccessModes: []core.PersistentVolumeAccessMode{
                    core.ReadWriteOnce,
                },
@ -1056,7 +1195,7 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
        },
        "invalid-resource-requests": {
            isExpectedFailure: true,
            claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
            claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
                AccessModes: []core.PersistentVolumeAccessMode{
                    core.ReadWriteOnce,
                },
@ -1069,7 +1208,7 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
        },
        "negative-storage-request": {
            isExpectedFailure: true,
            claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
            claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
                Selector: &metav1.LabelSelector{
                    MatchExpressions: []metav1.LabelSelectorRequirement{
                        {
@ -1091,7 +1230,7 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
        },
        "zero-storage-request": {
            isExpectedFailure: true,
            claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
            claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
                Selector: &metav1.LabelSelector{
                    MatchExpressions: []metav1.LabelSelectorRequirement{
                        {
@ -1113,7 +1252,7 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
        },
        "invalid-storage-class-name": {
            isExpectedFailure: true,
            claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
            claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
                Selector: &metav1.LabelSelector{
                    MatchExpressions: []metav1.LabelSelectorRequirement{
                        {
@ -1136,7 +1275,7 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {
        },
        "invalid-volume-mode": {
            isExpectedFailure: true,
            claim: testVolumeClaim("foo", "ns", core.PersistentVolumeClaimSpec{
            claim: testVolumeClaim(goodName, goodNS, core.PersistentVolumeClaimSpec{
                AccessModes: []core.PersistentVolumeAccessMode{
                    core.ReadWriteOnce,
                    core.ReadOnlyMany,
@ -1153,17 +1292,43 @@ func TestValidatePersistentVolumeClaim(t *testing.T) {

    for name, scenario := range scenarios {
        t.Run(name, func(t *testing.T) {
            errs := ValidatePersistentVolumeClaim(scenario.claim)
            var errs field.ErrorList
            if ephemeral {
                volumes := []core.Volume{
                    {
                        Name: "foo",
                        VolumeSource: core.VolumeSource{
                            Ephemeral: &core.EphemeralVolumeSource{
                                VolumeClaimTemplate: &core.PersistentVolumeClaimTemplate{
                                    ObjectMeta: scenario.claim.ObjectMeta,
                                    Spec:       scenario.claim.Spec,
                                },
                            },
                        },
                    },
                }
                _, errs = ValidateVolumes(volumes, nil, field.NewPath(""))
            } else {
                errs = ValidatePersistentVolumeClaim(scenario.claim)
            }
            if len(errs) == 0 && scenario.isExpectedFailure {
                t.Errorf("Unexpected success for scenario: %s", name)
                t.Error("Unexpected success for scenario")
            }
            if len(errs) > 0 && !scenario.isExpectedFailure {
                t.Errorf("Unexpected failure for scenario: %s - %+v", name, errs)
                t.Errorf("Unexpected failure: %+v", errs)
            }
        })
    }
}

func TestValidatePersistentVolumeClaim(t *testing.T) {
    testValidatePVC(t, false)
}

func TestValidateEphemeralVolume(t *testing.T) {
    testValidatePVC(t, true)
}

func TestAlphaPVVolumeModeUpdate(t *testing.T) {
    block := core.PersistentVolumeBlock
    file := core.PersistentVolumeFilesystem
@ -3825,7 +3990,7 @@ func TestValidateVolumes(t *testing.T) {

    for _, tc := range testCases {
        t.Run(tc.name, func(t *testing.T) {
            names, errs := ValidateVolumes([]core.Volume{tc.vol}, field.NewPath("field"))
            names, errs := ValidateVolumes([]core.Volume{tc.vol}, nil, field.NewPath("field"))
            if len(errs) != len(tc.errs) {
                t.Fatalf("unexpected error(s): got %d, want %d: %v", len(tc.errs), len(errs), errs)
            }
@ -3851,7 +4016,7 @@ func TestValidateVolumes(t *testing.T) {
        {Name: "abc", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}},
        {Name: "abc", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}},
    }
    _, errs := ValidateVolumes(dupsCase, field.NewPath("field"))
    _, errs := ValidateVolumes(dupsCase, nil, field.NewPath("field"))
    if len(errs) == 0 {
        t.Errorf("expected error")
    } else if len(errs) != 1 {
@ -3864,7 +4029,7 @@ func TestValidateVolumes(t *testing.T) {
    hugePagesCase := core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{Medium: core.StorageMediumHugePages}}

    // Enable HugePages
    if errs := validateVolumeSource(&hugePagesCase, field.NewPath("field").Index(0), "working"); len(errs) != 0 {
    if errs := validateVolumeSource(&hugePagesCase, field.NewPath("field").Index(0), "working", nil); len(errs) != 0 {
        t.Errorf("Unexpected error when HugePages feature is enabled.")
    }

@ -4194,7 +4359,7 @@ func TestAlphaLocalStorageCapacityIsolation(t *testing.T) {
    }

    for _, tc := range testCases {
        if errs := validateVolumeSource(&tc, field.NewPath("spec"), "tmpvol"); len(errs) != 0 {
        if errs := validateVolumeSource(&tc, field.NewPath("spec"), "tmpvol", nil); len(errs) != 0 {
            t.Errorf("expected success: %v", errs)
        }
    }
@ -4937,7 +5102,7 @@ func TestValidateVolumeMounts(t *testing.T) {
        {Name: "abc-123", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "testclaim2"}}},
        {Name: "123", VolumeSource: core.VolumeSource{HostPath: &core.HostPathVolumeSource{Path: "/foo/baz", Type: newHostPathType(string(core.HostPathUnset))}}},
    }
    vols, v1err := ValidateVolumes(volumes, field.NewPath("field"))
    vols, v1err := ValidateVolumes(volumes, nil, field.NewPath("field"))
    if len(v1err) > 0 {
        t.Errorf("Invalid test volume - expected success %v", v1err)
        return
@ -5000,7 +5165,7 @@ func TestValidateDisabledSubpath(t *testing.T) {
        {Name: "abc-123", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "testclaim2"}}},
        {Name: "123", VolumeSource: core.VolumeSource{HostPath: &core.HostPathVolumeSource{Path: "/foo/baz", Type: newHostPathType(string(core.HostPathUnset))}}},
    }
    vols, v1err := ValidateVolumes(volumes, field.NewPath("field"))
    vols, v1err := ValidateVolumes(volumes, nil, field.NewPath("field"))
    if len(v1err) > 0 {
        t.Errorf("Invalid test volume - expected success %v", v1err)
        return
@ -5062,7 +5227,7 @@ func TestValidateSubpathMutuallyExclusive(t *testing.T) {
        {Name: "abc-123", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "testclaim2"}}},
        {Name: "123", VolumeSource: core.VolumeSource{HostPath: &core.HostPathVolumeSource{Path: "/foo/baz", Type: newHostPathType(string(core.HostPathUnset))}}},
    }
    vols, v1err := ValidateVolumes(volumes, field.NewPath("field"))
    vols, v1err := ValidateVolumes(volumes, nil, field.NewPath("field"))
    if len(v1err) > 0 {
        t.Errorf("Invalid test volume - expected success %v", v1err)
        return
@ -5143,7 +5308,7 @@ func TestValidateDisabledSubpathExpr(t *testing.T) {
        {Name: "abc-123", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "testclaim2"}}},
        {Name: "123", VolumeSource: core.VolumeSource{HostPath: &core.HostPathVolumeSource{Path: "/foo/baz", Type: newHostPathType(string(core.HostPathUnset))}}},
    }
    vols, v1err := ValidateVolumes(volumes, field.NewPath("field"))
    vols, v1err := ValidateVolumes(volumes, nil, field.NewPath("field"))
    if len(v1err) > 0 {
        t.Errorf("Invalid test volume - expected success %v", v1err)
        return
@ -5337,7 +5502,7 @@ func TestValidateMountPropagation(t *testing.T) {
    volumes := []core.Volume{
        {Name: "foo", VolumeSource: core.VolumeSource{HostPath: &core.HostPathVolumeSource{Path: "/foo/baz", Type: newHostPathType(string(core.HostPathUnset))}}},
    }
    vols2, v2err := ValidateVolumes(volumes, field.NewPath("field"))
    vols2, v2err := ValidateVolumes(volumes, nil, field.NewPath("field"))
    if len(v2err) > 0 {
        t.Errorf("Invalid test volume - expected success %v", v2err)
        return
@ -5360,7 +5525,7 @@ func TestAlphaValidateVolumeDevices(t *testing.T) {
        {Name: "def", VolumeSource: core.VolumeSource{HostPath: &core.HostPathVolumeSource{Path: "/foo/baz", Type: newHostPathType(string(core.HostPathUnset))}}},
    }

    vols, v1err := ValidateVolumes(volumes, field.NewPath("field"))
    vols, v1err := ValidateVolumes(volumes, nil, field.NewPath("field"))
    if len(v1err) > 0 {
        t.Errorf("Invalid test volumes - expected success %v", v1err)
        return
@ -6560,14 +6725,14 @@ func TestValidatePodSpec(t *testing.T) {
    badfsGroupChangePolicy1 := core.PodFSGroupChangePolicy("invalid")
    badfsGroupChangePolicy2 := core.PodFSGroupChangePolicy("")

    successCases := []core.PodSpec{
        { // Populate basic fields, leave defaults for most.
    successCases := map[string]core.PodSpec{
        "populate basic fields, leave defaults for most": {
            Volumes:       []core.Volume{{Name: "vol", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}}},
            Containers:    []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
            RestartPolicy: core.RestartPolicyAlways,
            DNSPolicy:     core.DNSClusterFirst,
        },
        { // Populate all fields.
        "populate all fields": {
            Volumes: []core.Volume{
                {Name: "vol", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}},
            },
@ -6582,7 +6747,7 @@ func TestValidatePodSpec(t *testing.T) {
            ActiveDeadlineSeconds: &activeDeadlineSeconds,
            ServiceAccountName:    "acct",
        },
        { // Populate all fields with larger active deadline.
        "populate all fields with larger active deadline": {
            Volumes: []core.Volume{
                {Name: "vol", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}},
            },
@ -6597,7 +6762,7 @@ func TestValidatePodSpec(t *testing.T) {
            ActiveDeadlineSeconds: &activeDeadlineSecondsMax,
            ServiceAccountName:    "acct",
        },
        { // Populate HostNetwork.
        "populate HostNetwork": {
            Containers: []core.Container{
                {Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File",
                    Ports: []core.ContainerPort{
@ -6610,7 +6775,7 @@ func TestValidatePodSpec(t *testing.T) {
            RestartPolicy: core.RestartPolicyAlways,
            DNSPolicy:     core.DNSClusterFirst,
        },
        { // Populate RunAsUser SupplementalGroups FSGroup with minID 0
        "populate RunAsUser SupplementalGroups FSGroup with minID 0": {
            Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
            SecurityContext: &core.PodSecurityContext{
                SupplementalGroups: []int64{minGroupID},
@ -6620,7 +6785,7 @@ func TestValidatePodSpec(t *testing.T) {
            RestartPolicy: core.RestartPolicyAlways,
            DNSPolicy:     core.DNSClusterFirst,
        },
        { // Populate RunAsUser SupplementalGroups FSGroup with maxID 2147483647
        "populate RunAsUser SupplementalGroups FSGroup with maxID 2147483647": {
            Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
            SecurityContext: &core.PodSecurityContext{
                SupplementalGroups: []int64{maxGroupID},
@ -6630,7 +6795,7 @@ func TestValidatePodSpec(t *testing.T) {
            RestartPolicy: core.RestartPolicyAlways,
            DNSPolicy:     core.DNSClusterFirst,
        },
        { // Populate HostIPC.
        "populate HostIPC": {
            SecurityContext: &core.PodSecurityContext{
                HostIPC: true,
            },
@ -6639,7 +6804,7 @@ func TestValidatePodSpec(t *testing.T) {
            RestartPolicy: core.RestartPolicyAlways,
            DNSPolicy:     core.DNSClusterFirst,
        },
        { // Populate HostPID.
        "populate HostPID": {
            SecurityContext: &core.PodSecurityContext{
                HostPID: true,
            },
@ -6648,27 +6813,27 @@ func TestValidatePodSpec(t *testing.T) {
            RestartPolicy: core.RestartPolicyAlways,
            DNSPolicy:     core.DNSClusterFirst,
        },
        { // Populate Affinity.
        "populate Affinity": {
            Volumes:       []core.Volume{{Name: "vol", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}}},
            Containers:    []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
            RestartPolicy: core.RestartPolicyAlways,
            DNSPolicy:     core.DNSClusterFirst,
        },
        { // Populate HostAliases.
        "populate HostAliases": {
            HostAliases:   []core.HostAlias{{IP: "12.34.56.78", Hostnames: []string{"host1", "host2"}}},
            Volumes:       []core.Volume{{Name: "vol", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}}},
            Containers:    []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
            RestartPolicy: core.RestartPolicyAlways,
            DNSPolicy:     core.DNSClusterFirst,
        },
        { // Populate HostAliases with `foo.bar` hostnames.
        "populate HostAliases with `foo.bar` hostnames": {
            HostAliases:   []core.HostAlias{{IP: "12.34.56.78", Hostnames: []string{"host1.foo", "host2.bar"}}},
            Volumes:       []core.Volume{{Name: "vol", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}}},
            Containers:    []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
            RestartPolicy: core.RestartPolicyAlways,
            DNSPolicy:     core.DNSClusterFirst,
        },
        { // Populate HostAliases with HostNetwork.
        "populate HostAliases with HostNetwork": {
            HostAliases: []core.HostAlias{{IP: "12.34.56.78", Hostnames: []string{"host1.foo", "host2.bar"}}},
            Containers:  []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
            SecurityContext: &core.PodSecurityContext{
@ -6677,14 +6842,14 @@ func TestValidatePodSpec(t *testing.T) {
            RestartPolicy: core.RestartPolicyAlways,
            DNSPolicy:     core.DNSClusterFirst,
        },
        { // Populate PriorityClassName.
        "populate PriorityClassName": {
            Volumes:           []core.Volume{{Name: "vol", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}}},
            Containers:        []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
            RestartPolicy:     core.RestartPolicyAlways,
            DNSPolicy:         core.DNSClusterFirst,
            PriorityClassName: "valid-name",
        },
        { // Populate ShareProcessNamespace
        "populate ShareProcessNamespace": {
            Volumes:       []core.Volume{{Name: "vol", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}}},
            Containers:    []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
            RestartPolicy: core.RestartPolicyAlways,
@ -6693,20 +6858,20 @@ func TestValidatePodSpec(t *testing.T) {
                ShareProcessNamespace: &[]bool{true}[0],
            },
        },
        { // Populate RuntimeClassName
        "populate RuntimeClassName": {
            Containers:       []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
            RestartPolicy:    core.RestartPolicyAlways,
            DNSPolicy:        core.DNSClusterFirst,
            RuntimeClassName: utilpointer.StringPtr("valid-sandbox"),
        },
        { // Populate Overhead
        "populate Overhead": {
            Containers:       []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
            RestartPolicy:    core.RestartPolicyAlways,
            DNSPolicy:        core.DNSClusterFirst,
            RuntimeClassName: utilpointer.StringPtr("valid-sandbox"),
            Overhead:         core.ResourceList{},
        },
        {
        "populate DNSPolicy": {
            Containers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
            SecurityContext: &core.PodSecurityContext{
                FSGroupChangePolicy: &goodfsGroupChangePolicy,
@ -6715,10 +6880,12 @@ func TestValidatePodSpec(t *testing.T) {
            DNSPolicy: core.DNSClusterFirst,
        },
    }
    for i := range successCases {
        if errs := ValidatePodSpec(&successCases[i], field.NewPath("field")); len(errs) != 0 {
            t.Errorf("expected success: %v", errs)
        }
    for k, v := range successCases {
        t.Run(k, func(t *testing.T) {
            if errs := ValidatePodSpec(&v, nil, field.NewPath("field")); len(errs) != 0 {
                t.Errorf("expected success: %v", errs)
            }
        })
    }

    activeDeadlineSeconds = int64(0)
@ -6919,7 +7086,7 @@ func TestValidatePodSpec(t *testing.T) {
        },
    }
    for k, v := range failureCases {
        if errs := ValidatePodSpec(&v, field.NewPath("field")); len(errs) == 0 {
        if errs := ValidatePodSpec(&v, nil, field.NewPath("field")); len(errs) == 0 {
            t.Errorf("expected failure for %q", k)
        }
    }
@ -6946,9 +7113,24 @@ func TestValidatePod(t *testing.T) {
        }
        return spec
    }
    validPVCSpec := core.PersistentVolumeClaimSpec{
        AccessModes: []core.PersistentVolumeAccessMode{
            core.ReadWriteOnce,
        },
        Resources: core.ResourceRequirements{
            Requests: core.ResourceList{
                core.ResourceName(core.ResourceStorage): resource.MustParse("10G"),
            },
        },
    }
    validPVCTemplate := core.PersistentVolumeClaimTemplate{
        Spec: validPVCSpec,
    }
    longPodName := strings.Repeat("a", 200)
    longVolName := strings.Repeat("b", 60)

    successCases := []core.Pod{
        { // Basic fields.
    successCases := map[string]core.Pod{
        "basic fields": {
            ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"},
            Spec: core.PodSpec{
                Volumes: []core.Volume{{Name: "vol", VolumeSource: core.VolumeSource{EmptyDir: &core.EmptyDirVolumeSource{}}}},
@ -6957,7 +7139,7 @@ func TestValidatePod(t *testing.T) {
                DNSPolicy: core.DNSClusterFirst,
            },
        },
        { // Just about everything.
        "just about everything": {
            ObjectMeta: metav1.ObjectMeta{Name: "abc.123.do-re-mi", Namespace: "ns"},
            Spec: core.PodSpec{
                Volumes: []core.Volume{
@ -6972,7 +7154,7 @@ func TestValidatePod(t *testing.T) {
                NodeName: "foobar",
            },
        },
        { // Serialized node affinity requirements.
        "serialized node affinity requirements": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
@ -7032,7 +7214,7 @@ func TestValidatePod(t *testing.T) {
                },
            ),
        },
        { // Serialized node affinity requirements.
        "serialized node affinity requirements, II": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
@ -7073,7 +7255,7 @@ func TestValidatePod(t *testing.T) {
                },
            ),
        },
        { // Serialized pod affinity in affinity requirements in annotations.
        "serialized pod affinity in affinity requirements in annotations": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
@ -7129,7 +7311,7 @@ func TestValidatePod(t *testing.T) {
                },
            }),
        },
        { // Serialized pod anti affinity with different Label Operators in affinity requirements in annotations.
        "serialized pod anti affinity with different Label Operators in affinity requirements in annotations": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
@ -7183,63 +7365,63 @@ func TestValidatePod(t *testing.T) {
                },
            }),
        },
        { // populate forgiveness tolerations with exists operator in annotations.
        "populate forgiveness tolerations with exists operator in annotations.": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
            },
            Spec: extendPodSpecwithTolerations(validPodSpec(nil), []core.Toleration{{Key: "foo", Operator: "Exists", Value: "", Effect: "NoExecute", TolerationSeconds: &[]int64{60}[0]}}),
        },
        { // populate forgiveness tolerations with equal operator in annotations.
        "populate forgiveness tolerations with equal operator in annotations.": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
            },
            Spec: extendPodSpecwithTolerations(validPodSpec(nil), []core.Toleration{{Key: "foo", Operator: "Equal", Value: "bar", Effect: "NoExecute", TolerationSeconds: &[]int64{60}[0]}}),
        },
        { // populate tolerations equal operator in annotations.
        "populate tolerations equal operator in annotations.": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
            },
            Spec: extendPodSpecwithTolerations(validPodSpec(nil), []core.Toleration{{Key: "foo", Operator: "Equal", Value: "bar", Effect: "NoSchedule"}}),
        },
        { // populate tolerations exists operator in annotations.
        "populate tolerations exists operator in annotations.": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
            },
            Spec: validPodSpec(nil),
        },
        { // empty key with Exists operator is OK for toleration, empty toleration key means match all taint keys.
        "empty key with Exists operator is OK for toleration, empty toleration key means match all taint keys.": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
            },
            Spec: extendPodSpecwithTolerations(validPodSpec(nil), []core.Toleration{{Operator: "Exists", Effect: "NoSchedule"}}),
        },
        { // empty operator is OK for toleration, defaults to Equal.
        "empty operator is OK for toleration, defaults to Equal.": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
            },
            Spec: extendPodSpecwithTolerations(validPodSpec(nil), []core.Toleration{{Key: "foo", Value: "bar", Effect: "NoSchedule"}}),
        },
        { // empty effect is OK for toleration, empty toleration effect means match all taint effects.
        "empty effect is OK for toleration, empty toleration effect means match all taint effects.": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
            },
            Spec: extendPodSpecwithTolerations(validPodSpec(nil), []core.Toleration{{Key: "foo", Operator: "Equal", Value: "bar"}}),
        },
        { // negative tolerationSeconds is OK for toleration.
        "negative tolerationSeconds is OK for toleration.": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "pod-forgiveness-invalid",
                Namespace: "ns",
            },
            Spec: extendPodSpecwithTolerations(validPodSpec(nil), []core.Toleration{{Key: "node.kubernetes.io/not-ready", Operator: "Exists", Effect: "NoExecute", TolerationSeconds: &[]int64{-2}[0]}}),
        },
        { // runtime default seccomp profile
        "runtime default seccomp profile": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
@ -7249,7 +7431,7 @@ func TestValidatePod(t *testing.T) {
            },
            Spec: validPodSpec(nil),
        },
        { // docker default seccomp profile
        "docker default seccomp profile": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
@ -7259,7 +7441,7 @@ func TestValidatePod(t *testing.T) {
            },
            Spec: validPodSpec(nil),
        },
        { // unconfined seccomp profile
        "unconfined seccomp profile": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
@ -7269,7 +7451,7 @@ func TestValidatePod(t *testing.T) {
            },
            Spec: validPodSpec(nil),
        },
        { // localhost seccomp profile
        "localhost seccomp profile": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
@ -7279,7 +7461,7 @@ func TestValidatePod(t *testing.T) {
            },
            Spec: validPodSpec(nil),
        },
        { // localhost seccomp profile for a container
        "localhost seccomp profile for a container": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
@ -7289,7 +7471,7 @@ func TestValidatePod(t *testing.T) {
            },
            Spec: validPodSpec(nil),
        },
        { // runtime default seccomp profile for a pod
        "runtime default seccomp profile for a pod": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
@ -7305,7 +7487,7 @@ func TestValidatePod(t *testing.T) {
                },
            },
        },
        { // runtime default seccomp profile for a container
        "runtime default seccomp profile for a container": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
@ -7322,7 +7504,7 @@ func TestValidatePod(t *testing.T) {
                DNSPolicy: core.DNSDefault,
            },
        },
        { // unconfined seccomp profile for a pod
        "unconfined seccomp profile for a pod": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
@ -7338,7 +7520,7 @@ func TestValidatePod(t *testing.T) {
                },
            },
        },
        { // unconfined seccomp profile for a container
        "unconfined seccomp profile for a container": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
@ -7355,7 +7537,7 @@ func TestValidatePod(t *testing.T) {
                DNSPolicy: core.DNSDefault,
            },
        },
        { // localhost seccomp profile for a pod
        "localhost seccomp profile for a pod": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
@ -7372,7 +7554,7 @@ func TestValidatePod(t *testing.T) {
                },
            },
        },
        { // localhost seccomp profile for a container
        "localhost seccomp profile for a container, II": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
@ -7390,7 +7572,7 @@ func TestValidatePod(t *testing.T) {
                DNSPolicy: core.DNSDefault,
            },
        },
        { // default AppArmor profile for a container
        "default AppArmor profile for a container": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
@ -7400,7 +7582,7 @@ func TestValidatePod(t *testing.T) {
            },
            Spec: validPodSpec(nil),
        },
        { // default AppArmor profile for an init container
        "default AppArmor profile for an init container": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
@ -7415,7 +7597,7 @@ func TestValidatePod(t *testing.T) {
                DNSPolicy: core.DNSClusterFirst,
            },
        },
        { // localhost AppArmor profile for a container
        "localhost AppArmor profile for a container": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
@ -7425,7 +7607,7 @@ func TestValidatePod(t *testing.T) {
            },
            Spec: validPodSpec(nil),
        },
        { // syntactically valid sysctls
        "syntactically valid sysctls": {
            ObjectMeta: metav1.ObjectMeta{
                Name:      "123",
                Namespace: "ns",
@ -7452,7 +7634,7 @@ func TestValidatePod(t *testing.T) {
                },
            },
        },
        { // valid extended resources for init container
        "valid extended resources for init container": {
            ObjectMeta: metav1.ObjectMeta{Name: "valid-extended", Namespace: "ns"},
            Spec: core.PodSpec{
                InitContainers: []core.Container{
@ -7476,7 +7658,7 @@ func TestValidatePod(t *testing.T) {
                DNSPolicy: core.DNSClusterFirst,
            },
        },
        { // valid extended resources for regular container
        "valid extended resources for regular container": {
            ObjectMeta: metav1.ObjectMeta{Name: "valid-extended", Namespace: "ns"},
            Spec: core.PodSpec{
                InitContainers: []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
@ -7500,7 +7682,7 @@ func TestValidatePod(t *testing.T) {
                DNSPolicy: core.DNSClusterFirst,
            },
        },
        { // valid serviceaccount token projected volume with serviceaccount name specified
        "valid serviceaccount token projected volume with serviceaccount name specified": {
            ObjectMeta: metav1.ObjectMeta{Name: "valid-extended", Namespace: "ns"},
            Spec: core.PodSpec{
                ServiceAccountName: "some-service-account",
@ -7527,11 +7709,25 @@ func TestValidatePod(t *testing.T) {
                },
            },
        },
        "ephemeral volume + PVC, no conflict between them": {
            ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"},
            Spec: core.PodSpec{
                Volumes: []core.Volume{
                    {Name: "pvc", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "my-pvc"}}},
                    {Name: "ephemeral", VolumeSource: core.VolumeSource{Ephemeral: &core.EphemeralVolumeSource{VolumeClaimTemplate: &validPVCTemplate}}},
                },
                Containers:    []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
                RestartPolicy: core.RestartPolicyAlways,
                DNSPolicy:     core.DNSClusterFirst,
            },
        },
    }
    for _, pod := range successCases {
        if errs := ValidatePodCreate(&pod, PodValidationOptions{}); len(errs) != 0 {
            t.Errorf("expected success: %v", errs)
        }
    for k, v := range successCases {
        t.Run(k, func(t *testing.T) {
            if errs := ValidatePodCreate(&v, PodValidationOptions{}); len(errs) != 0 {
                t.Errorf("expected success: %v", errs)
            }
        })
    }

    errorCases := map[string]struct {
@ -8421,15 +8617,47 @@ func TestValidatePod(t *testing.T) {
                },
            },
        },
        "final PVC name for ephemeral volume must be valid": {
            expectedError: "spec.volumes[1].name: Invalid value: \"" + longVolName + "\": PVC name \"" + longPodName + "-" + longVolName + "\": must be no more than 253 characters",
            spec: core.Pod{
                ObjectMeta: metav1.ObjectMeta{Name: longPodName, Namespace: "ns"},
                Spec: core.PodSpec{
                    Volumes: []core.Volume{
                        {Name: "pvc", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "my-pvc"}}},
                        {Name: longVolName, VolumeSource: core.VolumeSource{Ephemeral: &core.EphemeralVolumeSource{VolumeClaimTemplate: &validPVCTemplate}}},
                    },
                    Containers:    []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
                    RestartPolicy: core.RestartPolicyAlways,
                    DNSPolicy:     core.DNSClusterFirst,
                },
            },
        },
        "PersistentVolumeClaimVolumeSource must not reference a generated PVC": {
            expectedError: "spec.volumes[0].persistentVolumeClaim.claimName: Invalid value: \"123-ephemeral-volume\": must not reference a PVC that gets created for an ephemeral volume",
            spec: core.Pod{
                ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: "ns"},
                Spec: core.PodSpec{
                    Volumes: []core.Volume{
                        {Name: "pvc-volume", VolumeSource: core.VolumeSource{PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{ClaimName: "123-ephemeral-volume"}}},
                        {Name: "ephemeral-volume", VolumeSource: core.VolumeSource{Ephemeral: &core.EphemeralVolumeSource{VolumeClaimTemplate: &validPVCTemplate}}},
                    },
                    Containers:    []core.Container{{Name: "ctr", Image: "image", ImagePullPolicy: "IfNotPresent", TerminationMessagePolicy: "File"}},
                    RestartPolicy: core.RestartPolicyAlways,
                    DNSPolicy:     core.DNSClusterFirst,
                },
            },
        },
    }
    for k, v := range errorCases {
        if errs := ValidatePodCreate(&v.spec, PodValidationOptions{}); len(errs) == 0 {
            t.Errorf("expected failure for %q", k)
        } else if v.expectedError == "" {
            t.Errorf("missing expectedError for %q, got %q", k, errs.ToAggregate().Error())
        } else if actualError := errs.ToAggregate().Error(); !strings.Contains(actualError, v.expectedError) {
            t.Errorf("expected error for %q to contain %q, got %q", k, v.expectedError, actualError)
        }
        t.Run(k, func(t *testing.T) {
            if errs := ValidatePodCreate(&v.spec, PodValidationOptions{}); len(errs) == 0 {
                t.Errorf("expected failure")
            } else if v.expectedError == "" {
                t.Errorf("missing expectedError, got %q", errs.ToAggregate().Error())
            } else if actualError := errs.ToAggregate().Error(); !strings.Contains(actualError, v.expectedError) {
                t.Errorf("expected error to contain %q, got %q", v.expectedError, actualError)
            }
        })
    }
}
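The two new error cases above pin down how a pod's ephemeral volumes interact with plain PVC references: the generated claim name must itself be a valid object name, and a persistentVolumeClaim volume may not point at a name reserved for an ephemeral volume of the same pod. A runnable sketch of both rules with plain maps instead of the real API types (ephemeralNameChecks is an invention for this example; the real checks live in the updated ValidateVolumes):

package main

import "fmt"

// ephemeralNameChecks mirrors, in simplified form, the two rules the
// error cases exercise. claims maps volume name to referenced claim
// name, with "" marking a generic ephemeral volume whose claim will be
// named <pod name>-<volume name>.
func ephemeralNameChecks(podName string, claims map[string]string) error {
    generated := map[string]bool{}
    for volName, claim := range claims {
        if claim == "" { // ephemeral volume
            pvcName := podName + "-" + volName
            if len(pvcName) > 253 {
                return fmt.Errorf("PVC name %q: must be no more than 253 characters", pvcName)
            }
            generated[pvcName] = true
        }
    }
    for _, claim := range claims {
        if generated[claim] {
            return fmt.Errorf("claimName %q: must not reference a PVC that gets created for an ephemeral volume", claim)
        }
    }
    return nil
}

func main() {
    fmt.Println(ephemeralNameChecks("123", map[string]string{
        "pvc-volume":       "123-ephemeral-volume", // collides with the generated name below
        "ephemeral-volume": "",
    }))
}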
44 pkg/apis/core/zz_generated.deepcopy.go generated
@ -1433,6 +1433,27 @@ func (in *EphemeralContainers) DeepCopyObject() runtime.Object {
    return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralVolumeSource) DeepCopyInto(out *EphemeralVolumeSource) {
    *out = *in
    if in.VolumeClaimTemplate != nil {
        in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate
        *out = new(PersistentVolumeClaimTemplate)
        (*in).DeepCopyInto(*out)
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralVolumeSource.
func (in *EphemeralVolumeSource) DeepCopy() *EphemeralVolumeSource {
    if in == nil {
        return nil
    }
    out := new(EphemeralVolumeSource)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Event) DeepCopyInto(out *Event) {
    *out = *in
@ -2987,6 +3008,24 @@ func (in *PersistentVolumeClaimStatus) DeepCopy() *PersistentVolumeClaimStatus {
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeClaimTemplate) DeepCopyInto(out *PersistentVolumeClaimTemplate) {
    *out = *in
    in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
    in.Spec.DeepCopyInto(&out.Spec)
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimTemplate.
func (in *PersistentVolumeClaimTemplate) DeepCopy() *PersistentVolumeClaimTemplate {
    if in == nil {
        return nil
    }
    out := new(PersistentVolumeClaimTemplate)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeClaimVolumeSource) DeepCopyInto(out *PersistentVolumeClaimVolumeSource) {
    *out = *in
@ -5748,6 +5787,11 @@ func (in *VolumeSource) DeepCopyInto(out *VolumeSource) {
        *out = new(CSIVolumeSource)
        (*in).DeepCopyInto(*out)
    }
    if in.Ephemeral != nil {
        in, out := &in.Ephemeral, &out.Ephemeral
        *out = new(EphemeralVolumeSource)
        (*in).DeepCopyInto(*out)
    }
    return
}

21 pkg/apis/extensions/v1beta1/zz_generated.defaults.go generated
@ -98,6 +98,13 @@ func SetObjectDefaults_DaemonSet(in *v1beta1.DaemonSet) {
        if a.VolumeSource.ScaleIO != nil {
            v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                v1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Spec.Template.Spec.InitContainers {
        a := &in.Spec.Template.Spec.InitContainers[i]
@ -305,6 +312,13 @@ func SetObjectDefaults_Deployment(in *v1beta1.Deployment) {
        if a.VolumeSource.ScaleIO != nil {
            v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                v1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Spec.Template.Spec.InitContainers {
        a := &in.Spec.Template.Spec.InitContainers[i]
@ -553,6 +567,13 @@ func SetObjectDefaults_ReplicaSet(in *v1beta1.ReplicaSet) {
        if a.VolumeSource.ScaleIO != nil {
            v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                v1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
    for i := range in.Spec.Template.Spec.InitContainers {
        a := &in.Spec.Template.Spec.InitContainers[i]

@ -94,6 +94,13 @@ func SetObjectDefaults_PodPreset(in *v1alpha1.PodPreset) {
        if a.VolumeSource.ScaleIO != nil {
            v1.SetDefaults_ScaleIOVolumeSource(a.VolumeSource.ScaleIO)
        }
        if a.VolumeSource.Ephemeral != nil {
            if a.VolumeSource.Ephemeral.VolumeClaimTemplate != nil {
                v1.SetDefaults_PersistentVolumeClaimSpec(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Limits)
                v1.SetDefaults_ResourceList(&a.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.Resources.Requests)
            }
        }
    }
}

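These generated hunks all make the same point: a claim template embedded in an ephemeral volume now receives the same defaulting as a standalone PVC spec. A standalone illustration of that recursion pattern, using stand-in types and a made-up default rule (the "Filesystem" fallback is only an example; the real behavior comes from the generated SetDefaults_* functions shown above):

package main

import "fmt"

type claimSpec struct{ volumeMode string }

type claimTemplate struct{ spec claimSpec }

type ephemeralSource struct{ template *claimTemplate }

// setClaimSpecDefaults is a hypothetical stand-in for
// SetDefaults_PersistentVolumeClaimSpec.
func setClaimSpecDefaults(s *claimSpec) {
    if s.volumeMode == "" {
        s.volumeMode = "Filesystem"
    }
}

// defaultEphemeral shows the recursion the generated code performs:
// walk into the ephemeral source and default its claim template spec
// exactly as a standalone PVC spec would be defaulted.
func defaultEphemeral(e *ephemeralSource) {
    if e != nil && e.template != nil {
        setClaimSpecDefaults(&e.template.spec)
    }
}

func main() {
    e := &ephemeralSource{template: &claimTemplate{}}
    defaultEphemeral(e)
    fmt.Println(e.template.spec.volumeMode) // Filesystem
}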
@ -44,7 +44,7 @@ func ValidatePodPresetSpec(spec *settings.PodPresetSpec, fldPath *field.Path) fi
        allErrs = append(allErrs, field.Required(fldPath.Child("volumes", "env", "envFrom", "volumeMounts"), "must specify at least one"))
    }

    vols, vErrs := apivalidation.ValidateVolumes(spec.Volumes, fldPath.Child("volumes"))
    vols, vErrs := apivalidation.ValidateVolumes(spec.Volumes, nil, fldPath.Child("volumes"))
    allErrs = append(allErrs, vErrs...)
    allErrs = append(allErrs, apivalidation.ValidateEnv(spec.Env, fldPath.Child("env"))...)
    allErrs = append(allErrs, apivalidation.ValidateEnvFrom(spec.EnvFrom, fldPath.Child("envFrom"))...)

@ -137,6 +137,7 @@ filegroup(
        "//pkg/controller/util/node:all-srcs",
        "//pkg/controller/volume/attachdetach:all-srcs",
        "//pkg/controller/volume/common:all-srcs",
        "//pkg/controller/volume/ephemeral:all-srcs",
        "//pkg/controller/volume/events:all-srcs",
        "//pkg/controller/volume/expand:all-srcs",
        "//pkg/controller/volume/persistentvolume:all-srcs",

@ -203,7 +203,7 @@ func NewAttachDetachController(

    // This custom indexer will index pods by its PVC keys. Then we don't need
    // to iterate all pods every time to find pods which reference given PVC.
    if err := common.AddIndexerIfNotPresent(adc.podIndexer, common.PodPVCIndex, common.PodPVCIndexFunc); err != nil {
    if err := common.AddPodPVCIndexerIfNotPresent(adc.podIndexer); err != nil {
        return nil, fmt.Errorf("Could not initialize attach detach controller: %v", err)
    }

@ -425,7 +425,7 @@ func (adc *attachDetachController) populateDesiredStateOfWorld() error {
            // The volume specs present in the ActualStateOfWorld are nil, let's replace those
            // with the correct ones found on pods. The present in the ASW with no corresponding
            // pod will be detached and the spec is irrelevant.
            volumeSpec, err := util.CreateVolumeSpec(podVolume, podToAdd.Namespace, nodeName, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator)
            volumeSpec, err := util.CreateVolumeSpec(podVolume, podToAdd, nodeName, &adc.volumePluginMgr, adc.pvcLister, adc.pvLister, adc.csiMigratedPluginManager, adc.intreeToCSITranslator)
            if err != nil {
                klog.Errorf(
                    "Error creating spec for volume %q, pod %q/%q: %v",

@ -168,7 +168,7 @@ func (collector *attachDetachStateCollector) getVolumeInUseCount() volumeCount {
            continue
        }
        for _, podVolume := range pod.Spec.Volumes {
            volumeSpec, err := util.CreateVolumeSpec(podVolume, pod.Namespace, types.NodeName(pod.Spec.NodeName), collector.volumePluginMgr, collector.pvcLister, collector.pvLister, collector.csiMigratedPluginManager, collector.intreeToCSITranslator)
            volumeSpec, err := util.CreateVolumeSpec(podVolume, pod, types.NodeName(pod.Spec.NodeName), collector.volumePluginMgr, collector.pvcLister, collector.pvLister, collector.csiMigratedPluginManager, collector.intreeToCSITranslator)
            if err != nil {
                continue
            }

@ -39,39 +39,49 @@ import (
// A volume.Spec that refers to an in-tree plugin spec is translated to refer
// to a migrated CSI plugin spec if all conditions for CSI migration on a node
// for the in-tree plugin is satisfied.
func CreateVolumeSpec(podVolume v1.Volume, podNamespace string, nodeName types.NodeName, vpm *volume.VolumePluginMgr, pvcLister corelisters.PersistentVolumeClaimLister, pvLister corelisters.PersistentVolumeLister, csiMigratedPluginManager csimigration.PluginManager, csiTranslator csimigration.InTreeToCSITranslator) (*volume.Spec, error) {
func CreateVolumeSpec(podVolume v1.Volume, pod *v1.Pod, nodeName types.NodeName, vpm *volume.VolumePluginMgr, pvcLister corelisters.PersistentVolumeClaimLister, pvLister corelisters.PersistentVolumeLister, csiMigratedPluginManager csimigration.PluginManager, csiTranslator csimigration.InTreeToCSITranslator) (*volume.Spec, error) {
    claimName := ""
    readOnly := false
    if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
        claimName = pvcSource.ClaimName
        readOnly = pvcSource.ReadOnly
    }
    if ephemeralSource := podVolume.VolumeSource.Ephemeral; ephemeralSource != nil && utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume) {
        claimName = pod.Name + "-" + podVolume.Name
        readOnly = ephemeralSource.ReadOnly
    }
    if claimName != "" {
        klog.V(10).Infof(
            "Found PVC, ClaimName: %q/%q",
            podNamespace,
            pvcSource.ClaimName)
            pod.Namespace,
            claimName)

        // If podVolume is a PVC, fetch the real PV behind the claim
        pvName, pvcUID, err := getPVCFromCacheExtractPV(
            podNamespace, pvcSource.ClaimName, pvcLister)
            pod.Namespace, claimName, pvcLister)
        if err != nil {
            return nil, fmt.Errorf(
                "error processing PVC %q/%q: %v",
                podNamespace,
                pvcSource.ClaimName,
                pod.Namespace,
                claimName,
                err)
        }

        klog.V(10).Infof(
            "Found bound PV for PVC (ClaimName %q/%q pvcUID %v): pvName=%q",
            podNamespace,
            pvcSource.ClaimName,
            pod.Namespace,
            claimName,
            pvcUID,
            pvName)

        // Fetch actual PV object
        volumeSpec, err := getPVSpecFromCache(
            pvName, pvcSource.ReadOnly, pvcUID, pvLister)
            pvName, readOnly, pvcUID, pvLister)
        if err != nil {
            return nil, fmt.Errorf(
                "error processing PVC %q/%q: %v",
                podNamespace,
                pvcSource.ClaimName,
                pod.Namespace,
                claimName,
                err)
        }

@ -79,8 +89,8 @@ func CreateVolumeSpec(podVolume v1.Volume, podNamespace string, nodeName types.N
        if err != nil {
            return nil, fmt.Errorf(
                "error performing CSI migration checks and translation for PVC %q/%q: %v",
                podNamespace,
                pvcSource.ClaimName,
                pod.Namespace,
                claimName,
                err)
        }

@ -88,8 +98,8 @@ func CreateVolumeSpec(podVolume v1.Volume, podNamespace string, nodeName types.N
            "Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)",
            volumeSpec.Name(),
            pvName,
            podNamespace,
            pvcSource.ClaimName,
            pod.Namespace,
            claimName,
            pvcUID)

        return volumeSpec, nil
@ -219,7 +229,7 @@ func ProcessPodVolumes(pod *v1.Pod, addVolumes bool, desiredStateOfWorld cache.D

    // Process volume spec for each volume defined in pod
    for _, podVolume := range pod.Spec.Volumes {
        volumeSpec, err := CreateVolumeSpec(podVolume, pod.Namespace, nodeName, volumePluginMgr, pvcLister, pvLister, csiMigratedPluginManager, csiTranslator)
        volumeSpec, err := CreateVolumeSpec(podVolume, pod, nodeName, volumePluginMgr, pvcLister, pvLister, csiMigratedPluginManager, csiTranslator)
        if err != nil {
            klog.V(10).Infof(
                "Error processing volume %q for pod %q/%q: %v",

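Since CreateVolumeSpec now receives the whole pod, both volume kinds collapse into one claim lookup at the top of the function. A compilable sketch of just that resolution step, assuming a k8s.io/api version that includes EphemeralVolumeSource (v0.19 or later); claimForVolume and the demo pod are inventions for this example, and the read-only flag and the feature-gate lookup from the real code are left out:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// claimForVolume resolves the claim name the same way the updated
// CreateVolumeSpec does: a persistentVolumeClaim volume names its claim
// directly, while a generic ephemeral volume implies the claim
// <pod name>-<volume name> (only when the feature is enabled).
func claimForVolume(pod *v1.Pod, vol v1.Volume, ephemeralEnabled bool) string {
    if src := vol.PersistentVolumeClaim; src != nil {
        return src.ClaimName
    }
    if vol.Ephemeral != nil && ephemeralEnabled {
        return pod.Name + "-" + vol.Name
    }
    return ""
}

func main() {
    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "app-0", Namespace: "ns"}}
    vol := v1.Volume{
        Name:         "scratch",
        VolumeSource: v1.VolumeSource{Ephemeral: &v1.EphemeralVolumeSource{}},
    }
    fmt.Println(claimForVolume(pod, vol, true)) // app-0-scratch
}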
@ -6,7 +6,9 @@ go_library(
    importpath = "k8s.io/kubernetes/pkg/controller/volume/common",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/features:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
    ],
)

@ -20,7 +20,9 @@ import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/client-go/tools/cache"
    "k8s.io/kubernetes/pkg/features"
)

const (
@ -28,19 +30,36 @@ const (
    PodPVCIndex = "pod-pvc-index"
)

// PodPVCIndexFunc returns PVC keys for given pod
func PodPVCIndexFunc(obj interface{}) ([]string, error) {
    pod, ok := obj.(*v1.Pod)
    if !ok {
        return []string{}, nil
    }
    keys := []string{}
    for _, podVolume := range pod.Spec.Volumes {
        if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
            keys = append(keys, fmt.Sprintf("%s/%s", pod.Namespace, pvcSource.ClaimName))
// PodPVCIndexFunc creates an index function that returns PVC keys (=
// namespace/name) for given pod. If enabled, this includes the PVCs
// that might be created for generic ephemeral volumes.
func PodPVCIndexFunc(genericEphemeralVolumeFeatureEnabled bool) func(obj interface{}) ([]string, error) {
    return func(obj interface{}) ([]string, error) {
        pod, ok := obj.(*v1.Pod)
        if !ok {
            return []string{}, nil
        }
        keys := []string{}
        for _, podVolume := range pod.Spec.Volumes {
            claimName := ""
            if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
                claimName = pvcSource.ClaimName
            }
            if ephemeralSource := podVolume.VolumeSource.Ephemeral; genericEphemeralVolumeFeatureEnabled && ephemeralSource != nil {
                claimName = pod.Name + "-" + podVolume.Name
            }
            if claimName != "" {
                keys = append(keys, fmt.Sprintf("%s/%s", pod.Namespace, claimName))
            }
        }
        return keys, nil
    }
    return keys, nil
}

// AddPodPVCIndexerIfNotPresent adds the PodPVCIndexFunc with the current global setting for GenericEphemeralVolume.
func AddPodPVCIndexerIfNotPresent(indexer cache.Indexer) error {
    return AddIndexerIfNotPresent(indexer, PodPVCIndex,
        PodPVCIndexFunc(utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume)))
}

// AddIndexerIfNotPresent adds the index function with the name into the cache indexer if not present
|
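For readers less familiar with client-go indexers, the stand-alone sketch below (not part of this commit; the package layout, index name registration and main function are invented for illustration) shows how an index function like the one above lets a controller look up pods by the namespaced key of the PVC that would back a generic ephemeral volume:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{
		"pod-pvc-index": func(obj interface{}) ([]string, error) {
			pod := obj.(*v1.Pod)
			keys := []string{}
			for _, vol := range pod.Spec.Volumes {
				if vol.Ephemeral != nil {
					// Same naming rule as above: <pod name>-<volume name>.
					keys = append(keys, pod.Namespace+"/"+pod.Name+"-"+vol.Name)
				}
			}
			return keys, nil
		},
	})
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "my-app", Namespace: "default"},
		Spec: v1.PodSpec{Volumes: []v1.Volume{{
			Name:         "scratch",
			VolumeSource: v1.VolumeSource{Ephemeral: &v1.EphemeralVolumeSource{}},
		}}},
	}
	_ = indexer.Add(pod)
	// Deleting PVC default/my-app-scratch would find exactly this pod.
	objs, _ := indexer.ByIndex("pod-pvc-index", "default/my-app-scratch")
	fmt.Println(len(objs)) // 1
}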
pkg/controller/volume/ephemeral/BUILD (new file, 62 lines)
@@ -0,0 +1,62 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = [
        "controller.go",
        "doc.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/controller/volume/ephemeral",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/controller/volume/common:go_default_library",
        "//pkg/controller/volume/events:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//staging/src/k8s.io/client-go/informers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/scheme:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//staging/src/k8s.io/client-go/tools/record:go_default_library",
        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
        "//vendor/k8s.io/klog/v2:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = ["controller_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//pkg/controller:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//staging/src/k8s.io/client-go/tools/cache:go_default_library",
        "//vendor/github.com/stretchr/testify/assert:go_default_library",
        "//vendor/k8s.io/klog/v2:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
pkg/controller/volume/ephemeral/OWNERS (new file, 6 lines)
@@ -0,0 +1,6 @@
# See the OWNERS docs at https://go.k8s.io/owners

approvers:
- saad-ali
- jsafrane
- pohly
pkg/controller/volume/ephemeral/controller.go (new file, 286 lines)
@@ -0,0 +1,286 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ephemeral

import (
	"context"
	"fmt"
	"time"

	"k8s.io/klog/v2"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	coreinformers "k8s.io/client-go/informers/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	kcache "k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/kubernetes/pkg/controller/volume/common"
	"k8s.io/kubernetes/pkg/controller/volume/events"
	"k8s.io/kubernetes/pkg/volume/util"
)
// Controller creates PVCs for ephemeral inline volumes in a pod spec.
type Controller interface {
	Run(workers int, stopCh <-chan struct{})
}

type ephemeralController struct {
	// kubeClient is the kube API client used by volumehost to communicate with
	// the API server.
	kubeClient clientset.Interface

	// pvcLister is the shared PVC lister used to fetch and store PVC
	// objects from the API server. It is shared with other controllers and
	// therefore the PVC objects in its store should be treated as immutable.
	pvcLister  corelisters.PersistentVolumeClaimLister
	pvcsSynced kcache.InformerSynced

	// podLister is the shared Pod lister used to fetch Pod
	// objects from the API server. It is shared with other controllers and
	// therefore the Pod objects in its store should be treated as immutable.
	podLister corelisters.PodLister
	podSynced kcache.InformerSynced

	// podIndexer has the common PodPVC indexer installed to
	// limit iteration over pods to those of interest.
	podIndexer cache.Indexer

	// recorder is used to record events in the API server
	recorder record.EventRecorder

	queue workqueue.RateLimitingInterface
}

// NewController creates an ephemeral volume controller.
func NewController(
	kubeClient clientset.Interface,
	podInformer coreinformers.PodInformer,
	pvcInformer coreinformers.PersistentVolumeClaimInformer) (Controller, error) {

	ec := &ephemeralController{
		kubeClient: kubeClient,
		podLister:  podInformer.Lister(),
		podIndexer: podInformer.Informer().GetIndexer(),
		podSynced:  podInformer.Informer().HasSynced,
		pvcLister:  pvcInformer.Lister(),
		pvcsSynced: pvcInformer.Informer().HasSynced,
		queue:      workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ephemeral_volume"),
	}

	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(klog.Infof)
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
	ec.recorder = eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "ephemeral_volume"})

	podInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
		AddFunc: ec.enqueuePod,
		// The pod spec is immutable. Therefore the controller can ignore pod updates
		// because there cannot be any changes that have to be copied into the generated
		// PVC.
		// Deletion of the PVC is handled through the owner reference and garbage collection.
		// Therefore pod deletions also can be ignored.
	})
	pvcInformer.Informer().AddEventHandler(kcache.ResourceEventHandlerFuncs{
		DeleteFunc: ec.onPVCDelete,
	})
	if err := common.AddPodPVCIndexerIfNotPresent(ec.podIndexer); err != nil {
		return nil, fmt.Errorf("could not initialize ephemeral volume controller: %v", err)
	}

	return ec, nil
}
func (ec *ephemeralController) enqueuePod(obj interface{}) {
	pod, ok := obj.(*v1.Pod)
	if !ok {
		return
	}

	// Ignore pods which are already getting deleted.
	if pod.DeletionTimestamp != nil {
		return
	}

	for _, vol := range pod.Spec.Volumes {
		if vol.Ephemeral != nil {
			// It has at least one ephemeral inline volume, work on it.
			key, err := kcache.DeletionHandlingMetaNamespaceKeyFunc(pod)
			if err != nil {
				runtime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", pod, err))
				return
			}
			ec.queue.Add(key)
			break
		}
	}
}

func (ec *ephemeralController) onPVCDelete(obj interface{}) {
	pvc, ok := obj.(*v1.PersistentVolumeClaim)
	if !ok {
		return
	}

	// Someone deleted a PVC, either intentionally or
	// accidentally. If there is a pod referencing it because of
	// an ephemeral volume, then we should re-create the PVC.
	// The common indexer does some prefiltering for us by
	// limiting the list to those pods which reference
	// the PVC.
	objs, err := ec.podIndexer.ByIndex(common.PodPVCIndex, fmt.Sprintf("%s/%s", pvc.Namespace, pvc.Name))
	if err != nil {
		runtime.HandleError(fmt.Errorf("listing pods from cache: %v", err))
		return
	}
	for _, obj := range objs {
		ec.enqueuePod(obj)
	}
}

func (ec *ephemeralController) Run(workers int, stopCh <-chan struct{}) {
	defer runtime.HandleCrash()
	defer ec.queue.ShutDown()

	klog.Infof("Starting ephemeral volume controller")
	defer klog.Infof("Shutting down ephemeral volume controller")

	if !cache.WaitForNamedCacheSync("ephemeral", stopCh, ec.podSynced, ec.pvcsSynced) {
		return
	}

	for i := 0; i < workers; i++ {
		go wait.Until(ec.runWorker, time.Second, stopCh)
	}

	<-stopCh
}

func (ec *ephemeralController) runWorker() {
	for ec.processNextWorkItem() {
	}
}

func (ec *ephemeralController) processNextWorkItem() bool {
	key, shutdown := ec.queue.Get()
	if shutdown {
		return false
	}
	defer ec.queue.Done(key)

	err := ec.syncHandler(key.(string))
	if err == nil {
		ec.queue.Forget(key)
		return true
	}

	runtime.HandleError(fmt.Errorf("%v failed with: %v", key, err))
	ec.queue.AddRateLimited(key)

	return true
}
// syncHandler is invoked for each pod which might need to be processed.
// If an error is returned from this function, the pod will be requeued.
func (ec *ephemeralController) syncHandler(key string) error {
	namespace, name, err := kcache.SplitMetaNamespaceKey(key)
	if err != nil {
		return err
	}
	pod, err := ec.podLister.Pods(namespace).Get(name)
	if err != nil {
		if errors.IsNotFound(err) {
			klog.V(5).Infof("ephemeral: nothing to do for pod %s, it is gone", key)
			return nil
		}
		klog.V(5).Infof("Error getting pod %s/%s from informer: %v", namespace, name, err)
		return err
	}

	// Ignore pods which are already getting deleted.
	if pod.DeletionTimestamp != nil {
		klog.V(5).Infof("ephemeral: nothing to do for pod %s, it is marked for deletion", key)
		return nil
	}

	for _, vol := range pod.Spec.Volumes {
		if err := ec.handleVolume(pod, vol); err != nil {
			ec.recorder.Event(pod, v1.EventTypeWarning, events.FailedBinding, fmt.Sprintf("ephemeral volume %s: %v", vol.Name, err))
			return fmt.Errorf("pod %s, ephemeral volume %s: %v", key, vol.Name, err)
		}
	}

	return nil
}
// handleVolume is invoked for each volume of a pod and creates
// the backing PVC when the volume is a generic ephemeral one.
func (ec *ephemeralController) handleVolume(pod *v1.Pod, vol v1.Volume) error {
	klog.V(5).Infof("ephemeral: checking volume %s", vol.Name)
	ephemeral := vol.Ephemeral
	if ephemeral == nil {
		return nil
	}

	pvcName := pod.Name + "-" + vol.Name
	pvc, err := ec.pvcLister.PersistentVolumeClaims(pod.Namespace).Get(pvcName)
	if err != nil && !errors.IsNotFound(err) {
		return err
	}
	if pvc != nil {
		if metav1.IsControlledBy(pvc, pod) {
			// Already created, nothing more to do.
			klog.V(5).Infof("ephemeral: volume %s: PVC %s already created", vol.Name, pvcName)
			return nil
		}
		return fmt.Errorf("PVC %q (uid: %q) was not created for the pod",
			util.GetPersistentVolumeClaimQualifiedName(pvc), pvc.UID)
	}

	// Create the PVC with pod as owner.
	isTrue := true
	pvc = &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name: pvcName,
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion:         "v1",
					Kind:               "Pod",
					Name:               pod.Name,
					UID:                pod.UID,
					Controller:         &isTrue,
					BlockOwnerDeletion: &isTrue,
				},
			},
			Annotations: ephemeral.VolumeClaimTemplate.Annotations,
			Labels:      ephemeral.VolumeClaimTemplate.Labels,
		},
		Spec: ephemeral.VolumeClaimTemplate.Spec,
	}
	_, err = ec.kubeClient.CoreV1().PersistentVolumeClaims(pod.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
	if err != nil {
		return fmt.Errorf("create PVC %s: %v", pvcName, err)
	}
	return nil
}
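The conflict handling above rests entirely on metav1.IsControlledBy. A minimal stand-alone sketch (illustrative only, not from this commit; all names invented) of the two cases handleVolume distinguishes when it finds a pre-existing PVC with the computed name:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "ns", UID: types.UID("pod-uid")}}
	isTrue := true

	// PVC created by the controller: its controller owner reference points at the pod's UID.
	owned := &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{
		Name: "app-scratch",
		OwnerReferences: []metav1.OwnerReference{{
			APIVersion: "v1", Kind: "Pod", Name: pod.Name, UID: pod.UID, Controller: &isTrue,
		}},
	}}

	// Pre-existing PVC with the same name but no owner reference: the pod
	// stays blocked until this unrelated claim is removed (or an owner
	// reference is added manually).
	conflicting := &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{Name: "app-scratch"}}

	fmt.Println(metav1.IsControlledBy(owned, pod))       // true: nothing more to do
	fmt.Println(metav1.IsControlledBy(conflicting, pod)) // false: error, pod blocked
}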
pkg/controller/volume/ephemeral/controller_test.go (new file, 221 lines)
@@ -0,0 +1,221 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ephemeral

import (
	"context"
	"sort"
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
	kcache "k8s.io/client-go/tools/cache"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/controller"

	"github.com/stretchr/testify/assert"
)
var (
	testPodName         = "test-pod"
	testNamespace       = "my-namespace"
	testPodUID          = types.UID("uidpod1")
	otherNamespace      = "not-my-namespace"
	ephemeralVolumeName = "ephemeral-volume"

	testPod               = makePod(testPodName, testNamespace, testPodUID)
	testPodWithEphemeral  = makePod(testPodName, testNamespace, testPodUID, *makeEphemeralVolume(ephemeralVolumeName))
	testPodEphemeralClaim = makePVC(testPodName+"-"+ephemeralVolumeName, testNamespace, makeOwnerReference(testPodWithEphemeral, true))
	conflictingClaim      = makePVC(testPodName+"-"+ephemeralVolumeName, testNamespace, nil)
	otherNamespaceClaim   = makePVC(testPodName+"-"+ephemeralVolumeName, otherNamespace, nil)
)

func init() {
	klog.InitFlags(nil)
}
func TestSyncHandler(t *testing.T) {
	tests := []struct {
		name          string
		podKey        string
		pvcs          []*v1.PersistentVolumeClaim
		pods          []*v1.Pod
		expectedPVCs  []v1.PersistentVolumeClaim
		expectedError bool
	}{
		{
			name:         "create",
			pods:         []*v1.Pod{testPodWithEphemeral},
			podKey:       podKey(testPodWithEphemeral),
			expectedPVCs: []v1.PersistentVolumeClaim{*testPodEphemeralClaim},
		},
		{
			name:   "no-such-pod",
			podKey: podKey(testPodWithEphemeral),
		},
		{
			name: "pod-deleted",
			pods: func() []*v1.Pod {
				deleted := metav1.Now()
				pods := []*v1.Pod{testPodWithEphemeral.DeepCopy()}
				pods[0].DeletionTimestamp = &deleted
				return pods
			}(),
			podKey: podKey(testPodWithEphemeral),
		},
		{
			name:   "no-volumes",
			pods:   []*v1.Pod{testPod},
			podKey: podKey(testPod),
		},
		{
			name:         "create-with-other-PVC",
			pods:         []*v1.Pod{testPodWithEphemeral},
			podKey:       podKey(testPodWithEphemeral),
			pvcs:         []*v1.PersistentVolumeClaim{otherNamespaceClaim},
			expectedPVCs: []v1.PersistentVolumeClaim{*otherNamespaceClaim, *testPodEphemeralClaim},
		},
		{
			name:          "wrong-PVC-owner",
			pods:          []*v1.Pod{testPodWithEphemeral},
			podKey:        podKey(testPodWithEphemeral),
			pvcs:          []*v1.PersistentVolumeClaim{conflictingClaim},
			expectedPVCs:  []v1.PersistentVolumeClaim{*conflictingClaim},
			expectedError: true,
		},
	}

	for _, tc := range tests {
		// Run sequentially because of global logging.
		t.Run(tc.name, func(t *testing.T) {
			// There is no good way to shut down the informers. They spawn
			// various goroutines and some of them (in particular shared informer)
			// become very unhappy ("close on closed channel") when using a context
			// that gets cancelled. Therefore we just keep everything running.
			ctx := context.Background()

			var objects []runtime.Object
			for _, pod := range tc.pods {
				objects = append(objects, pod)
			}
			for _, pvc := range tc.pvcs {
				objects = append(objects, pvc)
			}

			fakeKubeClient := createTestClient(objects...)
			informerFactory := informers.NewSharedInformerFactory(fakeKubeClient, controller.NoResyncPeriodFunc())
			podInformer := informerFactory.Core().V1().Pods()
			pvcInformer := informerFactory.Core().V1().PersistentVolumeClaims()

			c, err := NewController(fakeKubeClient, podInformer, pvcInformer)
			if err != nil {
				t.Fatalf("error creating ephemeral controller: %v", err)
			}
			ec, _ := c.(*ephemeralController)

			// Ensure informers are up-to-date.
			go informerFactory.Start(ctx.Done())
			informerFactory.WaitForCacheSync(ctx.Done())
			cache.WaitForCacheSync(ctx.Done(), podInformer.Informer().HasSynced, pvcInformer.Informer().HasSynced)

			err = ec.syncHandler(tc.podKey)
			if err != nil && !tc.expectedError {
				t.Fatalf("unexpected error while running handler: %v", err)
			}
			if err == nil && tc.expectedError {
				t.Fatalf("unexpected success")
			}

			pvcs, err := fakeKubeClient.CoreV1().PersistentVolumeClaims("").List(ctx, metav1.ListOptions{})
			if err != nil {
				t.Fatalf("unexpected error while listing PVCs: %v", err)
			}
			assert.Equal(t, sortPVCs(tc.expectedPVCs), sortPVCs(pvcs.Items))
		})
	}
}
func makePVC(name, namespace string, owner *metav1.OwnerReference) *v1.PersistentVolumeClaim {
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
		Spec:       v1.PersistentVolumeClaimSpec{},
	}
	if owner != nil {
		pvc.OwnerReferences = []metav1.OwnerReference{*owner}
	}

	return pvc
}

func makeEphemeralVolume(name string) *v1.Volume {
	return &v1.Volume{
		Name: name,
		VolumeSource: v1.VolumeSource{
			Ephemeral: &v1.EphemeralVolumeSource{
				VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{},
			},
		},
	}
}
func makePod(name, namespace string, uid types.UID, volumes ...v1.Volume) *v1.Pod {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace, UID: uid},
		Spec: v1.PodSpec{
			Volumes: volumes,
		},
	}

	return pod
}

func podKey(pod *v1.Pod) string {
	key, _ := kcache.DeletionHandlingMetaNamespaceKeyFunc(pod)
	return key
}
func makeOwnerReference(pod *v1.Pod, isController bool) *metav1.OwnerReference {
	isTrue := true
	return &metav1.OwnerReference{
		APIVersion:         "v1",
		Kind:               "Pod",
		Name:               pod.Name,
		UID:                pod.UID,
		Controller:         &isController,
		BlockOwnerDeletion: &isTrue,
	}
}

func sortPVCs(pvcs []v1.PersistentVolumeClaim) []v1.PersistentVolumeClaim {
	sort.Slice(pvcs, func(i, j int) bool {
		if pvcs[i].Namespace != pvcs[j].Namespace {
			return pvcs[i].Namespace < pvcs[j].Namespace
		}
		return pvcs[i].Name < pvcs[j].Name
	})
	return pvcs
}
func createTestClient(objects ...runtime.Object) *fake.Clientset {
	fakeClient := fake.NewSimpleClientset(objects...)
	return fakeClient
}
pkg/controller/volume/ephemeral/doc.go (new file, 21 lines)
@@ -0,0 +1,21 @@
/*
Copyright 2020 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package ephemeral implements the controller part of
// https://github.com/kubernetes/enhancements/tree/master/keps/sig-storage/1698-generic-ephemeral-volumes
//
// It was derived from the expand controller.
package ephemeral
@@ -134,7 +134,7 @@ func NewController(p ControllerParameters) (*PersistentVolumeController, error)

 	// This custom indexer will index pods by its PVC keys. Then we don't need
 	// to iterate all pods every time to find pods which reference given PVC.
-	if err := common.AddIndexerIfNotPresent(controller.podIndexer, common.PodPVCIndex, common.PodPVCIndexFunc); err != nil {
+	if err := common.AddPodPVCIndexerIfNotPresent(controller.podIndexer); err != nil {
 		return nil, fmt.Errorf("Could not initialize attach detach controller: %v", err)
 	}
@@ -55,14 +55,18 @@ type Controller struct {

 	// allows overriding of StorageObjectInUseProtection feature Enabled/Disabled for testing
 	storageObjectInUseProtectionEnabled bool
+
+	// allows overriding of GenericEphemeralVolume feature Enabled/Disabled for testing
+	genericEphemeralVolumeFeatureEnabled bool
 }

 // NewPVCProtectionController returns a new instance of PVCProtectionController.
-func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface, storageObjectInUseProtectionFeatureEnabled bool) (*Controller, error) {
+func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface, storageObjectInUseProtectionFeatureEnabled, genericEphemeralVolumeFeatureEnabled bool) (*Controller, error) {
 	e := &Controller{
-		client: cl,
-		queue:  workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"),
-		storageObjectInUseProtectionEnabled: storageObjectInUseProtectionFeatureEnabled,
+		client:                               cl,
+		queue:                                workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"),
+		storageObjectInUseProtectionEnabled:  storageObjectInUseProtectionFeatureEnabled,
+		genericEphemeralVolumeFeatureEnabled: genericEphemeralVolumeFeatureEnabled,
 	}
 	if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil {
 		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("persistentvolumeclaim_protection_controller", cl.CoreV1().RESTClient().GetRateLimiter())
@@ -80,7 +84,7 @@ func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimI
 	e.podLister = podInformer.Lister()
 	e.podListerSynced = podInformer.Informer().HasSynced
 	e.podIndexer = podInformer.Informer().GetIndexer()
-	if err := common.AddIndexerIfNotPresent(e.podIndexer, common.PodPVCIndex, common.PodPVCIndexFunc); err != nil {
+	if err := common.AddIndexerIfNotPresent(e.podIndexer, common.PodPVCIndex, common.PodPVCIndexFunc(genericEphemeralVolumeFeatureEnabled)); err != nil {
 		return nil, fmt.Errorf("Could not initialize pvc protection controller: %v", err)
 	}
 	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -236,6 +240,7 @@ func (c *Controller) isBeingUsed(pvc *v1.PersistentVolumeClaim) (bool, error) {
 func (c *Controller) askInformer(pvc *v1.PersistentVolumeClaim) (bool, error) {
 	klog.V(4).Infof("Looking for Pods using PVC %s/%s in the Informer's cache", pvc.Namespace, pvc.Name)

+	// The indexer is used to find pods which might use the PVC.
 	objs, err := c.podIndexer.ByIndex(common.PodPVCIndex, fmt.Sprintf("%s/%s", pvc.Namespace, pvc.Name))
 	if err != nil {
 		return false, fmt.Errorf("cache-based list of pods failed while processing %s/%s: %s", pvc.Namespace, pvc.Name, err.Error())
@@ -245,6 +250,19 @@ func (c *Controller) askInformer(pvc *v1.PersistentVolumeClaim) (bool, error) {
 		if !ok {
 			continue
 		}

+		if c.genericEphemeralVolumeFeatureEnabled {
+			// We still need to look at each volume: that's redundant for volume.PersistentVolumeClaim,
+			// but for volume.Ephemeral we need to be sure that this particular PVC is the one
+			// created for the ephemeral volume.
+			if c.podUsesPVC(pod, pvc) {
+				return true, nil
+			}
+			continue
+		}
+
+		// This is the traditional behavior without GenericEphemeralVolume enabled.
 		if pod.Spec.NodeName == "" {
 			continue
 		}
@@ -265,7 +283,7 @@ func (c *Controller) askAPIServer(pvc *v1.PersistentVolumeClaim) (bool, error) {
 	}

 	for _, pod := range podsList.Items {
-		if podUsesPVC(&pod, pvc.Name) {
+		if c.podUsesPVC(&pod, pvc) {
 			return true, nil
 		}
 	}
@@ -274,13 +292,14 @@ func (c *Controller) askAPIServer(pvc *v1.PersistentVolumeClaim) (bool, error) {
 	return false, nil
 }

-func podUsesPVC(pod *v1.Pod, pvc string) bool {
+func (c *Controller) podUsesPVC(pod *v1.Pod, pvc *v1.PersistentVolumeClaim) bool {
 	// Check whether pvc is used by pod only if pod is scheduled, because
 	// kubelet sees pods after they have been scheduled and it won't allow
 	// starting a pod referencing a PVC with a non-nil deletionTimestamp.
 	if pod.Spec.NodeName != "" {
 		for _, volume := range pod.Spec.Volumes {
-			if volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.ClaimName == pvc {
+			if volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.ClaimName == pvc.Name ||
+				c.genericEphemeralVolumeFeatureEnabled && !podIsShutDown(pod) && volume.Ephemeral != nil && pod.Name+"-"+volume.Name == pvc.Name && metav1.IsControlledBy(pvc, pod) {
 				klog.V(2).Infof("Pod %s/%s uses PVC %s", pod.Namespace, pod.Name, pvc)
 				return true
 			}
@@ -289,6 +308,43 @@ func podUsesPVC(pod *v1.Pod, pvc string) bool {
 	return false
 }
+// podIsShutDown returns true if kubelet is done with the pod or
+// it was force-deleted.
+func podIsShutDown(pod *v1.Pod) bool {
+	// The following text is based on how pod shutdown was
+	// initially described to me. During PR review, it was pointed out
+	// that this is not correct: "deleteGracePeriodSeconds tells
+	// kubelet when it can start force terminating the
+	// containers. Volume teardown only starts after containers
+	// are terminated. So there is an additional time period after
+	// the grace period where volume teardown is happening."
+	//
+	// TODO (https://github.com/kubernetes/enhancements/issues/1698#issuecomment-655344680):
+	// investigate what kubelet really does and if necessary,
+	// add some other signal for "kubelet is done". For now the check
+	// is used only for ephemeral volumes, because it
+	// is needed to avoid the deadlock.
+	//
+	// A pod that has a deletionTimestamp and a zero
+	// deletionGracePeriodSeconds
+	// a) has been processed by kubelet and is ready for deletion or
+	// b) was force-deleted.
+	//
+	// It's now just waiting for garbage collection. We could wait
+	// for it to actually get removed, but that may be blocked by
+	// finalizers for the pod and thus get delayed.
+	//
+	// Worse, it is possible that there is a cyclic dependency
+	// (pod finalizer waits for PVC to get removed, PVC protection
+	// controller waits for pod to get removed). By considering
+	// the PVC unused in this case, we allow the PVC to get
+	// removed and break such a cycle.
+	//
+	// Therefore it is better to proceed with PVC removal,
+	// which is safe (case a) and/or desirable (case b).
+	return pod.DeletionTimestamp != nil && pod.DeletionGracePeriodSeconds != nil && *pod.DeletionGracePeriodSeconds == 0
+}
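To make the predicate concrete, here is a small stand-alone sketch (not part of this commit; the helper is inlined from the function above for illustration): a force-deleted pod carries a deletion timestamp with a zero grace period, which is exactly what the check treats as "kubelet is done":

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Same logic as podIsShutDown above, duplicated here so the sketch compiles on its own.
func podIsShutDown(pod *v1.Pod) bool {
	return pod.DeletionTimestamp != nil && pod.DeletionGracePeriodSeconds != nil && *pod.DeletionGracePeriodSeconds == 0
}

func main() {
	now := metav1.Now()
	zero := int64(0)
	thirty := int64(30)

	forceDeleted := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		DeletionTimestamp: &now, DeletionGracePeriodSeconds: &zero,
	}}
	gracefullyDeleting := &v1.Pod{ObjectMeta: metav1.ObjectMeta{
		DeletionTimestamp: &now, DeletionGracePeriodSeconds: &thirty,
	}}

	fmt.Println(podIsShutDown(forceDeleted))       // true: PVC may be considered unused
	fmt.Println(podIsShutDown(gracefullyDeleting)) // false: PVC still protected
}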
 // pvcAddedUpdated reacts to pvc added/updated events
 func (c *Controller) pvcAddedUpdated(obj interface{}) {
 	pvc, ok := obj.(*v1.PersistentVolumeClaim)
@@ -354,8 +410,11 @@ func (c *Controller) enqueuePVCs(pod *v1.Pod, deleted bool) {

 	// Enqueue all PVCs that the pod uses
 	for _, volume := range pod.Spec.Volumes {
-		if volume.PersistentVolumeClaim != nil {
+		switch {
+		case volume.PersistentVolumeClaim != nil:
 			c.queue.Add(pod.Namespace + "/" + volume.PersistentVolumeClaim.ClaimName)
+		case c.genericEphemeralVolumeFeatureEnabled && volume.Ephemeral != nil:
+			c.queue.Add(pod.Namespace + "/" + pod.Name + "-" + volume.Name)
 		}
 	}
 }
@@ -146,7 +146,7 @@ func generateUpdateErrorFunc(t *testing.T, failures int) clienttesting.ReactionF
 	}
 }

-func TestPVCProtectionController(t *testing.T) {
+func testPVCProtectionController(t *testing.T, genericEphemeralVolumeFeatureEnabled bool) {
 	pvcGVR := schema.GroupVersionResource{
 		Group:   v1.GroupName,
 		Version: "v1",
@@ -430,7 +430,7 @@ func TestPVCProtectionController(t *testing.T) {
 	podInformer := informers.Core().V1().Pods()

 	// Create the controller
-	ctrl, err := NewPVCProtectionController(pvcInformer, podInformer, client, test.storageObjectInUseProtectionEnabled)
+	ctrl, err := NewPVCProtectionController(pvcInformer, podInformer, client, test.storageObjectInUseProtectionEnabled, genericEphemeralVolumeFeatureEnabled)
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
@@ -518,3 +518,8 @@ func TestPVCProtectionController(t *testing.T) {

 	}
 }
+
+func TestPVCProtectionController(t *testing.T) {
+	t.Run("with-GenericEphemeralVolume", func(t *testing.T) { testPVCProtectionController(t, true) })
+	t.Run("without-GenericEphemeralVolume", func(t *testing.T) { testPVCProtectionController(t, false) })
+}
@@ -661,13 +661,28 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*BindingInfo, claim
 	return true, nil
 }

-func (b *volumeBinder) isVolumeBound(namespace string, vol *v1.Volume) (bool, *v1.PersistentVolumeClaim, error) {
-	if vol.PersistentVolumeClaim == nil {
+func (b *volumeBinder) isVolumeBound(pod *v1.Pod, vol *v1.Volume) (bound bool, pvc *v1.PersistentVolumeClaim, err error) {
+	pvcName := ""
+	ephemeral := false
+	switch {
+	case vol.PersistentVolumeClaim != nil:
+		pvcName = vol.PersistentVolumeClaim.ClaimName
+	case vol.Ephemeral != nil &&
+		utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume):
+		// Generic ephemeral inline volumes also use a PVC,
+		// just with a computed name, and...
+		pvcName = pod.Name + "-" + vol.Name
+		ephemeral = true
+	default:
 		return true, nil, nil
 	}

-	pvcName := vol.PersistentVolumeClaim.ClaimName
-	return b.isPVCBound(namespace, pvcName)
+	bound, pvc, err = b.isPVCBound(pod.Namespace, pvcName)
+	// ... the PVC must be owned by the pod.
+	if ephemeral && err == nil && pvc != nil && !metav1.IsControlledBy(pvc, pod) {
+		return false, nil, fmt.Errorf("PVC %s/%s is not owned by pod", pod.Namespace, pvcName)
+	}
+	return
 }

 func (b *volumeBinder) isPVCBound(namespace, pvcName string) (bool, *v1.PersistentVolumeClaim, error) {
@@ -703,7 +718,7 @@ func (b *volumeBinder) isPVCFullyBound(pvc *v1.PersistentVolumeClaim) bool {
 // arePodVolumesBound returns true if all volumes are fully bound
 func (b *volumeBinder) arePodVolumesBound(pod *v1.Pod) bool {
 	for _, vol := range pod.Spec.Volumes {
-		if isBound, _, _ := b.isVolumeBound(pod.Namespace, &vol); !isBound {
+		if isBound, _, _ := b.isVolumeBound(pod, &vol); !isBound {
 			// Pod has at least one PVC that needs binding
 			return false
 		}
@@ -719,7 +734,7 @@ func (b *volumeBinder) GetPodVolumes(pod *v1.Pod) (boundClaims []*v1.PersistentV
 	unboundClaimsDelayBinding = []*v1.PersistentVolumeClaim{}

 	for _, vol := range pod.Spec.Volumes {
-		volumeBound, pvc, err := b.isVolumeBound(pod.Namespace, &vol)
+		volumeBound, pvc, err := b.isVolumeBound(pod, &vol)
 		if err != nil {
 			return nil, nil, nil, err
 		}
@@ -306,6 +306,12 @@ const (
 	// spreading and disables legacy SelectorSpread plugin.
 	DefaultPodTopologySpread featuregate.Feature = "DefaultPodTopologySpread"

+	// owner: @pohly
+	// alpha: v1.19
+	//
+	// Enables generic ephemeral inline volume support for pods
+	GenericEphemeralVolume featuregate.Feature = "GenericEphemeralVolume"
+
 	// owner: @tallclair
 	// alpha: v1.12
 	// beta: v1.14
@@ -678,6 +684,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 	CSIBlockVolume:         {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.20
 	CSIInlineVolume:        {Default: true, PreRelease: featuregate.Beta},
 	CSIStorageCapacity:     {Default: false, PreRelease: featuregate.Alpha},
+	GenericEphemeralVolume: {Default: false, PreRelease: featuregate.Alpha},
 	RuntimeClass:           {Default: true, PreRelease: featuregate.Beta},
 	NodeLease:              {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
 	SCTPSupport:            {Default: true, PreRelease: featuregate.Beta},
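Because the gate defaults to off while alpha, any code path guarded by it has to flip it explicitly in tests; the changes later in this commit use the standard helper for that. A hedged sketch of the pattern (package and test names invented):

package example

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

func TestWithGenericEphemeralVolume(t *testing.T) {
	// Enable the alpha gate for the duration of this test only;
	// the returned func restores the previous value when deferred.
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, true)()

	if !utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume) {
		t.Fatal("expected gate to be enabled")
	}
}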
@@ -316,7 +316,7 @@ func (dswp *desiredStateOfWorldPopulator) processPodVolumes(
 	}

 	pvc, volumeSpec, volumeGidValue, err :=
-		dswp.createVolumeSpec(podVolume, pod.Name, pod.Namespace, mounts, devices)
+		dswp.createVolumeSpec(podVolume, pod, mounts, devices)
 	if err != nil {
 		klog.Errorf(
 			"Error processing volume %q for pod %q: %v",
@@ -491,29 +491,50 @@ func (dswp *desiredStateOfWorldPopulator) deleteProcessedPod(
 // specified volume. It dereference any PVC to get PV objects, if needed.
 // Returns an error if unable to obtain the volume at this time.
 func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
-	podVolume v1.Volume, podName string, podNamespace string, mounts, devices sets.String) (*v1.PersistentVolumeClaim, *volume.Spec, string, error) {
-	if pvcSource :=
-		podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
+	podVolume v1.Volume, pod *v1.Pod, mounts, devices sets.String) (*v1.PersistentVolumeClaim, *volume.Spec, string, error) {
+	pvcSource := podVolume.VolumeSource.PersistentVolumeClaim
+	ephemeral := false
+	if pvcSource == nil &&
+		podVolume.VolumeSource.Ephemeral != nil &&
+		utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume) {
+		// Generic ephemeral inline volumes are handled the
+		// same way as a PVC reference. The only additional
+		// constraint (checked below) is that the PVC must be
+		// owned by the pod.
+		pvcSource = &v1.PersistentVolumeClaimVolumeSource{
+			ClaimName: pod.Name + "-" + podVolume.Name,
+			ReadOnly:  podVolume.VolumeSource.Ephemeral.ReadOnly,
+		}
+		ephemeral = true
+	}
+	if pvcSource != nil {
 		klog.V(5).Infof(
 			"Found PVC, ClaimName: %q/%q",
-			podNamespace,
+			pod.Namespace,
 			pvcSource.ClaimName)

 		// If podVolume is a PVC, fetch the real PV behind the claim
 		pvc, err := dswp.getPVCExtractPV(
-			podNamespace, pvcSource.ClaimName)
+			pod.Namespace, pvcSource.ClaimName)
 		if err != nil {
 			return nil, nil, "", fmt.Errorf(
 				"error processing PVC %s/%s: %v",
-				podNamespace,
+				pod.Namespace,
 				pvcSource.ClaimName,
 				err)
 		}
+		if ephemeral && !metav1.IsControlledBy(pvc, pod) {
+			return nil, nil, "", fmt.Errorf(
+				"error processing PVC %s/%s: not the ephemeral PVC for the pod",
+				pod.Namespace,
+				pvcSource.ClaimName,
+			)
+		}
 		pvName, pvcUID := pvc.Spec.VolumeName, pvc.UID

 		klog.V(5).Infof(
 			"Found bound PV for PVC (ClaimName %q/%q pvcUID %v): pvName=%q",
-			podNamespace,
+			pod.Namespace,
 			pvcSource.ClaimName,
 			pvcUID,
 			pvName)
@@ -524,7 +545,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
 		if err != nil {
 			return nil, nil, "", fmt.Errorf(
 				"error processing PVC %s/%s: %v",
-				podNamespace,
+				pod.Namespace,
 				pvcSource.ClaimName,
 				err)
 		}
@@ -533,7 +554,7 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
 			"Extracted volumeSpec (%v) from bound PV (pvName %q) and PVC (ClaimName %q/%q pvcUID %v)",
 			volumeSpec.Name(),
 			pvName,
-			podNamespace,
+			pod.Namespace,
 			pvcSource.ClaimName,
 			pvcUID)
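Conceptually, the kubelet change above boils down to rewriting an ephemeral volume into an equivalent PVC reference before the existing PVC code path runs. A hedged stand-alone sketch of just that translation step (the helper name is invented; the commit inlines this logic in createVolumeSpec):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// toPVCSource mirrors the translation above: an ephemeral volume behaves like
// a PVC reference whose claim name is <pod name>-<volume name>.
func toPVCSource(pod *v1.Pod, vol v1.Volume) *v1.PersistentVolumeClaimVolumeSource {
	if vol.VolumeSource.PersistentVolumeClaim != nil {
		return vol.VolumeSource.PersistentVolumeClaim
	}
	if vol.VolumeSource.Ephemeral != nil {
		return &v1.PersistentVolumeClaimVolumeSource{
			ClaimName: pod.Name + "-" + vol.Name,
			ReadOnly:  vol.VolumeSource.Ephemeral.ReadOnly,
		}
	}
	return nil
}

func main() {
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "app", Namespace: "ns"}}
	vol := v1.Volume{
		Name:         "scratch",
		VolumeSource: v1.VolumeSource{Ephemeral: &v1.EphemeralVolumeSource{}},
	}
	fmt.Println(toPVCSource(pod, vol).ClaimName) // app-scratch
}

The remaining ownership check (metav1.IsControlledBy) is what keeps the kubelet from mounting an unrelated, pre-existing claim that merely happens to have the computed name.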
@@ -518,7 +518,7 @@ func TestCreateVolumeSpec_Valid_File_VolumeMounts(t *testing.T) {
 	fakePodManager.AddPod(pod)
 	mountsMap, devicesMap := util.GetPodVolumeNames(pod)
 	_, volumeSpec, _, err :=
-		dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap)
+		dswp.createVolumeSpec(pod.Spec.Volumes[0], pod, mountsMap, devicesMap)

 	// Assert
 	if volumeSpec == nil || err != nil {
@@ -564,7 +564,7 @@ func TestCreateVolumeSpec_Valid_Nil_VolumeMounts(t *testing.T) {
 	fakePodManager.AddPod(pod)
 	mountsMap, devicesMap := util.GetPodVolumeNames(pod)
 	_, volumeSpec, _, err :=
-		dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap)
+		dswp.createVolumeSpec(pod.Spec.Volumes[0], pod, mountsMap, devicesMap)

 	// Assert
 	if volumeSpec == nil || err != nil {
@@ -610,7 +610,7 @@ func TestCreateVolumeSpec_Valid_Block_VolumeDevices(t *testing.T) {
 	fakePodManager.AddPod(pod)
 	mountsMap, devicesMap := util.GetPodVolumeNames(pod)
 	_, volumeSpec, _, err :=
-		dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap)
+		dswp.createVolumeSpec(pod.Spec.Volumes[0], pod, mountsMap, devicesMap)

 	// Assert
 	if volumeSpec == nil || err != nil {
@@ -656,7 +656,7 @@ func TestCreateVolumeSpec_Invalid_File_VolumeDevices(t *testing.T) {
 	fakePodManager.AddPod(pod)
 	mountsMap, devicesMap := util.GetPodVolumeNames(pod)
 	_, volumeSpec, _, err :=
-		dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap)
+		dswp.createVolumeSpec(pod.Spec.Volumes[0], pod, mountsMap, devicesMap)

 	// Assert
 	if volumeSpec != nil || err == nil {
@@ -702,7 +702,7 @@ func TestCreateVolumeSpec_Invalid_Block_VolumeMounts(t *testing.T) {
 	fakePodManager.AddPod(pod)
 	mountsMap, devicesMap := util.GetPodVolumeNames(pod)
 	_, volumeSpec, _, err :=
-		dswp.createVolumeSpec(pod.Spec.Volumes[0], pod.Name, pod.Namespace, mountsMap, devicesMap)
+		dswp.createVolumeSpec(pod.Spec.Volumes[0], pod, mountsMap, devicesMap)

 	// Assert
 	if volumeSpec != nil || err == nil {
@@ -10,6 +10,7 @@ go_library(
     visibility = ["//visibility:public"],
     deps = [
         "//pkg/api/v1/pod:go_default_library",
+        "//pkg/features:go_default_library",
         "//pkg/scheduler/apis/config:go_default_library",
         "//pkg/scheduler/framework/runtime:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
@@ -19,8 +20,10 @@ go_library(
         "//pkg/scheduler/profile:go_default_library",
         "//pkg/scheduler/util:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
         "//staging/src/k8s.io/client-go/rest:go_default_library",
         "//staging/src/k8s.io/kube-scheduler/extender/v1:go_default_library",
@@ -29,9 +29,12 @@ import (
 	"k8s.io/klog/v2"

 	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	corelisters "k8s.io/client-go/listers/core/v1"
 	extenderv1 "k8s.io/kube-scheduler/extender/v1"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
@@ -575,11 +578,19 @@ func podPassesBasicChecks(pod *v1.Pod, pvcLister corelisters.PersistentVolumeCla
 	manifest := &(pod.Spec)
 	for i := range manifest.Volumes {
 		volume := &manifest.Volumes[i]
-		if volume.PersistentVolumeClaim == nil {
-			// Volume is not a PVC, ignore
+		var pvcName string
+		ephemeral := false
+		switch {
+		case volume.PersistentVolumeClaim != nil:
+			pvcName = volume.PersistentVolumeClaim.ClaimName
+		case volume.Ephemeral != nil &&
+			utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume):
+			pvcName = pod.Name + "-" + volume.Name
+			ephemeral = true
+		default:
+			// Volume is not using a PVC, ignore
 			continue
 		}
-		pvcName := volume.PersistentVolumeClaim.ClaimName
 		pvc, err := pvcLister.PersistentVolumeClaims(namespace).Get(pvcName)
 		if err != nil {
 			// The error has already enough context ("persistentvolumeclaim "myclaim" not found")
@@ -589,6 +600,11 @@ func podPassesBasicChecks(pod *v1.Pod, pvcLister corelisters.PersistentVolumeCla
 		if pvc.DeletionTimestamp != nil {
 			return fmt.Errorf("persistentvolumeclaim %q is being deleted", pvc.Name)
 		}

+		if ephemeral &&
+			!metav1.IsControlledBy(pvc, pod) {
+			return fmt.Errorf("persistentvolumeclaim %q was not created for the pod", pvc.Name)
+		}
 	}

 	return nil
@@ -61,7 +61,8 @@ func (d *stateData) Clone() framework.StateData {
 // In the Filter phase, pod binding cache is created for the pod and used in
 // Reserve and PreBind phases.
 type VolumeBinding struct {
-	Binder scheduling.SchedulerVolumeBinder
+	Binder                               scheduling.SchedulerVolumeBinder
+	GenericEphemeralVolumeFeatureEnabled bool
 }

 var _ framework.PreFilterPlugin = &VolumeBinding{}
@@ -77,9 +78,10 @@ func (pl *VolumeBinding) Name() string {
 	return Name
 }

-func podHasPVCs(pod *v1.Pod) bool {
+func (pl *VolumeBinding) podHasPVCs(pod *v1.Pod) bool {
 	for _, vol := range pod.Spec.Volumes {
-		if vol.PersistentVolumeClaim != nil {
+		if vol.PersistentVolumeClaim != nil ||
+			pl.GenericEphemeralVolumeFeatureEnabled && vol.Ephemeral != nil {
 			return true
 		}
 	}
@@ -91,7 +93,7 @@ func podHasPVCs(pod *v1.Pod) bool {
 // UnschedulableAndUnresolvable is returned.
 func (pl *VolumeBinding) PreFilter(ctx context.Context, state *framework.CycleState, pod *v1.Pod) *framework.Status {
 	// If pod does not reference any PVC, we don't need to do anything.
-	if !podHasPVCs(pod) {
+	if !pl.podHasPVCs(pod) {
 		state.Write(stateKey, &stateData{skip: true})
 		return nil
 	}
@@ -268,7 +270,8 @@ func New(plArgs runtime.Object, fh framework.FrameworkHandle) (framework.Plugin,
 	}
 	binder := scheduling.NewVolumeBinder(fh.ClientSet(), podInformer, nodeInformer, csiNodeInformer, pvcInformer, pvInformer, storageClassInformer, capacityCheck, time.Duration(args.BindTimeoutSeconds)*time.Second)
 	return &VolumeBinding{
-		Binder: binder,
+		Binder:                               binder,
+		GenericEphemeralVolumeFeatureEnabled: utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume),
 	}, nil
 }
@@ -346,6 +346,18 @@ func TestValidatePodFailures(t *testing.T) {
 		},
 	}

+	failGenericEphemeralPod := defaultPod()
+	failGenericEphemeralPod.Spec.Volumes = []api.Volume{
+		{
+			Name: "generic ephemeral volume",
+			VolumeSource: api.VolumeSource{
+				Ephemeral: &api.EphemeralVolumeSource{
+					VolumeClaimTemplate: &api.PersistentVolumeClaimTemplate{},
+				},
+			},
+		},
+	}
+
 	errorCases := map[string]struct {
 		pod *api.Pod
 		psp *policy.PodSecurityPolicy
@@ -485,6 +497,11 @@ func TestValidatePodFailures(t *testing.T) {
 			psp:           defaultPSP(),
 			expectedError: "csi volumes are not allowed to be used",
 		},
+		"generic ephemeral volumes without proper policy set": {
+			pod:           failGenericEphemeralPod,
+			psp:           defaultPSP(),
+			expectedError: "ephemeral volumes are not allowed to be used",
+		},
 	}
 	for name, test := range errorCases {
 		t.Run(name, func(t *testing.T) {
@@ -888,6 +905,18 @@ func TestValidatePodSuccess(t *testing.T) {
 		},
 	}

+	genericEphemeralPod := defaultPod()
+	genericEphemeralPod.Spec.Volumes = []api.Volume{
+		{
+			Name: "generic ephemeral volume",
+			VolumeSource: api.VolumeSource{
+				Ephemeral: &api.EphemeralVolumeSource{
+					VolumeClaimTemplate: &api.PersistentVolumeClaimTemplate{},
+				},
+			},
+		},
+	}
+
 	successCases := map[string]struct {
 		pod *api.Pod
 		psp *policy.PodSecurityPolicy
@@ -995,6 +1024,22 @@ func TestValidatePodSuccess(t *testing.T) {
 				return psp
 			}(),
 		},
+		"generic ephemeral volume policy with generic ephemeral volume used": {
+			pod: genericEphemeralPod,
+			psp: func() *policy.PodSecurityPolicy {
+				psp := defaultPSP()
+				psp.Spec.Volumes = []policy.FSType{policy.Ephemeral}
+				return psp
+			}(),
+		},
+		"policy.All with generic ephemeral volume used": {
+			pod: genericEphemeralPod,
+			psp: func() *policy.PodSecurityPolicy {
+				psp := defaultPSP()
+				psp.Spec.Volumes = []policy.FSType{policy.All}
+				return psp
+			}(),
+		},
 	}
 	for name, test := range successCases {
@@ -1328,6 +1373,7 @@ func defaultV1Pod() *v1.Pod {
 // the FSTypeAll wildcard.
 func TestValidateAllowedVolumes(t *testing.T) {
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
+	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, true)()

 	val := reflect.ValueOf(api.VolumeSource{})
|
@ -68,6 +68,7 @@ func GetAllFSTypesAsSet() sets.String {
|
||||
string(policy.PortworxVolume),
|
||||
string(policy.ScaleIO),
|
||||
string(policy.CSI),
|
||||
string(policy.Ephemeral),
|
||||
)
|
||||
return fstypes
|
||||
}
|
||||
@ -131,6 +132,8 @@ func GetVolumeFSType(v api.Volume) (policy.FSType, error) {
|
||||
return policy.ScaleIO, nil
|
||||
case v.CSI != nil:
|
||||
return policy.CSI, nil
|
||||
case v.Ephemeral != nil:
|
||||
return policy.Ephemeral, nil
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("unknown volume type for volume: %#v", v)
|
||||
|
@@ -630,6 +630,7 @@ func TestAdmitCaps(t *testing.T) {

 func TestAdmitVolumes(t *testing.T) {
 	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
+	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, true)()

 	val := reflect.ValueOf(kapi.VolumeSource{})
@@ -20,8 +20,10 @@ import (
 	"sync"

 	corev1 "k8s.io/api/core/v1"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	pvutil "k8s.io/kubernetes/pkg/api/v1/persistentvolume"
 	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/third_party/forked/gonum/graph"
 	"k8s.io/kubernetes/third_party/forked/gonum/graph/simple"
 )
@@ -375,8 +377,14 @@ func (g *Graph) AddPod(pod *corev1.Pod) {
 	})

 	for _, v := range pod.Spec.Volumes {
+		claimName := ""
 		if v.PersistentVolumeClaim != nil {
-			pvcVertex := g.getOrCreateVertex_locked(pvcVertexType, pod.Namespace, v.PersistentVolumeClaim.ClaimName)
+			claimName = v.PersistentVolumeClaim.ClaimName
+		} else if v.Ephemeral != nil && utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume) {
+			claimName = pod.Name + "-" + v.Name
+		}
+		if claimName != "" {
+			pvcVertex := g.getOrCreateVertex_locked(pvcVertexType, pod.Namespace, claimName)
 			e := newDestinationEdge(pvcVertex, podVertex, nodeVertex)
 			g.graph.SetEdge(e)
 			g.addEdgeToDestinationIndex_locked(e)
@@ -190,6 +190,17 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding)
 		})
 	}

+	if utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume) {
+		addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
+			ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "ephemeral-volume-controller"},
+			Rules: []rbacv1.PolicyRule{
+				rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
+				rbacv1helpers.NewRule("get", "list", "watch", "create").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
+				eventsRule(),
+			},
+		})
+	}
+
 	addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
 		ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "generic-garbage-collector"},
 		Rules: []rbacv1.PolicyRule{
staging/src/k8s.io/api/core/v1/generated.pb.go (generated, 2519 lines): diff suppressed because it is too large
@@ -1371,6 +1371,37 @@ message EphemeralContainers {
  repeated EphemeralContainer ephemeralContainers = 2;
 }

+// Represents an ephemeral volume that is handled by a normal storage driver.
+message EphemeralVolumeSource {
+  // Will be used to create a stand-alone PVC to provision the volume.
+  // The pod in which this EphemeralVolumeSource is embedded will be the
+  // owner of the PVC, i.e. the PVC will be deleted together with the
+  // pod. The name of the PVC will be `<pod name>-<volume name>` where
+  // `<volume name>` is the name from the `PodSpec.Volumes` array
+  // entry. Pod validation will reject the pod if the concatenated name
+  // is not valid for a PVC (for example, too long).
+  //
+  // An existing PVC with that name that is not owned by the pod
+  // will *not* be used for the pod to avoid using an unrelated
+  // volume by mistake. Starting the pod is then blocked until
+  // the unrelated PVC is removed. If such a pre-created PVC is
+  // meant to be used by the pod, the PVC has to be updated with an
+  // owner reference to the pod once the pod exists. Normally
+  // this should not be necessary, but it may be useful when
+  // manually reconstructing a broken cluster.
+  //
+  // This field is read-only and no changes will be made by Kubernetes
+  // to the PVC after it has been created.
+  //
+  // Required, must not be nil.
+  optional PersistentVolumeClaimTemplate volumeClaimTemplate = 1;
+
+  // Specifies a read-only configuration for the volume.
+  // Defaults to false (read/write).
+  // +optional
+  optional bool readOnly = 2;
+}
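To make the API shape concrete, here is a hedged Go sketch (all names, the image and the storage size are invented; the storage class is assumed to exist) of a pod declaring a generic ephemeral volume with a full PVC template. Given the naming rule above, the controller would create a PVC named `my-app-scratch` for it:

package main

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func examplePod() *v1.Pod {
	storageClassName := "scratch-class" // assumed to exist in the cluster
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "my-app", Namespace: "default"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:         "app",
				Image:        "registry.example/app:latest", // placeholder image
				VolumeMounts: []v1.VolumeMount{{Name: "scratch", MountPath: "/scratch"}},
			}},
			Volumes: []v1.Volume{{
				Name: "scratch",
				VolumeSource: v1.VolumeSource{
					Ephemeral: &v1.EphemeralVolumeSource{
						VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
							// Only labels and annotations are allowed in the template metadata.
							Spec: v1.PersistentVolumeClaimSpec{
								AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
								StorageClassName: &storageClassName,
								Resources: v1.ResourceRequirements{
									Requests: v1.ResourceList{
										v1.ResourceStorage: resource.MustParse("1Gi"),
									},
								},
							},
						},
					},
				},
			}},
		},
	}
}

func main() { _ = examplePod() }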
 // Event is a report of an event somewhere in the cluster.
 message Event {
   // Standard object's metadata.
@@ -2682,6 +2713,23 @@ message PersistentVolumeClaimStatus {
  repeated PersistentVolumeClaimCondition conditions = 4;
 }

+// PersistentVolumeClaimTemplate is used to produce
+// PersistentVolumeClaim objects as part of an EphemeralVolumeSource.
+message PersistentVolumeClaimTemplate {
+  // May contain labels and annotations that will be copied into the PVC
+  // when creating it. No other fields are allowed and will be rejected during
+  // validation.
+  //
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // The specification for the PersistentVolumeClaim. The entire content is
+  // copied unchanged into the PVC that gets created from this
+  // template. The same fields as in a PersistentVolumeClaim
+  // are also valid here.
+  optional PersistentVolumeClaimSpec spec = 2;
+}
+
 // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
 // This volume finds the bound PV and mounts that volume for the pod. A
 // PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another
@ -5341,6 +5389,34 @@ message VolumeSource {
|
||||
// CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).
|
||||
// +optional
|
||||
optional CSIVolumeSource csi = 28;
|
||||
|
||||
// Ephemeral represents a volume that is handled by a cluster storage driver (Alpha feature).
|
||||
// The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
|
||||
// and deleted when the pod is removed.
|
||||
//
|
||||
// Use this if:
|
||||
// a) the volume is only needed while the pod runs,
|
||||
// b) features of normal volumes like restoring from snapshot or capacity
|
||||
// tracking are needed,
|
||||
// c) the storage driver is specified through a storage class, and
|
||||
// d) the storage driver supports dynamic volume provisioning through
|
||||
// a PersistentVolumeClaim (see EphemeralVolumeSource for more
|
||||
// information on the connection between this volume type
|
||||
// and PersistentVolumeClaim).
|
||||
//
|
||||
// Use PersistentVolumeClaim or one of the vendor-specific
|
||||
// APIs for volumes that persist for longer than the lifecycle
|
||||
// of an individual pod.
|
||||
//
|
||||
// Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
|
||||
// be used that way - see the documentation of the driver for
|
||||
// more information.
|
||||
//
|
||||
// A pod can use both types of ephemeral volumes and
|
||||
// persistent volumes at the same time.
|
||||
//
|
||||
// +optional
|
||||
optional EphemeralVolumeSource ephemeral = 29;
|
||||
}
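For orientation, this is roughly what a pod using the new volume source looks like when constructed with the Go API types (pod name, image, storage class, and size are made-up placeholders):

package example

import (
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func examplePod() *corev1.Pod {
    scName := "fast-storage" // placeholder storage class
    return &corev1.Pod{
        ObjectMeta: metav1.ObjectMeta{Name: "my-app"},
        Spec: corev1.PodSpec{
            Containers: []corev1.Container{{
                Name:         "app",
                Image:        "busybox",
                VolumeMounts: []corev1.VolumeMount{{Name: "scratch", MountPath: "/scratch"}},
            }},
            Volumes: []corev1.Volume{{
                // The PVC created for this volume will be named
                // "my-app-scratch" and deleted together with the pod.
                Name: "scratch",
                VolumeSource: corev1.VolumeSource{
                    Ephemeral: &corev1.EphemeralVolumeSource{
                        VolumeClaimTemplate: &corev1.PersistentVolumeClaimTemplate{
                            Spec: corev1.PersistentVolumeClaimSpec{
                                AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
                                StorageClassName: &scName,
                                Resources: corev1.ResourceRequirements{
                                    Requests: corev1.ResourceList{
                                        corev1.ResourceStorage: resource.MustParse("1Gi"),
                                    },
                                },
                            },
                        },
                    },
                },
            }},
        },
    }
}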

// Represents a vSphere volume resource.

@ -156,6 +156,33 @@ type VolumeSource struct {
// CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).
// +optional
CSI *CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,28,opt,name=csi"`
// Ephemeral represents a volume that is handled by a cluster storage driver (Alpha feature).
// The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts,
// and deleted when the pod is removed.
//
// Use this if:
// a) the volume is only needed while the pod runs,
// b) features of normal volumes like restoring from snapshot or capacity
//    tracking are needed,
// c) the storage driver is specified through a storage class, and
// d) the storage driver supports dynamic volume provisioning through
//    a PersistentVolumeClaim (see EphemeralVolumeSource for more
//    information on the connection between this volume type
//    and PersistentVolumeClaim).
//
// Use PersistentVolumeClaim or one of the vendor-specific
// APIs for volumes that persist for longer than the lifecycle
// of an individual pod.
//
// Use CSI for light-weight local ephemeral volumes if the CSI driver is meant to
// be used that way - see the documentation of the driver for
// more information.
//
// A pod can use both types of ephemeral volumes and
// persistent volumes at the same time.
//
// +optional
Ephemeral *EphemeralVolumeSource `json:"ephemeral,omitempty" protobuf:"bytes,29,opt,name=ephemeral"`
}

// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.

@ -1746,6 +1773,54 @@ type CSIVolumeSource struct {
NodePublishSecretRef *LocalObjectReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,5,opt,name=nodePublishSecretRef"`
}

// Represents an ephemeral volume that is handled by a normal storage driver.
type EphemeralVolumeSource struct {
// Will be used to create a stand-alone PVC to provision the volume.
// The pod in which this EphemeralVolumeSource is embedded will be the
// owner of the PVC, i.e. the PVC will be deleted together with the
// pod. The name of the PVC will be `<pod name>-<volume name>` where
// `<volume name>` is the name from the `PodSpec.Volumes` array
// entry. Pod validation will reject the pod if the concatenated name
// is not valid for a PVC (for example, too long).
//
// An existing PVC with that name that is not owned by the pod
// will *not* be used for the pod to avoid using an unrelated
// volume by mistake. Starting the pod is then blocked until
// the unrelated PVC is removed. If such a pre-created PVC is
// meant to be used by the pod, the PVC has to be updated with an
// owner reference to the pod once the pod exists. Normally
// this should not be necessary, but it may be useful when
// manually reconstructing a broken cluster.
//
// This field is read-only and no changes will be made by Kubernetes
// to the PVC after it has been created.
//
// Required, must not be nil.
VolumeClaimTemplate *PersistentVolumeClaimTemplate `json:"volumeClaimTemplate,omitempty" protobuf:"bytes,1,opt,name=volumeClaimTemplate"`

// Specifies a read-only configuration for the volume.
// Defaults to false (read/write).
// +optional
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"`
}

// PersistentVolumeClaimTemplate is used to produce
// PersistentVolumeClaim objects as part of an EphemeralVolumeSource.
type PersistentVolumeClaimTemplate struct {
// May contain labels and annotations that will be copied into the PVC
// when creating it. No other fields are allowed and will be rejected during
// validation.
//
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

// The specification for the PersistentVolumeClaim. The entire content is
// copied unchanged into the PVC that gets created from this
// template. The same fields as in a PersistentVolumeClaim
// are also valid here.
Spec PersistentVolumeClaimSpec `json:"spec" protobuf:"bytes,2,name=spec"`
}

// ContainerPort represents a network port in a single container.
type ContainerPort struct {
// If specified, this must be an IANA_SVC_NAME and unique within the pod. Each

@ -626,6 +626,16 @@ func (EphemeralContainers) SwaggerDoc() map[string]string {
return map_EphemeralContainers
}

var map_EphemeralVolumeSource = map[string]string{
"": "Represents an ephemeral volume that is handled by a normal storage driver.",
"volumeClaimTemplate": "Will be used to create a stand-alone PVC to provision the volume. The pod in which this EphemeralVolumeSource is embedded will be the owner of the PVC, i.e. the PVC will be deleted together with the pod. The name of the PVC will be `<pod name>-<volume name>` where `<volume name>` is the name from the `PodSpec.Volumes` array entry. Pod validation will reject the pod if the concatenated name is not valid for a PVC (for example, too long).\n\nAn existing PVC with that name that is not owned by the pod will *not* be used for the pod to avoid using an unrelated volume by mistake. Starting the pod is then blocked until the unrelated PVC is removed. If such a pre-created PVC is meant to be used by the pod, the PVC has to be updated with an owner reference to the pod once the pod exists. Normally this should not be necessary, but it may be useful when manually reconstructing a broken cluster.\n\nThis field is read-only and no changes will be made by Kubernetes to the PVC after it has been created.\n\nRequired, must not be nil.",
"readOnly": "Specifies a read-only configuration for the volume. Defaults to false (read/write).",
}

func (EphemeralVolumeSource) SwaggerDoc() map[string]string {
return map_EphemeralVolumeSource
}
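These generated maps feed the OpenAPI/Swagger descriptions, but they can also be read directly; a small usage sketch:

package example

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"
)

func printFieldDoc() {
    // The "" key holds the type description; field keys hold the
    // per-field documentation that ends up in the OpenAPI spec.
    docs := corev1.EphemeralVolumeSource{}.SwaggerDoc()
    fmt.Println(docs["volumeClaimTemplate"])
}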

var map_Event = map[string]string{
"": "Event is a report of an event somewhere in the cluster.",
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",

@ -1319,6 +1329,16 @@ func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string {
return map_PersistentVolumeClaimStatus
}

var map_PersistentVolumeClaimTemplate = map[string]string{
"": "PersistentVolumeClaimTemplate is used to produce PersistentVolumeClaim objects as part of an EphemeralVolumeSource.",
"metadata": "May contain labels and annotations that will be copied into the PVC when creating it. No other fields are allowed and will be rejected during validation.",
"spec": "The specification for the PersistentVolumeClaim. The entire content is copied unchanged into the PVC that gets created from this template. The same fields as in a PersistentVolumeClaim are also valid here.",
}

func (PersistentVolumeClaimTemplate) SwaggerDoc() map[string]string {
return map_PersistentVolumeClaimTemplate
}

var map_PersistentVolumeClaimVolumeSource = map[string]string{
"": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).",
"claimName": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",

@ -2443,6 +2463,7 @@ var map_VolumeSource = map[string]string{
"scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
"storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
"csi": "CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).",
"ephemeral": "Ephemeral represents a volume that is handled by a cluster storage driver (Alpha feature). The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.",
}

func (VolumeSource) SwaggerDoc() map[string]string {
@ -1433,6 +1433,27 @@ func (in *EphemeralContainers) DeepCopyObject() runtime.Object {
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *EphemeralVolumeSource) DeepCopyInto(out *EphemeralVolumeSource) {
*out = *in
if in.VolumeClaimTemplate != nil {
in, out := &in.VolumeClaimTemplate, &out.VolumeClaimTemplate
*out = new(PersistentVolumeClaimTemplate)
(*in).DeepCopyInto(*out)
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralVolumeSource.
func (in *EphemeralVolumeSource) DeepCopy() *EphemeralVolumeSource {
if in == nil {
return nil
}
out := new(EphemeralVolumeSource)
in.DeepCopyInto(out)
return out
}
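A brief usage note on these generated helpers: DeepCopy clones the nested VolumeClaimTemplate as well, so a copy can be mutated without affecting the original, e.g.:

package example

import corev1 "k8s.io/api/core/v1"

func copyAndMutate(src *corev1.EphemeralVolumeSource) *corev1.EphemeralVolumeSource {
    dst := src.DeepCopy() // deep-copies VolumeClaimTemplate too
    dst.ReadOnly = true   // src is left untouched
    return dst
}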

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Event) DeepCopyInto(out *Event) {
*out = *in

@ -2985,6 +3006,24 @@ func (in *PersistentVolumeClaimStatus) DeepCopy() *PersistentVolumeClaimStatus {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeClaimTemplate) DeepCopyInto(out *PersistentVolumeClaimTemplate) {
*out = *in
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimTemplate.
func (in *PersistentVolumeClaimTemplate) DeepCopy() *PersistentVolumeClaimTemplate {
if in == nil {
return nil
}
out := new(PersistentVolumeClaimTemplate)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PersistentVolumeClaimVolumeSource) DeepCopyInto(out *PersistentVolumeClaimVolumeSource) {
*out = *in

@ -5763,6 +5802,11 @@ func (in *VolumeSource) DeepCopyInto(out *VolumeSource) {
*out = new(CSIVolumeSource)
(*in).DeepCopyInto(*out)
}
if in.Ephemeral != nil {
in, out := &in.Ephemeral, &out.Ephemeral
*out = new(EphemeralVolumeSource)
(*in).DeepCopyInto(*out)
}
return
}

@ -313,6 +313,7 @@ const (
PortworxVolume FSType = "portworxVolume"
ScaleIO FSType = "scaleIO"
CSI FSType = "csi"
Ephemeral FSType = "ephemeral"
All FSType = "*"
)
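Assuming this const block is the policy API's FSType list (as consumed by PodSecurityPolicy's allowed-volumes check), the new constant can be referenced as in the following sketch, with a made-up policy name and the other required strategy fields omitted:

package example

import (
    policyv1beta1 "k8s.io/api/policy/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func ephemeralOnlyPSP() *policyv1beta1.PodSecurityPolicy {
    return &policyv1beta1.PodSecurityPolicy{
        ObjectMeta: metav1.ObjectMeta{Name: "ephemeral-only"},
        Spec: policyv1beta1.PodSecurityPolicySpec{
            // Pods governed by this policy may only use generic
            // ephemeral volumes.
            Volumes: []policyv1beta1.FSType{policyv1beta1.Ephemeral},
            // NOTE: a real policy must also set the SELinux, RunAsUser,
            // FSGroup, and SupplementalGroups strategies; omitted here.
        },
    }
}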

(Remaining file diffs suppressed: large generated artifacts and binary test fixtures, e.g. staging/src/k8s.io/api/testdata/HEAD/batch.v1.Job.json (1245 lines, vendored) and the corresponding batch.v1.Job.pb; some files were not shown because too many files changed in this diff.)