Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-21 19:01:49 +00:00
Merge pull request #105609 from pohly/generic-ephemeral-volume-ga
generic ephemeral volume GA
commit c592bd40f2

api/openapi-spec/swagger.json (generated)
@@ -9728,7 +9728,7 @@
         },
         "ephemeral": {
           "$ref": "#/definitions/io.k8s.api.core.v1.EphemeralVolumeSource",
-          "description": "Ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.\n\nThis is a beta feature and only available when the GenericEphemeralVolume feature gate is enabled."
+          "description": "Ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time."
         },
         "fc": {
           "$ref": "#/definitions/io.k8s.api.core.v1.FCVolumeSource",
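For context on what this API change covers: a generic ephemeral volume is declared inline in the pod spec and backed by an automatically created PVC. The sketch below builds such a pod with the public v1 types; the namespace, names, and storage class are hypothetical, and the final comment reflects the claim-naming rule used throughout this commit.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	sc := "my-storage-class" // hypothetical StorageClass name
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "pod"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{Name: "app", Image: "example"}},
			Volumes: []v1.Volume{{
				Name: "scratch",
				VolumeSource: v1.VolumeSource{
					Ephemeral: &v1.EphemeralVolumeSource{
						VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
							Spec: v1.PersistentVolumeClaimSpec{
								StorageClassName: &sc,
								AccessModes:      []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
								Resources: v1.ResourceRequirements{
									Requests: v1.ResourceList{
										v1.ResourceStorage: resource.MustParse("1Gi"),
									},
								},
							},
						},
					},
				},
			}},
		},
	}
	// The ephemeral-volume controller creates a PVC named "<pod>-<volume>",
	// i.e. "pod-scratch" here, owned by the pod.
	fmt.Println(pod.Name + "-" + pod.Spec.Volumes[0].Name)
}
```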
@@ -368,18 +368,15 @@ func startVolumeExpandController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) {
 }
 
 func startEphemeralVolumeController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) {
-	if utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume) {
-		ephemeralController, err := ephemeral.NewController(
-			controllerContext.ClientBuilder.ClientOrDie("ephemeral-volume-controller"),
-			controllerContext.InformerFactory.Core().V1().Pods(),
-			controllerContext.InformerFactory.Core().V1().PersistentVolumeClaims())
-		if err != nil {
-			return nil, true, fmt.Errorf("failed to start ephemeral volume controller: %v", err)
-		}
-		go ephemeralController.Run(int(controllerContext.ComponentConfig.EphemeralVolumeController.ConcurrentEphemeralVolumeSyncs), ctx.Done())
-		return nil, true, nil
+	ephemeralController, err := ephemeral.NewController(
+		controllerContext.ClientBuilder.ClientOrDie("ephemeral-volume-controller"),
+		controllerContext.InformerFactory.Core().V1().Pods(),
+		controllerContext.InformerFactory.Core().V1().PersistentVolumeClaims())
+	if err != nil {
+		return nil, true, fmt.Errorf("failed to start ephemeral volume controller: %v", err)
 	}
-	return nil, false, nil
+	go ephemeralController.Run(int(controllerContext.ComponentConfig.EphemeralVolumeController.ConcurrentEphemeralVolumeSyncs), ctx.Done())
+	return nil, true, nil
 }
 
 func startEndpointController(ctx context.Context, controllerCtx ControllerContext) (controller.Interface, bool, error) {

@@ -554,7 +551,6 @@ func startPVCProtectionController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) {
 		controllerContext.InformerFactory.Core().V1().Pods(),
 		controllerContext.ClientBuilder.ClientOrDie("pvc-protection-controller"),
 		utilfeature.DefaultFeatureGate.Enabled(features.StorageObjectInUseProtection),
-		utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume),
 	)
 	if err != nil {
 		return nil, true, fmt.Errorf("failed to start the pvc protection controller: %v", err)
@@ -556,7 +556,6 @@ func dropDisabledFields(
 	dropDisabledProcMountField(podSpec, oldPodSpec)
 
 	dropDisabledCSIVolumeSourceAlphaFields(podSpec, oldPodSpec)
-	dropDisabledEphemeralVolumeSourceAlphaFields(podSpec, oldPodSpec)
 
 	if !utilfeature.DefaultFeatureGate.Enabled(features.NonPreemptingPriority) &&
 		!podPriorityInUse(oldPodSpec) {

@@ -605,16 +604,6 @@ func dropDisabledCSIVolumeSourceAlphaFields(podSpec, oldPodSpec *api.PodSpec) {
 	}
 }
 
-// dropDisabledEphemeralVolumeSourceAlphaFields removes disabled alpha fields from []EphemeralVolumeSource.
-// This should be called from PrepareForCreate/PrepareForUpdate for all pod specs resources containing a EphemeralVolumeSource
-func dropDisabledEphemeralVolumeSourceAlphaFields(podSpec, oldPodSpec *api.PodSpec) {
-	if !utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume) && !ephemeralInUse(oldPodSpec) {
-		for i := range podSpec.Volumes {
-			podSpec.Volumes[i].Ephemeral = nil
-		}
-	}
-}
-
 func dropPodAffinityTermNamespaceSelector(terms []api.PodAffinityTerm) {
 	for i := range terms {
 		terms[i].NamespaceSelector = nil

@@ -795,19 +784,6 @@ func csiInUse(podSpec *api.PodSpec) bool {
 	return false
 }
 
-// ephemeralInUse returns true if any pod's spec include inline CSI volumes.
-func ephemeralInUse(podSpec *api.PodSpec) bool {
-	if podSpec == nil {
-		return false
-	}
-	for i := range podSpec.Volumes {
-		if podSpec.Volumes[i].Ephemeral != nil {
-			return true
-		}
-	}
-	return false
-}
-
 // SeccompAnnotationForField takes a pod seccomp profile field and returns the
 // converted annotation value
 func SeccompAnnotationForField(field *api.SeccompProfile) string {
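The function removed above implements the standard gating pattern for pre-GA API fields: on create or update, clear the new field unless its feature gate is enabled or the old object already uses it, so already-persisted objects stay valid. A minimal self-contained sketch of that ratchet, with hypothetical types rather than the Kubernetes API:

```go
package main

import "fmt"

type spec struct{ newField *string }

// dropDisabled clears newField unless the gate is on or the old object
// already used it; existing use is preserved so updates do not strip data.
func dropDisabled(cur, old *spec, gateEnabled bool) {
	if !gateEnabled && (old == nil || old.newField == nil) {
		cur.newField = nil
	}
}

func main() {
	v := "x"
	cur, old := &spec{newField: &v}, &spec{newField: &v}
	dropDisabled(cur, old, false)
	fmt.Println(cur.newField != nil) // true: prior use is kept even with the gate off
}
```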
@@ -182,9 +182,6 @@ type VolumeSource struct {
 	// A pod can use both types of ephemeral volumes and
 	// persistent volumes at the same time.
 	//
-	// This is a beta feature and only available when the GenericEphemeralVolume
-	// feature gate is enabled.
-	//
 	// +optional
 	Ephemeral *EphemeralVolumeSource
 }
@@ -22,7 +22,7 @@ import (
 	"regexp"
 	"strings"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	policyapiv1beta1 "k8s.io/api/policy/v1beta1"
 	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
 	unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"

@@ -92,26 +92,19 @@ func ValidatePodDisruptionBudgetStatusUpdate(status, oldStatus policy.PodDisruptionBudgetStatus,
 // trailing dashes are allowed.
 var ValidatePodSecurityPolicyName = apimachineryvalidation.NameIsDNSSubdomain
 
-// PodSecurityPolicyValidationOptions contains additional parameters for ValidatePodSecurityPolicy.
-type PodSecurityPolicyValidationOptions struct {
-	// AllowEphemeralVolumeType determines whether Ephemeral is a valid entry
-	// in PodSecurityPolicySpec.Volumes.
-	AllowEphemeralVolumeType bool
-}
-
 // ValidatePodSecurityPolicy validates a PodSecurityPolicy and returns an ErrorList
 // with any errors.
-func ValidatePodSecurityPolicy(psp *policy.PodSecurityPolicy, opts PodSecurityPolicyValidationOptions) field.ErrorList {
+func ValidatePodSecurityPolicy(psp *policy.PodSecurityPolicy) field.ErrorList {
 	allErrs := field.ErrorList{}
 	allErrs = append(allErrs, apivalidation.ValidateObjectMeta(&psp.ObjectMeta, false, ValidatePodSecurityPolicyName, field.NewPath("metadata"))...)
 	allErrs = append(allErrs, ValidatePodSecurityPolicySpecificAnnotations(psp.Annotations, field.NewPath("metadata").Child("annotations"))...)
-	allErrs = append(allErrs, ValidatePodSecurityPolicySpec(&psp.Spec, opts, field.NewPath("spec"))...)
+	allErrs = append(allErrs, ValidatePodSecurityPolicySpec(&psp.Spec, field.NewPath("spec"))...)
 	return allErrs
 }
 
 // ValidatePodSecurityPolicySpec validates a PodSecurityPolicySpec and returns an ErrorList
 // with any errors.
-func ValidatePodSecurityPolicySpec(spec *policy.PodSecurityPolicySpec, opts PodSecurityPolicyValidationOptions, fldPath *field.Path) field.ErrorList {
+func ValidatePodSecurityPolicySpec(spec *policy.PodSecurityPolicySpec, fldPath *field.Path) field.ErrorList {
 	allErrs := field.ErrorList{}
 
 	allErrs = append(allErrs, validatePSPRunAsUser(fldPath.Child("runAsUser"), &spec.RunAsUser)...)

@@ -119,7 +112,7 @@ func ValidatePodSecurityPolicySpec(spec *policy.PodSecurityPolicySpec, opts PodSecurityPolicyValidationOptions,
 	allErrs = append(allErrs, validatePSPSELinux(fldPath.Child("seLinux"), &spec.SELinux)...)
 	allErrs = append(allErrs, validatePSPSupplementalGroup(fldPath.Child("supplementalGroups"), &spec.SupplementalGroups)...)
 	allErrs = append(allErrs, validatePSPFSGroup(fldPath.Child("fsGroup"), &spec.FSGroup)...)
-	allErrs = append(allErrs, validatePodSecurityPolicyVolumes(opts, fldPath, spec.Volumes)...)
+	allErrs = append(allErrs, validatePodSecurityPolicyVolumes(fldPath, spec.Volumes)...)
 	if len(spec.RequiredDropCapabilities) > 0 && hasCap(policy.AllowAllCapabilities, spec.AllowedCapabilities) {
 		allErrs = append(allErrs, field.Invalid(field.NewPath("requiredDropCapabilities"), spec.RequiredDropCapabilities,
 			"must be empty when all capabilities are allowed by a wildcard"))

@@ -327,15 +320,11 @@ func validatePSPSupplementalGroup(fldPath *field.Path, groupOptions *policy.SupplementalGroupsStrategyOptions)
 }
 
 // validatePodSecurityPolicyVolumes validates the volume fields of PodSecurityPolicy.
-func validatePodSecurityPolicyVolumes(opts PodSecurityPolicyValidationOptions, fldPath *field.Path, volumes []policy.FSType) field.ErrorList {
+func validatePodSecurityPolicyVolumes(fldPath *field.Path, volumes []policy.FSType) field.ErrorList {
 	allErrs := field.ErrorList{}
 	allowed := psputil.GetAllFSTypesAsSet()
 	// add in the * value since that is a pseudo type that is not included by default
 	allowed.Insert(string(policy.All))
-	// Ephemeral may or may not be allowed.
-	if !opts.AllowEphemeralVolumeType {
-		allowed.Delete(string(policy.Ephemeral))
-	}
 	for _, v := range volumes {
 		if !allowed.Has(string(v)) {
 			allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumes"), v, allowed.List()))

@@ -530,11 +519,11 @@ func validateRuntimeClassStrategy(fldPath *field.Path, rc *policy.RuntimeClassStrategyOptions)
 }
 
 // ValidatePodSecurityPolicyUpdate validates a PSP for updates.
-func ValidatePodSecurityPolicyUpdate(old *policy.PodSecurityPolicy, new *policy.PodSecurityPolicy, opts PodSecurityPolicyValidationOptions) field.ErrorList {
+func ValidatePodSecurityPolicyUpdate(old *policy.PodSecurityPolicy, new *policy.PodSecurityPolicy) field.ErrorList {
 	allErrs := field.ErrorList{}
 	allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&new.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...)
 	allErrs = append(allErrs, ValidatePodSecurityPolicySpecificAnnotations(new.Annotations, field.NewPath("metadata").Child("annotations"))...)
-	allErrs = append(allErrs, ValidatePodSecurityPolicySpec(&new.Spec, opts, field.NewPath("spec"))...)
+	allErrs = append(allErrs, ValidatePodSecurityPolicySpec(&new.Spec, field.NewPath("spec"))...)
 	return allErrs
 }
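The retired PodSecurityPolicyValidationOptions existed to gate a single enum value in PodSecurityPolicySpec.Volumes. A toy sketch of that allow-set pattern, using hypothetical types and volume names rather than the real psputil API:

```go
package main

import "fmt"

// validateVolumes mimics the retired check: build the set of all known FSTypes
// plus the "*" pseudo-type, and delete "ephemeral" only while the gate is off.
// With the feature GA, the deletion step disappears entirely.
func validateVolumes(volumes []string, allowEphemeral bool) []error {
	allowed := map[string]bool{"configMap": true, "secret": true, "ephemeral": true, "*": true}
	if !allowEphemeral {
		delete(allowed, "ephemeral")
	}
	var errs []error
	for _, v := range volumes {
		if !allowed[v] {
			errs = append(errs, fmt.Errorf("unsupported volume type %q", v))
		}
	}
	return errs
}

func main() {
	fmt.Println(validateVolumes([]string{"ephemeral"}, false)) // one error
	fmt.Println(validateVolumes([]string{"ephemeral"}, true))  // no errors
}
```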
@@ -22,7 +22,7 @@ import (
 	"time"
 
 	"github.com/stretchr/testify/assert"
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	policyv1beta1 "k8s.io/api/policy/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"

@@ -590,7 +590,7 @@ func TestValidatePodSecurityPolicy(t *testing.T) {
 	}
 
 	for k, v := range errorCases {
-		errs := ValidatePodSecurityPolicy(v.psp, PodSecurityPolicyValidationOptions{})
+		errs := ValidatePodSecurityPolicy(v.psp)
 		if len(errs) == 0 {
 			t.Errorf("%s expected errors but got none", k)
 			continue

@@ -613,7 +613,7 @@ func TestValidatePodSecurityPolicy(t *testing.T) {
 	// Should not be able to update to an invalid policy.
 	for k, v := range errorCases {
 		v.psp.ResourceVersion = "444" // Required for updates.
-		errs := ValidatePodSecurityPolicyUpdate(validPSP(), v.psp, PodSecurityPolicyValidationOptions{})
+		errs := ValidatePodSecurityPolicyUpdate(validPSP(), v.psp)
 		if len(errs) == 0 {
 			t.Errorf("[%s] expected update errors but got none", k)
 			continue

@@ -743,13 +743,13 @@ func TestValidatePodSecurityPolicy(t *testing.T) {
 	}
 
 	for k, v := range successCases {
-		if errs := ValidatePodSecurityPolicy(v.psp, PodSecurityPolicyValidationOptions{}); len(errs) != 0 {
+		if errs := ValidatePodSecurityPolicy(v.psp); len(errs) != 0 {
 			t.Errorf("Expected success for %s, got %v", k, errs)
 		}
 
 		// Should be able to update to a valid PSP.
 		v.psp.ResourceVersion = "444" // Required for updates.
-		if errs := ValidatePodSecurityPolicyUpdate(validPSP(), v.psp, PodSecurityPolicyValidationOptions{}); len(errs) != 0 {
+		if errs := ValidatePodSecurityPolicyUpdate(validPSP(), v.psp); len(errs) != 0 {
 			t.Errorf("Expected success for %s update, got %v", k, errs)
 		}
 	}

@@ -786,7 +786,7 @@ func TestValidatePSPVolumes(t *testing.T) {
 	for _, strVolume := range volumes.List() {
 		psp := validPSP()
 		psp.Spec.Volumes = []policy.FSType{policy.FSType(strVolume)}
-		errs := ValidatePodSecurityPolicy(psp, PodSecurityPolicyValidationOptions{AllowEphemeralVolumeType: true})
+		errs := ValidatePodSecurityPolicy(psp)
 		if len(errs) != 0 {
 			t.Errorf("%s validation expected no errors but received %v", strVolume, errs)
 		}
@@ -1118,34 +1118,25 @@ func TestAllowEphemeralVolumeType(t *testing.T) {
 		},
 	}
 
-	for _, allowed := range []bool{true, false} {
-		for _, oldPSPInfo := range pspInfo {
-			for _, newPSPInfo := range pspInfo {
-				oldPSP := oldPSPInfo.psp()
-				newPSP := newPSPInfo.psp()
-				if newPSP == nil {
-					continue
-				}
-
-				t.Run(fmt.Sprintf("feature enabled=%v, old PodSecurityPolicySpec %v, new PodSecurityPolicySpec %v", allowed, oldPSPInfo.description, newPSPInfo.description), func(t *testing.T) {
-					opts := PodSecurityPolicyValidationOptions{
-						AllowEphemeralVolumeType: allowed,
-					}
-					var errs field.ErrorList
-					expectErrors := newPSPInfo.hasGenericVolume && !allowed
-					if oldPSP == nil {
-						errs = ValidatePodSecurityPolicy(newPSP, opts)
-					} else {
-						errs = ValidatePodSecurityPolicyUpdate(oldPSP, newPSP, opts)
-					}
-					if expectErrors && len(errs) == 0 {
-						t.Error("expected errors, got none")
-					}
-					if !expectErrors && len(errs) > 0 {
-						t.Errorf("expected no errors, got: %v", errs)
-					}
-				})
-			}
+	for _, oldPSPInfo := range pspInfo {
+		for _, newPSPInfo := range pspInfo {
+			oldPSP := oldPSPInfo.psp()
+			newPSP := newPSPInfo.psp()
+			if newPSP == nil {
+				continue
+			}
+
+			t.Run(fmt.Sprintf("old PodSecurityPolicySpec %v, new PodSecurityPolicySpec %v", oldPSPInfo.description, newPSPInfo.description), func(t *testing.T) {
+				var errs field.ErrorList
+				if oldPSP == nil {
+					errs = ValidatePodSecurityPolicy(newPSP)
+				} else {
+					errs = ValidatePodSecurityPolicyUpdate(oldPSP, newPSP)
+				}
+				if len(errs) > 0 {
+					t.Errorf("expected no errors, got: %v", errs)
+				}
+			})
 		}
 	}
 }
@@ -47,10 +47,9 @@ func CreateVolumeSpec(podVolume v1.Volume, pod *v1.Pod, nodeName types.NodeName,
 		claimName = pvcSource.ClaimName
 		readOnly = pvcSource.ReadOnly
 	}
-	isEphemeral := false
-	if ephemeralSource := podVolume.VolumeSource.Ephemeral; ephemeralSource != nil && utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume) {
+	isEphemeral := podVolume.VolumeSource.Ephemeral != nil
+	if isEphemeral {
 		claimName = ephemeral.VolumeClaimName(pod, &podVolume)
-		isEphemeral = true
 	}
 	if claimName != "" {
 		klog.V(10).Infof(
@@ -20,9 +20,8 @@ import (
 	"fmt"
 
 	v1 "k8s.io/api/core/v1"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/tools/cache"
-	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/component-helpers/storage/ephemeral"
 )
 
 const (

@@ -31,9 +30,9 @@ const (
 )
 
 // PodPVCIndexFunc creates an index function that returns PVC keys (=
-// namespace/name) for given pod. If enabled, this includes the PVCs
+// namespace/name) for given pod. This includes the PVCs
 // that might be created for generic ephemeral volumes.
-func PodPVCIndexFunc(genericEphemeralVolumeFeatureEnabled bool) func(obj interface{}) ([]string, error) {
+func PodPVCIndexFunc() func(obj interface{}) ([]string, error) {
 	return func(obj interface{}) ([]string, error) {
 		pod, ok := obj.(*v1.Pod)
 		if !ok {

@@ -44,9 +43,8 @@ func PodPVCIndexFunc(genericEphemeralVolumeFeatureEnabled bool) func(obj interface{}) ([]string, error) {
 			claimName := ""
 			if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
 				claimName = pvcSource.ClaimName
-			}
-			if ephemeralSource := podVolume.VolumeSource.Ephemeral; genericEphemeralVolumeFeatureEnabled && ephemeralSource != nil {
-				claimName = pod.Name + "-" + podVolume.Name
+			} else if podVolume.VolumeSource.Ephemeral != nil {
+				claimName = ephemeral.VolumeClaimName(pod, &podVolume)
 			}
 			if claimName != "" {
 				keys = append(keys, fmt.Sprintf("%s/%s", pod.Namespace, claimName))

@@ -56,10 +54,9 @@ func PodPVCIndexFunc(genericEphemeralVolumeFeatureEnabled bool) func(obj interface{}) ([]string, error) {
 	}
 }
 
-// AddPodPVCIndexerIfNotPresent adds the PodPVCIndexFunc with the current global setting for GenericEphemeralVolume.
+// AddPodPVCIndexerIfNotPresent adds the PodPVCIndexFunc.
 func AddPodPVCIndexerIfNotPresent(indexer cache.Indexer) error {
-	return AddIndexerIfNotPresent(indexer, PodPVCIndex,
-		PodPVCIndexFunc(utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume)))
+	return AddIndexerIfNotPresent(indexer, PodPVCIndex, PodPVCIndexFunc())
 }
 
 // AddIndexerIfNotPresent adds the index function with the name into the cache indexer if not present
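How a consumer uses the index registered above: the index maps a PVC key (namespace/name) to all pods referencing it, so controllers can answer "who uses this claim?" without listing every pod. A minimal sketch with an in-memory indexer; the index name and claim naming are assumptions mirroring the code above:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

const podPVCIndex = "pod-pvc-index" // stand-in for common.PodPVCIndex

// podPVCIndexFunc maps a pod to the PVC keys it references, including the
// implied claim of a generic ephemeral volume ("<pod>-<volume>").
func podPVCIndexFunc(obj interface{}) ([]string, error) {
	pod, ok := obj.(*v1.Pod)
	if !ok {
		return nil, nil
	}
	var keys []string
	for _, vol := range pod.Spec.Volumes {
		claimName := ""
		if vol.PersistentVolumeClaim != nil {
			claimName = vol.PersistentVolumeClaim.ClaimName
		} else if vol.Ephemeral != nil {
			claimName = pod.Name + "-" + vol.Name
		}
		if claimName != "" {
			keys = append(keys, pod.Namespace+"/"+claimName)
		}
	}
	return keys, nil
}

func main() {
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{podPVCIndex: podPVCIndexFunc})
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "pod"},
		Spec: v1.PodSpec{Volumes: []v1.Volume{{
			Name:         "scratch",
			VolumeSource: v1.VolumeSource{Ephemeral: &v1.EphemeralVolumeSource{}},
		}}},
	}
	_ = indexer.Add(pod)
	pods, _ := indexer.ByIndex(podPVCIndex, "ns/pod-scratch")
	fmt.Println(len(pods)) // 1
}
```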
@@ -56,18 +56,14 @@ type Controller struct {
 
 	// allows overriding of StorageObjectInUseProtection feature Enabled/Disabled for testing
 	storageObjectInUseProtectionEnabled bool
-
-	// allows overriding of GenericEphemeralVolume feature Enabled/Disabled for testing
-	genericEphemeralVolumeFeatureEnabled bool
 }
 
 // NewPVCProtectionController returns a new instance of PVCProtectionController.
-func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface, storageObjectInUseProtectionFeatureEnabled, genericEphemeralVolumeFeatureEnabled bool) (*Controller, error) {
+func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimInformer, podInformer coreinformers.PodInformer, cl clientset.Interface, storageObjectInUseProtectionFeatureEnabled bool) (*Controller, error) {
 	e := &Controller{
-		client:                               cl,
-		queue:                                workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"),
-		storageObjectInUseProtectionEnabled:  storageObjectInUseProtectionFeatureEnabled,
-		genericEphemeralVolumeFeatureEnabled: genericEphemeralVolumeFeatureEnabled,
+		client:                              cl,
+		queue:                               workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "pvcprotection"),
+		storageObjectInUseProtectionEnabled: storageObjectInUseProtectionFeatureEnabled,
 	}
 	if cl != nil && cl.CoreV1().RESTClient().GetRateLimiter() != nil {
 		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("persistentvolumeclaim_protection_controller", cl.CoreV1().RESTClient().GetRateLimiter())

@@ -85,7 +81,7 @@ func NewPVCProtectionController(pvcInformer coreinformers.PersistentVolumeClaimInformer,
 	e.podLister = podInformer.Lister()
 	e.podListerSynced = podInformer.Informer().HasSynced
 	e.podIndexer = podInformer.Informer().GetIndexer()
-	if err := common.AddIndexerIfNotPresent(e.podIndexer, common.PodPVCIndex, common.PodPVCIndexFunc(genericEphemeralVolumeFeatureEnabled)); err != nil {
+	if err := common.AddIndexerIfNotPresent(e.podIndexer, common.PodPVCIndex, common.PodPVCIndexFunc()); err != nil {
 		return nil, fmt.Errorf("could not initialize pvc protection controller: %w", err)
 	}
 	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{

@@ -252,23 +248,12 @@ func (c *Controller) askInformer(pvc *v1.PersistentVolumeClaim) (bool, error) {
 			continue
 		}
 
-		if c.genericEphemeralVolumeFeatureEnabled {
-			// We still need to look at each volume: that's redundant for volume.PersistentVolumeClaim,
-			// but for volume.Ephemeral we need to be sure that this particular PVC is the one
-			// created for the ephemeral volume.
-			if c.podUsesPVC(pod, pvc) {
-				return true, nil
-			}
-			continue
-		}
-
-		// This is the traditional behavior without GenericEphemeralVolume enabled.
-		if pod.Spec.NodeName == "" {
-			continue
-		}
-		// found a pod using this PVC
-		return true, nil
+		// We still need to look at each volume: that's redundant for volume.PersistentVolumeClaim,
+		// but for volume.Ephemeral we need to be sure that this particular PVC is the one
+		// created for the ephemeral volume.
+		if c.podUsesPVC(pod, pvc) {
+			return true, nil
+		}
 	}
 
 	klog.V(4).InfoS("No Pod using PVC was found in the Informer's cache", "PVC", klog.KObj(pvc))

@@ -300,7 +285,7 @@ func (c *Controller) podUsesPVC(pod *v1.Pod, pvc *v1.PersistentVolumeClaim) bool {
 	if pod.Spec.NodeName != "" {
 		for _, volume := range pod.Spec.Volumes {
 			if volume.PersistentVolumeClaim != nil && volume.PersistentVolumeClaim.ClaimName == pvc.Name ||
-				c.genericEphemeralVolumeFeatureEnabled && !podIsShutDown(pod) && volume.Ephemeral != nil && ephemeral.VolumeClaimName(pod, &volume) == pvc.Name && ephemeral.VolumeIsForPod(pod, pvc) == nil {
+				!podIsShutDown(pod) && volume.Ephemeral != nil && ephemeral.VolumeClaimName(pod, &volume) == pvc.Name && ephemeral.VolumeIsForPod(pod, pvc) == nil {
 				klog.V(2).InfoS("Pod uses PVC", "pod", klog.KObj(pod), "PVC", klog.KObj(pvc))
 				return true
 			}

@@ -407,7 +392,7 @@ func (c *Controller) enqueuePVCs(pod *v1.Pod, deleted bool) {
 		switch {
 		case volume.PersistentVolumeClaim != nil:
 			c.queue.Add(pod.Namespace + "/" + volume.PersistentVolumeClaim.ClaimName)
-		case c.genericEphemeralVolumeFeatureEnabled && volume.Ephemeral != nil:
+		case volume.Ephemeral != nil:
 			c.queue.Add(pod.Namespace + "/" + ephemeral.VolumeClaimName(pod, &volume))
 		}
 	}
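ephemeral.VolumeIsForPod, used above, subsumes the metav1.IsControlledBy checks deleted elsewhere in this commit and returns the descriptive error the updated tests expect. A hedged sketch of the check (the real helper may validate additional owner-reference fields):

```go
package ephemeralsketch

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// volumeIsForPod approximates ephemeral.VolumeIsForPod: the auto-created PVC
// must list the pod as its controlling owner, otherwise a pre-existing claim
// that merely has the right name could be hijacked by a new pod.
func volumeIsForPod(pod *v1.Pod, pvc *v1.PersistentVolumeClaim) error {
	if !metav1.IsControlledBy(pvc, pod) {
		// Error text taken from the updated tests in this commit.
		return fmt.Errorf("PVC %s/%s was not created for pod %s/%s (pod is not owner)",
			pvc.Namespace, pvc.Name, pod.Namespace, pod.Name)
	}
	return nil
}
```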
@@ -146,7 +146,7 @@ func generateUpdateErrorFunc(t *testing.T, failures int) clienttesting.ReactionFunc {
 	}
 }
 
-func testPVCProtectionController(t *testing.T, genericEphemeralVolumeFeatureEnabled bool) {
+func TestPVCProtectionController(t *testing.T) {
 	pvcGVR := schema.GroupVersionResource{
 		Group:    v1.GroupName,
 		Version:  "v1",

@@ -430,7 +430,7 @@ func testPVCProtectionController(t *testing.T, genericEphemeralVolumeFeatureEnabled bool) {
 		podInformer := informers.Core().V1().Pods()
 
 		// Create the controller
-		ctrl, err := NewPVCProtectionController(pvcInformer, podInformer, client, test.storageObjectInUseProtectionEnabled, genericEphemeralVolumeFeatureEnabled)
+		ctrl, err := NewPVCProtectionController(pvcInformer, podInformer, client, test.storageObjectInUseProtectionEnabled)
 		if err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}

@@ -518,8 +518,3 @@ func testPVCProtectionController(t *testing.T, genericEphemeralVolumeFeatureEnabled bool) {
 
 	}
 }
-
-func TestPVCProtectionController(t *testing.T) {
-	t.Run("with-GenericEphemeralVolume", func(t *testing.T) { testPVCProtectionController(t, true) })
-	t.Run("without-GenericEphemeralVolume", func(t *testing.T) { testPVCProtectionController(t, false) })
-}
@@ -171,6 +171,7 @@ const (
 	// owner: @pohly
 	// alpha: v1.19
 	// beta: v1.21
+	// GA: v1.23
 	//
 	// Enables generic ephemeral inline volume support for pods
 	GenericEphemeralVolume featuregate.Feature = "GenericEphemeralVolume"

@@ -826,7 +827,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
 	CSIInlineVolume:        {Default: true, PreRelease: featuregate.Beta},
 	CSIStorageCapacity:     {Default: true, PreRelease: featuregate.Beta},
 	CSIServiceAccountToken: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23
-	GenericEphemeralVolume: {Default: true, PreRelease: featuregate.Beta},
+	GenericEphemeralVolume: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.25
 	CSIVolumeFSGroupPolicy: {Default: true, PreRelease: featuregate.Beta},
 	VolumeSubpath:          {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.25
 	RuntimeClass:           {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23
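GA with LockToDefault means the gate can no longer be turned off: an attempt to set it away from its default is rejected outright instead of silently diverging, which is what lets all the per-component enablement plumbing below be deleted. A minimal sketch with a standalone gate (the exact error text is the library's, not verified here):

```go
package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

func main() {
	gate := featuregate.NewFeatureGate()
	_ = gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		"GenericEphemeralVolume": {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
	})
	// Setting a locked gate away from its default fails.
	err := gate.Set("GenericEphemeralVolume=false")
	fmt.Println(err != nil) // true
}
```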
@@ -2038,9 +2038,6 @@ func (kl *Kubelet) hasHostMountPVC(pod *v1.Pod) bool {
 		case volume.PersistentVolumeClaim != nil:
 			pvcName = volume.PersistentVolumeClaim.ClaimName
 		case volume.Ephemeral != nil:
-			if !utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume) {
-				continue
-			}
 			pvcName = ephemeral.VolumeClaimName(pod, &volume)
 		default:
 			continue
@@ -38,10 +38,8 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/diff"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	core "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/record"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	netutils "k8s.io/utils/net"
 
 	// TODO: remove this import if

@@ -49,7 +47,6 @@ import (
 	// to "v1"?
 
 	_ "k8s.io/kubernetes/pkg/apis/core/install"
-	"k8s.io/kubernetes/pkg/features"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
 	"k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward"

@@ -2979,13 +2976,12 @@ func TestGetPortForward(t *testing.T) {
 
 func TestHasHostMountPVC(t *testing.T) {
 	type testcase struct {
-		pvError          error
-		pvcError         error
-		expected         bool
-		podHasPVC        bool
-		pvcIsHostPath    bool
-		podHasEphemeral  bool
-		ephemeralEnabled bool
+		pvError         error
+		pvcError        error
+		expected        bool
+		podHasPVC       bool
+		pvcIsHostPath   bool
+		podHasEphemeral bool
 	}
 	tests := map[string]testcase{
 		"no pvc": {podHasPVC: false, expected: false},

@@ -3005,16 +3001,9 @@ func TestHasHostMountPVC(t *testing.T) {
 			expected:      true,
 		},
 		"enabled ephemeral host path": {
-			podHasEphemeral:  true,
-			pvcIsHostPath:    true,
-			ephemeralEnabled: true,
-			expected:         true,
-		},
-		"disabled ephemeral host path": {
-			podHasEphemeral:  true,
-			pvcIsHostPath:    true,
-			ephemeralEnabled: false,
-			expected:         false,
+			podHasEphemeral: true,
+			pvcIsHostPath:   true,
+			expected:        true,
 		},
 		"non host path pvc": {
 			podHasPVC: true,

@@ -3024,7 +3013,6 @@ func TestHasHostMountPVC(t *testing.T) {
 	}
 
 	run := func(t *testing.T, v testcase) {
-		defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, v.ephemeralEnabled)()
 		testKubelet := newTestKubelet(t, false)
 		defer testKubelet.Cleanup()
 		pod := &v1.Pod{
@@ -149,8 +149,7 @@ func (s *volumeStatCalculator) calcAndStoreStats() {
 					Name:      pvcSource.ClaimName,
 					Namespace: s.pod.GetNamespace(),
 				}
-			}
-			if volSpec.Ephemeral != nil && utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume) {
+			} else if volSpec.Ephemeral != nil {
 				pvcRef = &stats.PVCReference{
 					Name:      ephemeral.VolumeClaimName(s.pod, &volSpec),
 					Namespace: s.pod.GetNamespace(),

@@ -105,7 +105,6 @@ var (
 )
 
 func TestPVCRef(t *testing.T) {
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, true)()
 	mockCtrl := gomock.NewController(t)
 	defer mockCtrl.Finish()
 
@@ -36,6 +36,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/component-helpers/storage/ephemeral"
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/config"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"

@@ -462,28 +463,15 @@ func (dswp *desiredStateOfWorldPopulator) deleteProcessedPod(
 func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
 	podVolume v1.Volume, pod *v1.Pod, mounts, devices sets.String) (*v1.PersistentVolumeClaim, *volume.Spec, string, error) {
 	pvcSource := podVolume.VolumeSource.PersistentVolumeClaim
-	ephemeral := false
-	if pvcSource == nil &&
-		podVolume.VolumeSource.Ephemeral != nil {
-		if !utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume) {
-			// Provide an unambiguous error message that
-			// explains why the volume cannot be
-			// processed. If we just ignore the volume
-			// source, the error is just a vague "unknown
-			// volume source".
-			return nil, nil, "", fmt.Errorf(
-				"volume %s is a generic ephemeral volume, but that feature is disabled in kubelet",
-				podVolume.Name,
-			)
-		}
+	isEphemeral := pvcSource == nil && podVolume.VolumeSource.Ephemeral != nil
+	if isEphemeral {
 		// Generic ephemeral inline volumes are handled the
 		// same way as a PVC reference. The only additional
 		// constraint (checked below) is that the PVC must be
 		// owned by the pod.
 		pvcSource = &v1.PersistentVolumeClaimVolumeSource{
-			ClaimName: pod.Name + "-" + podVolume.Name,
+			ClaimName: ephemeral.VolumeClaimName(pod, &podVolume),
 		}
-		ephemeral = true
 	}
 	if pvcSource != nil {
 		klog.V(5).InfoS("Found PVC", "PVC", klog.KRef(pod.Namespace, pvcSource.ClaimName))

@@ -497,12 +485,10 @@ func (dswp *desiredStateOfWorldPopulator) createVolumeSpec(
 				pvcSource.ClaimName,
 				err)
 		}
-		if ephemeral && !metav1.IsControlledBy(pvc, pod) {
-			return nil, nil, "", fmt.Errorf(
-				"error processing PVC %s/%s: not the ephemeral PVC for the pod",
-				pod.Namespace,
-				pvcSource.ClaimName,
-			)
+		if isEphemeral {
+			if err := ephemeral.VolumeIsForPod(pod, pvc); err != nil {
+				return nil, nil, "", err
+			}
 		}
 		pvName, pvcUID := pvc.Spec.VolumeName, pvc.UID
 		klog.V(5).InfoS("Found bound PV for PVC", "PVC", klog.KRef(pod.Namespace, pvcSource.ClaimName), "PVCUID", pvcUID, "PVName", pvName)
@@ -546,8 +546,6 @@ func TestFindAndRemoveNonattachableVolumes(t *testing.T) {
 }
 
 func TestEphemeralVolumeOwnerCheck(t *testing.T) {
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, true)()
-
 	// create dswp
 	pod, pv, pvc := createEphemeralVolumeObjects("dswp-test-pod", "dswp-test-volume-name", false /* not owned */)
 	dswp, fakePodManager, _, _, _ := createDswpWithVolume(t, pv, pvc)

@@ -560,109 +558,14 @@ func TestEphemeralVolumeOwnerCheck(t *testing.T) {
 		t.Fatalf("%s should not have been processed by the populator", podName)
 	}
 	require.Equal(t,
-		[]string{fmt.Sprintf("error processing PVC %s/%s: not the ephemeral PVC for the pod", pvc.Namespace, pvc.Name)},
+		[]string{fmt.Sprintf("PVC %s/%s was not created for pod %s/%s (pod is not owner)",
+			pvc.Namespace, pvc.Name,
+			pod.Namespace, pod.Name,
+		)},
 		dswp.desiredStateOfWorld.PopPodErrors(podName),
 	)
 }
 
-func TestEphemeralVolumeEnablement(t *testing.T) {
-	// create dswp
-	pod, pv, pvc := createEphemeralVolumeObjects("dswp-test-pod", "dswp-test-volume-name", true /* owned */)
-	dswp, fakePodManager, fakesDSW, _, fakePodState := createDswpWithVolume(t, pv, pvc)
-	fakePodManager.AddPod(pod)
-
-	podName := util.GetUniquePodName(pod)
-	volumeName := pod.Spec.Volumes[0].Name
-	generatedVolumeName := "fake-plugin/" + volumeName
-
-	// Feature disabled -> refuse to process pod.
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, false)()
-	dswp.findAndAddNewPods()
-	if dswp.pods.processedPods[podName] {
-		t.Fatalf("%s should not have been processed by the populator", podName)
-	}
-	require.Equal(t,
-		[]string{fmt.Sprintf("volume %s is a generic ephemeral volume, but that feature is disabled in kubelet", volumeName)},
-		dswp.desiredStateOfWorld.PopPodErrors(podName),
-	)
-
-	// Enabled -> process pod.
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, true)()
-	dswp.findAndAddNewPods()
-	if !dswp.pods.processedPods[podName] {
-		t.Fatalf("Failed to record that the volumes for the specified pod: %s have been processed by the populator", podName)
-	}
-
-	expectedVolumeName := v1.UniqueVolumeName(generatedVolumeName)
-
-	volumeExists := fakesDSW.VolumeExists(expectedVolumeName)
-	if !volumeExists {
-		t.Fatalf(
-			"VolumeExists(%q) failed. Expected: <true> Actual: <%v>",
-			expectedVolumeName,
-			volumeExists)
-	}
-
-	if podExistsInVolume := fakesDSW.PodExistsInVolume(
-		podName, expectedVolumeName); !podExistsInVolume {
-		t.Fatalf(
-			"DSW PodExistsInVolume returned incorrect value. Expected: <true> Actual: <%v>",
-			podExistsInVolume)
-	}
-
-	verifyVolumeExistsInVolumesToMount(
-		t, v1.UniqueVolumeName(generatedVolumeName), false /* expectReportedInUse */, fakesDSW)
-
-	//let the pod be terminated
-	podGet, exist := fakePodManager.GetPodByName(pod.Namespace, pod.Name)
-	if !exist {
-		t.Fatalf("Failed to get pod by pod name: %s and namespace: %s", pod.Name, pod.Namespace)
-	}
-	fakePodState.removed = map[kubetypes.UID]struct{}{podGet.UID: {}}
-
-	// Pretend again that the feature is disabled.
-	// Removal of the pod and volumes is expected to work.
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, false)()
-
-	dswp.findAndRemoveDeletedPods()
-	// Although Pod status is terminated, pod still exists in pod manager and actual state does not has this pod and volume information
-	// desired state populator will fail to delete this pod and volume first
-	volumeExists = fakesDSW.VolumeExists(expectedVolumeName)
-	if !volumeExists {
-		t.Fatalf(
-			"VolumeExists(%q) failed. Expected: <true> Actual: <%v>",
-			expectedVolumeName,
-			volumeExists)
-	}
-
-	if podExistsInVolume := fakesDSW.PodExistsInVolume(
-		podName, expectedVolumeName); !podExistsInVolume {
-		t.Fatalf(
-			"DSW PodExistsInVolume returned incorrect value. Expected: <true> Actual: <%v>",
-			podExistsInVolume)
-	}
-
-	// reconcile with actual state so that volume is added into the actual state
-	// desired state populator now can successfully delete the pod and volume
-	fakeASW := dswp.actualStateOfWorld
-	reconcileASW(fakeASW, fakesDSW, t)
-	dswp.findAndRemoveDeletedPods()
-	volumeExists = fakesDSW.VolumeExists(expectedVolumeName)
-	if volumeExists {
-		t.Fatalf(
-			"VolumeExists(%q) failed. Expected: <false> Actual: <%v>",
-			expectedVolumeName,
-			volumeExists)
-	}
-
-	if podExistsInVolume := fakesDSW.PodExistsInVolume(
-		podName, expectedVolumeName); podExistsInVolume {
-		t.Fatalf(
-			"DSW PodExistsInVolume returned incorrect value. Expected: <false> Actual: <%v>",
-			podExistsInVolume)
-	}
-}
-
 func TestFindAndAddNewPods_FindAndRemoveDeletedPods_Valid_Block_VolumeDevices(t *testing.T) {
 	// create dswp
 	mode := v1.PersistentVolumeBlock
@@ -1133,100 +1133,6 @@ func TestApplySeccompVersionSkew(t *testing.T) {
 	}
 }
 
-// TestEphemeralVolumeEnablement checks the behavior of the API server
-// when the GenericEphemeralVolume feature is turned on and then off:
-// the Ephemeral struct must be preserved even during updates.
-func TestEphemeralVolumeEnablement(t *testing.T) {
-	// Enable the Feature Gate during the first pod creation
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, true)()
-
-	pod := createPodWithGenericEphemeralVolume()
-	expectedPod := pod.DeepCopy()
-
-	Strategy.PrepareForCreate(context.Background(), pod)
-	require.Equal(t, expectedPod.Spec, pod.Spec, "pod spec")
-
-	errs := Strategy.Validate(context.Background(), pod)
-	require.Empty(t, errs, "errors from validation")
-
-	// Now let's disable the Feature Gate, update some other field from the Pod and expect the volume to remain present
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, false)()
-	updatePod := testUpdatePod(t, pod, "aaa")
-
-	// And let's enable the FG again, add another from and check if the volume is still present
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, true)()
-	testUpdatePod(t, updatePod, "bbb")
-}
-
-// TestEphemeralVolumeDisabled checks the behavior of the API server
-// when the GenericEphemeralVolume is off: the Ephemeral struct gets dropped,
-// validation fails.
-func TestEphemeralVolumeDisabled(t *testing.T) {
-	// Disable the Feature Gate during the first pod creation
-	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, false)()
-
-	pod := createPodWithGenericEphemeralVolume()
-	expectedPod := pod.DeepCopy()
-	expectedPod.Spec.Volumes[0].VolumeSource.Ephemeral = nil
-
-	Strategy.PrepareForCreate(context.Background(), pod)
-	require.Equal(t, expectedPod.Spec, pod.Spec, "pod spec")
-
-	errs := Strategy.Validate(context.Background(), pod)
-	require.NotEmpty(t, errs, "no errors from validation")
-}
-
-func testUpdatePod(t *testing.T, oldPod *api.Pod, labelValue string) *api.Pod {
-	updatedPod := oldPod.DeepCopy()
-	updatedPod.Labels = map[string]string{"XYZ": labelValue}
-	expectedPod := updatedPod.DeepCopy()
-	Strategy.PrepareForUpdate(context.Background(), updatedPod, oldPod)
-	require.Equal(t, expectedPod.Spec, updatedPod.Spec, "updated pod spec")
-	errs := Strategy.Validate(context.Background(), updatedPod)
-	require.Empty(t, errs, "errors from validation")
-	return updatedPod
-}
-
-func createPodWithGenericEphemeralVolume() *api.Pod {
-	return &api.Pod{
-		ObjectMeta: metav1.ObjectMeta{
-			Namespace: "ns",
-			Name:      "pod",
-		},
-		Spec: api.PodSpec{
-			RestartPolicy: api.RestartPolicyAlways,
-			DNSPolicy:     api.DNSClusterFirst,
-			Containers: []api.Container{{
-				Name:                     "foo",
-				Image:                    "example",
-				TerminationMessagePolicy: api.TerminationMessageReadFile,
-				ImagePullPolicy:          api.PullAlways,
-			}},
-			Volumes: []api.Volume{
-				{
-					Name: "ephemeral",
-					VolumeSource: api.VolumeSource{
-						Ephemeral: &api.EphemeralVolumeSource{
-							VolumeClaimTemplate: &api.PersistentVolumeClaimTemplate{
-								Spec: api.PersistentVolumeClaimSpec{
-									AccessModes: []api.PersistentVolumeAccessMode{
-										api.ReadWriteOnce,
-									},
-									Resources: api.ResourceRequirements{
-										Requests: api.ResourceList{
-											api.ResourceStorage: resource.MustParse("1Gi"),
-										},
-									},
-								},
-							},
-						},
-					},
-				},
-			},
-		},
-	}
-}
-
 func newPodtWithHugePageValue(reousreceName api.ResourceName, value resource.Quantity) *api.Pod {
 	return &api.Pod{
 		ObjectMeta: metav1.ObjectMeta{
@@ -23,12 +23,10 @@ import (
 	"k8s.io/apimachinery/pkg/util/validation/field"
 	"k8s.io/apiserver/pkg/registry/rest"
 	"k8s.io/apiserver/pkg/storage/names"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/api/legacyscheme"
 	psputil "k8s.io/kubernetes/pkg/api/podsecuritypolicy"
 	"k8s.io/kubernetes/pkg/apis/policy"
 	"k8s.io/kubernetes/pkg/apis/policy/validation"
-	"k8s.io/kubernetes/pkg/features"
 )
 
 // strategy implements behavior for PodSecurityPolicy objects

@@ -74,39 +72,17 @@ func (strategy) Canonicalize(obj runtime.Object) {
 }
 
 func (strategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
-	opts := validation.PodSecurityPolicyValidationOptions{
-		// Only allowed if the feature is enabled.
-		AllowEphemeralVolumeType: utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume),
-	}
-	return validation.ValidatePodSecurityPolicy(obj.(*policy.PodSecurityPolicy), opts)
+	return validation.ValidatePodSecurityPolicy(obj.(*policy.PodSecurityPolicy))
 }
 
 // WarningsOnCreate returns warnings for the creation of the given object.
 func (strategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string { return nil }
 
 func (strategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
-	opts := validation.PodSecurityPolicyValidationOptions{
-		// Allowed if the feature is enabled or the old policy already had it.
-		// A policy that had the type set when that was valid must remain valid.
-		AllowEphemeralVolumeType: utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume) ||
-			volumeInUse(old.(*policy.PodSecurityPolicy), policy.Ephemeral),
-	}
-	return validation.ValidatePodSecurityPolicyUpdate(old.(*policy.PodSecurityPolicy), obj.(*policy.PodSecurityPolicy), opts)
+	return validation.ValidatePodSecurityPolicyUpdate(old.(*policy.PodSecurityPolicy), obj.(*policy.PodSecurityPolicy))
 }
 
 // WarningsOnUpdate returns warnings for the given update.
 func (strategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
 	return nil
 }
-
-func volumeInUse(oldPSP *policy.PodSecurityPolicy, volume policy.FSType) bool {
-	if oldPSP == nil {
-		return false
-	}
-	for _, v := range oldPSP.Spec.Volumes {
-		if v == volume {
-			return true
-		}
-	}
-	return false
-}
@@ -23,10 +23,7 @@ import (
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/validation/field"
-	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/kubernetes/pkg/apis/policy"
-	"k8s.io/kubernetes/pkg/features"
 )
 
 func TestAllowEphemeralVolumeType(t *testing.T) {

@@ -83,35 +80,25 @@ func TestAllowEphemeralVolumeType(t *testing.T) {
 		},
 	}
 
-	for _, enabled := range []bool{true, false} {
-		for _, oldPSPInfo := range pspInfo {
-			for _, newPSPInfo := range pspInfo {
-				oldPSP := oldPSPInfo.psp()
-				newPSP := newPSPInfo.psp()
-				if newPSP == nil {
-					continue
-				}
-
-				t.Run(fmt.Sprintf("feature enabled=%v, old PodSecurityPolicySpec %v, new PodSecurityPolicySpec %v", enabled, oldPSPInfo.description, newPSPInfo.description), func(t *testing.T) {
-					defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, enabled)()
-
-					var errs field.ErrorList
-					var expectErrors bool
-					if oldPSP == nil {
-						errs = Strategy.Validate(context.Background(), newPSP)
-						expectErrors = newPSPInfo.hasGenericVolume && !enabled
-					} else {
-						errs = Strategy.ValidateUpdate(context.Background(), newPSP, oldPSP)
-						expectErrors = !oldPSPInfo.hasGenericVolume && newPSPInfo.hasGenericVolume && !enabled
-					}
-					if expectErrors && len(errs) == 0 {
-						t.Error("expected errors, got none")
-					}
-					if !expectErrors && len(errs) > 0 {
-						t.Errorf("expected no errors, got: %v", errs)
-					}
-				})
-			}
+	for _, oldPSPInfo := range pspInfo {
+		for _, newPSPInfo := range pspInfo {
+			oldPSP := oldPSPInfo.psp()
+			newPSP := newPSPInfo.psp()
+			if newPSP == nil {
+				continue
+			}
+
+			t.Run(fmt.Sprintf("old PodSecurityPolicySpec %v, new PodSecurityPolicySpec %v", oldPSPInfo.description, newPSPInfo.description), func(t *testing.T) {
+				var errs field.ErrorList
+				if oldPSP == nil {
+					errs = Strategy.Validate(context.Background(), newPSP)
+				} else {
+					errs = Strategy.ValidateUpdate(context.Background(), newPSP, oldPSP)
+				}
+				if len(errs) > 0 {
+					t.Errorf("expected no errors, got: %v", errs)
+				}
+			})
 		}
 	}
 }
@@ -26,5 +26,4 @@ type Features struct {
 	EnableReadWriteOncePod       bool
 	EnableVolumeCapacityPriority bool
 	EnableCSIStorageCapacity     bool
-	EnableGenericEphemeralVolume bool
 }
@@ -22,11 +22,11 @@ import (
 	v1 "k8s.io/api/core/v1"
 	storagev1 "k8s.io/api/storage/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/rand"
 	corelisters "k8s.io/client-go/listers/core/v1"
 	storagelisters "k8s.io/client-go/listers/storage/v1"
+	ephemeral "k8s.io/component-helpers/storage/ephemeral"
 	storagehelpers "k8s.io/component-helpers/storage/volume"
 	csitrans "k8s.io/csi-translation-lib"
 	"k8s.io/klog/v2"

@@ -56,8 +56,6 @@ type CSILimits struct {
 	randomVolumeIDPrefix string
 
 	translator InTreeToCSITranslator
-
-	enableGenericEphemeralVolume bool
 }
 
 var _ framework.FilterPlugin = &CSILimits{}

@@ -152,23 +150,17 @@ func (pl *CSILimits) filterAttachableVolumes(
 	for _, vol := range pod.Spec.Volumes {
 		// CSI volumes can only be used through a PVC.
 		pvcName := ""
-		ephemeral := false
+		isEphemeral := false
 		switch {
 		case vol.PersistentVolumeClaim != nil:
 			pvcName = vol.PersistentVolumeClaim.ClaimName
 		case vol.Ephemeral != nil:
-			if newPod && !pl.enableGenericEphemeralVolume {
-				return fmt.Errorf(
-					"volume %s is a generic ephemeral volume, but that feature is disabled in kube-scheduler",
-					vol.Name,
-				)
-			}
 			// Generic ephemeral inline volumes also use a PVC,
 			// just with a computed name and certain ownership.
 			// That is checked below once the pvc object is
 			// retrieved.
-			pvcName = pod.Name + "-" + vol.Name
-			ephemeral = true
+			pvcName = ephemeral.VolumeClaimName(pod, &vol)
+			isEphemeral = true
 		default:
 			continue
 		}

@@ -193,8 +185,10 @@ func (pl *CSILimits) filterAttachableVolumes(
 		}
 
 		// The PVC for an ephemeral volume must be owned by the pod.
-		if ephemeral && !metav1.IsControlledBy(pvc, pod) {
-			return fmt.Errorf("PVC %s/%s is not owned by pod", pod.Namespace, pvcName)
+		if isEphemeral {
+			if err := ephemeral.VolumeIsForPod(pod, pvc); err != nil {
+				return err
+			}
 		}
 
 		driverName, volumeHandle := pl.getCSIDriverInfo(csiNode, pvc)
@@ -316,13 +310,12 @@ func NewCSI(_ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) {
 	scLister := informerFactory.Storage().V1().StorageClasses().Lister()
 
 	return &CSILimits{
-		csiNodeLister:                csiNodesLister,
-		pvLister:                     pvLister,
-		pvcLister:                    pvcLister,
-		scLister:                     scLister,
-		randomVolumeIDPrefix:         rand.String(32),
-		translator:                   csitrans.New(),
-		enableGenericEphemeralVolume: fts.EnableGenericEphemeralVolume,
+		csiNodeLister:        csiNodesLister,
+		pvLister:             pvLister,
+		pvcLister:            pvcLister,
+		scLister:             scLister,
+		randomVolumeIDPrefix: rand.String(32),
+		translator:           csitrans.New(),
 	}, nil
 }

@@ -546,13 +546,6 @@ func TestCSILimits(t *testing.T) {
 			test:        "should not count in-tree and count csi volumes if migration is disabled (when scheduling in-tree volumes)",
 		},
 		// ephemeral volumes
-		{
-			newPod:      ephemeralVolumePod,
-			filterName:  "csi",
-			driverNames: []string{ebsCSIDriverName},
-			test:        "ephemeral volume feature disabled",
-			wantStatus:  framework.NewStatus(framework.Error, "volume xyz is a generic ephemeral volume, but that feature is disabled in kube-scheduler"),
-		},
 		{
 			newPod:     ephemeralVolumePod,
 			filterName: "csi",

@@ -568,7 +561,7 @@ func TestCSILimits(t *testing.T) {
 			extraClaims: []v1.PersistentVolumeClaim{*conflictingClaim},
 			driverNames: []string{ebsCSIDriverName},
 			test:        "ephemeral volume not owned",
-			wantStatus:  framework.NewStatus(framework.Error, "PVC test/abc-xyz is not owned by pod"),
+			wantStatus:  framework.NewStatus(framework.Error, "PVC test/abc-xyz was not created for pod test/abc (pod is not owner)"),
 		},
 		{
 			newPod: ephemeralVolumePod,

@@ -657,8 +650,6 @@ func TestCSILimits(t *testing.T) {
 			scLister:             getFakeCSIStorageClassLister(scName, test.driverNames[0]),
 			randomVolumeIDPrefix: rand.String(32),
 			translator:           csitrans.New(),
-
-			enableGenericEphemeralVolume: test.ephemeralEnabled,
 		}
 		gotStatus := p.Filter(context.Background(), nil, test.newPod, node)
 		if !reflect.DeepEqual(gotStatus, test.wantStatus) {
@@ -26,13 +26,13 @@ import (
 
 	v1 "k8s.io/api/core/v1"
 	storage "k8s.io/api/storage/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/rand"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/informers"
 	corelisters "k8s.io/client-go/listers/core/v1"
 	storagelisters "k8s.io/client-go/listers/storage/v1"
+	"k8s.io/component-helpers/storage/ephemeral"
 	csilibplugins "k8s.io/csi-translation-lib/plugins"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/scheduler/framework"

@@ -117,8 +117,6 @@ type nonCSILimits struct {
 	// It is used to prefix volumeID generated inside the predicate() method to
 	// avoid conflicts with any real volume.
 	randomVolumeIDPrefix string
-
-	enableGenericEphemeralVolume bool
 }
 
 var _ framework.FilterPlugin = &nonCSILimits{}

@@ -191,8 +189,6 @@ func newNonCSILimits(
 		pvcLister:            pvcLister,
 		scLister:             scLister,
 		randomVolumeIDPrefix: rand.String(32),
-
-		enableGenericEphemeralVolume: fts.EnableGenericEphemeralVolume,
 	}
 
 	return pl

@@ -288,23 +284,17 @@ func (pl *nonCSILimits) filterVolumes(pod *v1.Pod, newPod bool, filteredVolumes sets.String) error {
 		}
 
 		pvcName := ""
-		ephemeral := false
+		isEphemeral := false
 		switch {
 		case vol.PersistentVolumeClaim != nil:
 			pvcName = vol.PersistentVolumeClaim.ClaimName
 		case vol.Ephemeral != nil:
-			if !pl.enableGenericEphemeralVolume {
-				return fmt.Errorf(
-					"volume %s is a generic ephemeral volume, but that feature is disabled in kube-scheduler",
-					vol.Name,
-				)
-			}
 			// Generic ephemeral inline volumes also use a PVC,
 			// just with a computed name and certain ownership.
 			// That is checked below once the pvc object is
 			// retrieved.
-			pvcName = pod.Name + "-" + vol.Name
-			ephemeral = true
+			pvcName = ephemeral.VolumeClaimName(pod, vol)
+			isEphemeral = true
 		default:
 			continue
 		}

@@ -332,8 +322,10 @@ func (pl *nonCSILimits) filterVolumes(pod *v1.Pod, newPod bool, filteredVolumes sets.String) error {
 		}
 
 		// The PVC for an ephemeral volume must be owned by the pod.
-		if ephemeral && !metav1.IsControlledBy(pvc, pod) {
-			return fmt.Errorf("PVC %s/%s is not owned by pod", pod.Namespace, pvcName)
+		if isEphemeral {
+			if err := ephemeral.VolumeIsForPod(pod, pvc); err != nil {
+				return err
+			}
 		}
 
 		pvName := pvc.Spec.VolumeName
@@ -86,11 +86,6 @@ func TestEphemeralLimits(t *testing.T) {
 		test             string
 		wantStatus       *framework.Status
 	}{
-		{
-			newPod:     ephemeralVolumePod,
-			test:       "volume feature disabled",
-			wantStatus: framework.NewStatus(framework.Error, "volume xyz is a generic ephemeral volume, but that feature is disabled in kube-scheduler"),
-		},
 		{
 			newPod:           ephemeralVolumePod,
 			ephemeralEnabled: true,

@@ -102,7 +97,7 @@ func TestEphemeralLimits(t *testing.T) {
 			ephemeralEnabled: true,
 			extraClaims:      []v1.PersistentVolumeClaim{*conflictingClaim},
 			test:             "volume not owned",
-			wantStatus:       framework.NewStatus(framework.Error, "PVC test/abc-xyz is not owned by pod"),
+			wantStatus:       framework.NewStatus(framework.Error, "PVC test/abc-xyz was not created for pod test/abc (pod is not owner)"),
 		},
 		{
 			newPod: ephemeralVolumePod,

@@ -123,9 +118,7 @@ func TestEphemeralLimits(t *testing.T) {
 
 	for _, test := range tests {
 		t.Run(test.test, func(t *testing.T) {
-			fts := feature.Features{
-				EnableGenericEphemeralVolume: test.ephemeralEnabled,
-			}
+			fts := feature.Features{}
 			node, csiNode := getNodeWithPodAndVolumeLimits("node", test.existingPods, int64(test.maxVols), filterName)
 			p := newNonCSILimits(filterName, getFakeCSINodeLister(csiNode), getFakeCSIStorageClassLister(filterName, driverName), getFakePVLister(filterName), append(getFakePVCLister(filterName), test.extraClaims...), fts).(framework.FilterPlugin)
 			gotStatus := p.Filter(context.Background(), nil, test.newPod, node)
@ -51,7 +51,6 @@ func NewInTreeRegistry() runtime.Registry {
EnableReadWriteOncePod: feature.DefaultFeatureGate.Enabled(features.ReadWriteOncePod),
EnableVolumeCapacityPriority: feature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority),
EnableCSIStorageCapacity: feature.DefaultFeatureGate.Enabled(features.CSIStorageCapacity),
EnableGenericEphemeralVolume: feature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume),
}

return runtime.Registry{
@ -40,6 +40,7 @@ import (
corelisters "k8s.io/client-go/listers/core/v1"
storagelisters "k8s.io/client-go/listers/storage/v1"
storagelistersv1beta1 "k8s.io/client-go/listers/storage/v1beta1"
"k8s.io/component-helpers/storage/ephemeral"
storagehelpers "k8s.io/component-helpers/storage/volume"
csitrans "k8s.io/csi-translation-lib"
csiplugins "k8s.io/csi-translation-lib/plugins"
@ -686,29 +687,25 @@ func (b *volumeBinder) checkBindings(pod *v1.Pod, bindings []*BindingInfo, claim

func (b *volumeBinder) isVolumeBound(pod *v1.Pod, vol *v1.Volume) (bound bool, pvc *v1.PersistentVolumeClaim, err error) {
pvcName := ""
ephemeral := false
isEphemeral := false
switch {
case vol.PersistentVolumeClaim != nil:
pvcName = vol.PersistentVolumeClaim.ClaimName
case vol.Ephemeral != nil:
if !utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume) {
return false, nil, fmt.Errorf(
"volume %s is a generic ephemeral volume, but that feature is disabled in kube-scheduler",
vol.Name,
)
}
// Generic ephemeral inline volumes also use a PVC,
// just with a computed name, and...
pvcName = pod.Name + "-" + vol.Name
ephemeral = true
pvcName = ephemeral.VolumeClaimName(pod, vol)
isEphemeral = true
default:
return true, nil, nil
}

bound, pvc, err = b.isPVCBound(pod.Namespace, pvcName)
// ... the PVC must be owned by the pod.
if ephemeral && err == nil && pvc != nil && !metav1.IsControlledBy(pvc, pod) {
return false, nil, fmt.Errorf("PVC %s/%s is not owned by pod", pod.Namespace, pvcName)
if isEphemeral && err == nil && pvc != nil {
if err := ephemeral.VolumeIsForPod(pod, pvc); err != nil {
return false, nil, err
}
}
return
}
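The naming convention and ownership check are easy to demonstrate; a small standalone example (all names here are made up for illustration):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/component-helpers/storage/ephemeral"
)

func main() {
	// A pod "app-0" with a generic ephemeral inline volume "scratch"
	// is backed by a PVC named "app-0-scratch".
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "app-0", Namespace: "demo"}}
	vol := v1.Volume{
		Name:         "scratch",
		VolumeSource: v1.VolumeSource{Ephemeral: &v1.EphemeralVolumeSource{}},
	}
	fmt.Println(ephemeral.VolumeClaimName(pod, &vol)) // "app-0-scratch"

	// A PVC with the right name but without an owner reference back to
	// the pod is rejected; this is the "wrong-pvc"/conflictingGenericPVC
	// situation covered by the tests below.
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "app-0-scratch", Namespace: "demo"},
	}
	fmt.Println(ephemeral.VolumeIsForPod(pod, pvc)) // error: pod is not owner
}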
@ -833,9 +833,6 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
// If nil, makePod with podPVCs
pod *v1.Pod

// GenericEphemeralVolume feature enabled?
ephemeral bool

// Expected podBindingCache fields
expectedBindings []*BindingInfo

@ -942,7 +939,6 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
withNamespace("testns").
withNodeName("node1").
withGenericEphemeralVolume("no-such-pvc").Pod,
ephemeral: true,
shouldFail: true,
},
"generic-ephemeral,with-pvc": {
@ -952,7 +948,6 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
withGenericEphemeralVolume("test-volume").Pod,
cachePVCs: []*v1.PersistentVolumeClaim{correctGenericPVC},
pvs: []*v1.PersistentVolume{pvBoundGeneric},
ephemeral: true,
},
"generic-ephemeral,wrong-pvc": {
pod: makePod("test-pod").
@ -961,17 +956,6 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
withGenericEphemeralVolume("test-volume").Pod,
cachePVCs: []*v1.PersistentVolumeClaim{conflictingGenericPVC},
pvs: []*v1.PersistentVolume{pvBoundGeneric},
ephemeral: true,
shouldFail: true,
},
"generic-ephemeral,disabled": {
pod: makePod("test-pod").
withNamespace("testns").
withNodeName("node1").
withGenericEphemeralVolume("test-volume").Pod,
cachePVCs: []*v1.PersistentVolumeClaim{correctGenericPVC},
pvs: []*v1.PersistentVolume{pvBoundGeneric},
ephemeral: false,
shouldFail: true,
},
}
@ -986,8 +970,6 @@ func TestFindPodVolumesWithoutProvisioning(t *testing.T) {
}

run := func(t *testing.T, scenario scenarioType, csiStorageCapacity bool, csiDriver *storagev1.CSIDriver) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, scenario.ephemeral)()

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

@ -25,9 +25,9 @@ import (

v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/component-helpers/storage/ephemeral"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
@ -69,11 +69,10 @@ func (d *stateData) Clone() framework.StateData {
// In the Filter phase, pod binding cache is created for the pod and used in
// Reserve and PreBind phases.
type VolumeBinding struct {
Binder SchedulerVolumeBinder
PVCLister corelisters.PersistentVolumeClaimLister
GenericEphemeralVolumeFeatureEnabled bool
scorer volumeCapacityScorer
fts feature.Features
Binder SchedulerVolumeBinder
PVCLister corelisters.PersistentVolumeClaimLister
scorer volumeCapacityScorer
fts feature.Features
}

var _ framework.PreFilterPlugin = &VolumeBinding{}
@ -127,13 +126,13 @@ func (pl *VolumeBinding) podHasPVCs(pod *v1.Pod) (bool, error) {
hasPVC := false
for _, vol := range pod.Spec.Volumes {
var pvcName string
ephemeral := false
isEphemeral := false
switch {
case vol.PersistentVolumeClaim != nil:
pvcName = vol.PersistentVolumeClaim.ClaimName
case vol.Ephemeral != nil && pl.GenericEphemeralVolumeFeatureEnabled:
pvcName = pod.Name + "-" + vol.Name
ephemeral = true
case vol.Ephemeral != nil:
pvcName = ephemeral.VolumeClaimName(pod, &vol)
isEphemeral = true
default:
// Volume is not using a PVC, ignore
continue
@ -144,7 +143,7 @@ func (pl *VolumeBinding) podHasPVCs(pod *v1.Pod) (bool, error) {
// The error usually has already enough context ("persistentvolumeclaim "myclaim" not found"),
// but we can do better for generic ephemeral inline volumes where that situation
// is normal directly after creating a pod.
if ephemeral && apierrors.IsNotFound(err) {
if isEphemeral && apierrors.IsNotFound(err) {
err = fmt.Errorf("waiting for ephemeral volume controller to create the persistentvolumeclaim %q", pvcName)
}
return hasPVC, err
@ -158,8 +157,10 @@ func (pl *VolumeBinding) podHasPVCs(pod *v1.Pod) (bool, error) {
return hasPVC, fmt.Errorf("persistentvolumeclaim %q is being deleted", pvc.Name)
}

if ephemeral && !metav1.IsControlledBy(pvc, pod) {
return hasPVC, fmt.Errorf("persistentvolumeclaim %q was not created for the pod", pvc.Name)
if isEphemeral {
if err := ephemeral.VolumeIsForPod(pod, pvc); err != nil {
return hasPVC, err
}
}
}
return hasPVC, nil
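The friendlier NotFound message matters because podHasPVCs feeds the plugin's PreFilter result. Roughly — a sketch of the calling pattern, not the verbatim VolumeBinding.PreFilter — the error becomes an unschedulable status, so the pod stays pending until the ephemeral volume controller has created the PVC:

package sketch

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

// preFilterSketch is a hypothetical standalone helper illustrating how a
// PreFilter can surface podHasPVCs errors; podHasPVCs is the method from
// the hunk above, passed in here only to keep the sketch self-contained.
func preFilterSketch(podHasPVCs func(*v1.Pod) (bool, error), pod *v1.Pod) *framework.Status {
	hasPVC, err := podHasPVCs(pod)
	if err != nil {
		// "waiting for ephemeral volume controller to create the
		// persistentvolumeclaim ..." ends up here; scheduling is retried
		// once the PVC exists.
		return framework.NewStatus(framework.UnschedulableAndUnresolvable, err.Error())
	}
	if !hasPVC {
		return nil // nothing for the volume binding plugin to do
	}
	return nil // continue with the normal volume binding pre-filtering
}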
@ -400,10 +401,9 @@ func New(plArgs runtime.Object, fh framework.Handle, fts feature.Features) (fram
scorer = buildScorerFunction(shape)
}
return &VolumeBinding{
Binder: binder,
PVCLister: pvcInformer.Lister(),
GenericEphemeralVolumeFeatureEnabled: fts.EnableGenericEphemeralVolume,
scorer: scorer,
fts: fts,
Binder: binder,
PVCLister: pvcInformer.Lister(),
scorer: scorer,
fts: fts,
}, nil
}
@ -1417,7 +1417,6 @@ func moveContainersToEphemeral(in *api.Pod) *api.Pod {
// the FSTypeAll wildcard.
func TestValidateAllowedVolumes(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, true)()

val := reflect.ValueOf(api.VolumeSource{})
@ -630,7 +630,6 @@ func TestAdmitCaps(t *testing.T) {

func TestAdmitVolumes(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.GenericEphemeralVolume, true)()

val := reflect.ValueOf(kapi.VolumeSource{})
@ -21,10 +21,9 @@ import (
"time"

corev1 "k8s.io/api/core/v1"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/component-helpers/storage/ephemeral"
pvutil "k8s.io/kubernetes/pkg/api/v1/persistentvolume"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/third_party/forked/gonum/graph"
"k8s.io/kubernetes/third_party/forked/gonum/graph/simple"
)
@ -385,8 +384,8 @@ func (g *Graph) AddPod(pod *corev1.Pod) {
claimName := ""
if v.PersistentVolumeClaim != nil {
claimName = v.PersistentVolumeClaim.ClaimName
} else if v.Ephemeral != nil && utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume) {
claimName = pod.Name + "-" + v.Name
} else if v.Ephemeral != nil {
claimName = ephemeral.VolumeClaimName(pod, &v)
}
if claimName != "" {
pvcVertex := g.getOrCreateVertex_locked(pvcVertexType, pod.Namespace, claimName)
@ -193,17 +193,15 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding)
})
}

if utilfeature.DefaultFeatureGate.Enabled(features.GenericEphemeralVolume) {
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "ephemeral-volume-controller"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("pods/finalizers").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch", "create").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
eventsRule(),
},
})
}
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "ephemeral-volume-controller"},
Rules: []rbacv1.PolicyRule{
rbacv1helpers.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("pods/finalizers").RuleOrDie(),
rbacv1helpers.NewRule("get", "list", "watch", "create").Groups(legacyGroup).Resources("persistentvolumeclaims").RuleOrDie(),
eventsRule(),
},
})

addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "generic-garbage-collector"},
@ -5580,9 +5580,6 @@ message VolumeSource {
// A pod can use both types of ephemeral volumes and
// persistent volumes at the same time.
//
// This is a beta feature and only available when the GenericEphemeralVolume
// feature gate is enabled.
//
// +optional
optional EphemeralVolumeSource ephemeral = 29;
}

@ -179,9 +179,6 @@ type VolumeSource struct {
// A pod can use both types of ephemeral volumes and
// persistent volumes at the same time.
//
// This is a beta feature and only available when the GenericEphemeralVolume
// feature gate is enabled.
//
// +optional
Ephemeral *EphemeralVolumeSource `json:"ephemeral,omitempty" protobuf:"bytes,29,opt,name=ephemeral"`
}
@ -2472,7 +2472,7 @@ var map_VolumeSource = map[string]string{
"scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
"storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.",
"csi": "CSI (Container Storage Interface) represents ephemeral storage that is handled by certain external CSI drivers (Beta feature).",
"ephemeral": "Ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.\n\nThis is a beta feature and only available when the GenericEphemeralVolume feature gate is enabled.",
"ephemeral": "Ephemeral represents a volume that is handled by a cluster storage driver. The volume's lifecycle is tied to the pod that defines it - it will be created before the pod starts, and deleted when the pod is removed.\n\nUse this if: a) the volume is only needed while the pod runs, b) features of normal volumes like restoring from snapshot or capacity\n tracking are needed,\nc) the storage driver is specified through a storage class, and d) the storage driver supports dynamic volume provisioning through\n a PersistentVolumeClaim (see EphemeralVolumeSource for more\n information on the connection between this volume type\n and PersistentVolumeClaim).\n\nUse PersistentVolumeClaim or one of the vendor-specific APIs for volumes that persist for longer than the lifecycle of an individual pod.\n\nUse CSI for light-weight local ephemeral volumes if the CSI driver is meant to be used that way - see the documentation of the driver for more information.\n\nA pod can use both types of ephemeral volumes and persistent volumes at the same time.",
}

func (VolumeSource) SwaggerDoc() map[string]string {
@ -123,6 +123,9 @@ func (p *ephemeralTestSuite) DefineTests(driver storageframework.TestDriver, pat
eDriver, _ = driver.(storageframework.EphemeralTestDriver)
}
if pattern.VolType == storageframework.GenericEphemeralVolume {
// The GenericEphemeralVolume feature is GA, but
// perhaps this test is run against an older Kubernetes
// where the feature might be disabled.
enabled, err := GenericEphemeralVolumesEnabled(f.ClientSet, f.Timeouts, f.Namespace.Name)
framework.ExpectNoError(err, "check GenericEphemeralVolume feature")
if !enabled {
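One plausible shape for such a cluster-side probe (a hypothetical sketch, not the actual GenericEphemeralVolumesEnabled implementation): dry-run-create a pod with a generic ephemeral volume and check whether the API server kept the field, since fields behind a disabled feature gate are silently dropped on create.

package probe

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// ephemeralVolumesEnabledSketch is a hypothetical probe: if the server
// drops Spec.Volumes[0].Ephemeral from the dry-run result, the feature
// gate is disabled on that cluster. Pod and image names are arbitrary.
func ephemeralVolumesEnabledSketch(ctx context.Context, c clientset.Interface, ns string) (bool, error) {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{GenerateName: "feature-probe-"},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers:    []v1.Container{{Name: "probe", Image: "k8s.gcr.io/pause"}},
			Volumes: []v1.Volume{{
				Name: "probe",
				VolumeSource: v1.VolumeSource{
					Ephemeral: &v1.EphemeralVolumeSource{
						VolumeClaimTemplate: &v1.PersistentVolumeClaimTemplate{
							Spec: v1.PersistentVolumeClaimSpec{
								AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
								Resources: v1.ResourceRequirements{
									Requests: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Mi")},
								},
							},
						},
					},
				},
			}},
		},
	}
	// Server-side dry run validates and defaults the pod without persisting it.
	created, err := c.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{DryRun: []string{metav1.DryRunAll}})
	if err != nil {
		return false, err
	}
	return created.Spec.Volumes[0].Ephemeral != nil, nil
}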