Merge pull request #95739 from huffmanca/relax-csi-fsgroup-validation
Relax validation for CSIVolumeFSGroupPolicy and move to beta
Commit 198b2fdb14
@@ -393,10 +393,13 @@ func validateCSINodeDriver(driver storage.CSINodeDriver, driverNamesInSpecs sets
     return allErrs
 }
 
+// ValidateCSIDriverName checks that a name is appropriate for a
+// CSIDriver object.
+var ValidateCSIDriverName = apimachineryvalidation.NameIsDNSSubdomain
+
 // ValidateCSIDriver validates a CSIDriver.
 func ValidateCSIDriver(csiDriver *storage.CSIDriver) field.ErrorList {
-    allErrs := field.ErrorList{}
-    allErrs = append(allErrs, apivalidation.ValidateCSIDriverName(csiDriver.Name, field.NewPath("name"))...)
+    allErrs := apivalidation.ValidateObjectMeta(&csiDriver.ObjectMeta, false, ValidateCSIDriverName, field.NewPath("metadata"))
 
     allErrs = append(allErrs, validateCSIDriverSpec(&csiDriver.Spec, field.NewPath("spec"))...)
     return allErrs
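The exported ValidateCSIDriverName above is apimachinery's stock DNS-subdomain check. A quick, self-contained illustration of what it accepts, assuming the usual NameIsDNSSubdomain signature (func(name string, prefix bool) []string); the sample names are made up:

package main

import (
    "fmt"

    apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
)

// Same alias as in the hunk above.
var ValidateCSIDriverName = apimachineryvalidation.NameIsDNSSubdomain

func main() {
    // A CSI driver name must be a DNS subdomain: lowercase alphanumerics,
    // '-' and '.', so the second candidate fails.
    for _, name := range []string{"csi.example.com", "Not_A_Subdomain"} {
        errs := ValidateCSIDriverName(name, false) // prefix=false: validate the full name
        fmt.Printf("%s: valid=%v\n", name, len(errs) == 0)
    }
}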
@@ -404,7 +407,7 @@ func ValidateCSIDriver(csiDriver *storage.CSIDriver) field.ErrorList {
 
 // ValidateCSIDriverUpdate validates a CSIDriver.
 func ValidateCSIDriverUpdate(new, old *storage.CSIDriver) field.ErrorList {
-    allErrs := ValidateCSIDriver(new)
+    allErrs := apivalidation.ValidateObjectMetaUpdate(&new.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))
 
     // Spec is read-only
     // If this ever relaxes in the future, make sure to increment the Generation number in PrepareForUpdate
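Taken together, the two hunks above relax the update path: ValidateCSIDriver still runs full metadata and spec validation at create time, while ValidateCSIDriverUpdate now applies only metadata-update rules plus the spec read-only check, so a CSIDriver persisted with an FSGroupPolicy value that no longer validates keeps round-tripping. A minimal sketch of that asymmetry, modeled on the test fixtures later in this diff (the driver name and policy string are illustrative):

// Create-time validation rejects the bad policy; update-time validation
// tolerates it as long as the spec itself is unchanged.
attachNotRequired := false
podInfoOnMount := false
invalid := storage.FSGroupPolicy("invalid-mode")
stored := storage.CSIDriver{
    ObjectMeta: metav1.ObjectMeta{Name: "test-driver", ResourceVersion: "1"},
    Spec: storage.CSIDriverSpec{
        AttachRequired: &attachNotRequired,
        PodInfoOnMount: &podInfoOnMount,
        FSGroupPolicy:  &invalid,
    },
}
if errs := ValidateCSIDriver(&stored); len(errs) == 0 {
    panic("create should reject the invalid FSGroupPolicy")
}
if errs := ValidateCSIDriverUpdate(stored.DeepCopy(), &stored); len(errs) != 0 {
    panic("update with an unchanged spec should pass")
}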
@@ -1870,8 +1870,11 @@ func TestCSIDriverValidationUpdate(t *testing.T) {
     podInfoOnMount := true
     notPodInfoOnMount := false
     notRequiresRepublish := false
+    resourceVersion := "1"
+    invalidFSGroupPolicy := storage.ReadWriteOnceWithFSTypeFSGroupPolicy
+    invalidFSGroupPolicy = "invalid-mode"
     old := storage.CSIDriver{
-        ObjectMeta: metav1.ObjectMeta{Name: driverName},
+        ObjectMeta: metav1.ObjectMeta{Name: driverName, ResourceVersion: resourceVersion},
         Spec: storage.CSIDriverSpec{
             AttachRequired: &attachNotRequired,
             PodInfoOnMount: &notPodInfoOnMount,
@@ -1883,11 +1886,27 @@ func TestCSIDriverValidationUpdate(t *testing.T) {
         },
     }
 
-    // Currently there is only one success case: exactly the same
-    // as the existing object.
-    successCases := []storage.CSIDriver{old}
+    // Currently we compare the object against itself
+    // and ensure updates succeed
+    successCases := []storage.CSIDriver{
+        old,
+        // An invalid FSGroupPolicy should still pass
+        {
+            ObjectMeta: metav1.ObjectMeta{Name: driverName, ResourceVersion: resourceVersion},
+            Spec: storage.CSIDriverSpec{
+                AttachRequired: &attachNotRequired,
+                PodInfoOnMount: &notPodInfoOnMount,
+                VolumeLifecycleModes: []storage.VolumeLifecycleMode{
+                    storage.VolumeLifecycleEphemeral,
+                    storage.VolumeLifecyclePersistent,
+                },
+                FSGroupPolicy: &invalidFSGroupPolicy,
+            },
+        },
+    }
     for _, csiDriver := range successCases {
-        if errs := ValidateCSIDriverUpdate(&csiDriver, &old); len(errs) != 0 {
+        newDriver := csiDriver.DeepCopy()
+        if errs := ValidateCSIDriverUpdate(&csiDriver, newDriver); len(errs) != 0 {
             t.Errorf("expected success for %+v: %v", csiDriver, errs)
         }
     }
@@ -1963,6 +1982,21 @@ func TestCSIDriverValidationUpdate(t *testing.T) {
                 }
             },
         },
+        {
+            name: "FSGroupPolicy invalidated",
+            modify: func(new *storage.CSIDriver) {
+                invalidFSGroupPolicy := storage.ReadWriteOnceWithFSTypeFSGroupPolicy
+                invalidFSGroupPolicy = "invalid"
+                new.Spec.FSGroupPolicy = &invalidFSGroupPolicy
+            },
+        },
+        {
+            name: "FSGroupPolicy changed",
+            modify: func(new *storage.CSIDriver) {
+                fileFSGroupPolicy := storage.FileFSGroupPolicy
+                new.Spec.FSGroupPolicy = &fileFSGroupPolicy
+            },
+        },
     }
 
     for _, test := range errorCases {
@@ -432,6 +432,7 @@ const (
 
     // owner: @huffmanca
     // alpha: v1.19
+    // beta: v1.20
     //
     // Determines if a CSI Driver supports applying fsGroup.
     CSIVolumeFSGroupPolicy featuregate.Feature = "CSIVolumeFSGroupPolicy"
@@ -770,7 +771,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
     CSIStorageCapacity: {Default: false, PreRelease: featuregate.Alpha},
     CSIServiceAccountToken: {Default: false, PreRelease: featuregate.Alpha},
     GenericEphemeralVolume: {Default: false, PreRelease: featuregate.Alpha},
-    CSIVolumeFSGroupPolicy: {Default: false, PreRelease: featuregate.Alpha},
+    CSIVolumeFSGroupPolicy: {Default: true, PreRelease: featuregate.Beta},
     RuntimeClass: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.23
     NodeLease: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
     SCTPSupport: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.22
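With the gate now on by default in beta, the standard gate-check pattern applies wherever the field is read or persisted. A hypothetical sketch of that check (the helper name and placement are illustrative, not code from this commit):

package storagehelpers // illustrative placement

import (
    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/kubernetes/pkg/apis/storage"
    "k8s.io/kubernetes/pkg/features"
)

// dropDisabledFSGroupPolicy shows the usual drop-disabled-fields pattern:
// when CSIVolumeFSGroupPolicy is off, clear the field so that newly
// written objects never persist it.
func dropDisabledFSGroupPolicy(spec *storage.CSIDriverSpec) {
    if !utilfeature.DefaultFeatureGate.Enabled(features.CSIVolumeFSGroupPolicy) {
        spec.FSGroupPolicy = nil
    }
}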
@@ -113,8 +113,7 @@ func (csiDriverStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.
 func (csiDriverStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
     newCSIDriverObj := obj.(*storage.CSIDriver)
     oldCSIDriverObj := old.(*storage.CSIDriver)
-    errorList := validation.ValidateCSIDriver(newCSIDriverObj)
-    return append(errorList, validation.ValidateCSIDriverUpdate(newCSIDriverObj, oldCSIDriverObj)...)
+    return validation.ValidateCSIDriverUpdate(newCSIDriverObj, oldCSIDriverObj)
 }
 
 func (csiDriverStrategy) AllowUnconditionalUpdate() bool {
@@ -27,6 +27,7 @@ import (
     api "k8s.io/api/core/v1"
     v1 "k8s.io/api/core/v1"
     storage "k8s.io/api/storage/v1"
+    storagev1 "k8s.io/api/storage/v1"
     meta "k8s.io/apimachinery/pkg/apis/meta/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
@@ -47,17 +48,19 @@ import (
 // based on operations from the volume manager/reconciler/operation executor
 func TestCSI_VolumeAll(t *testing.T) {
     defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, true)()
+    defaultFSGroupPolicy := storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy
 
     tests := []struct {
         name       string
         specName   string
         driver     string
         volName    string
         specFunc   func(specName, driver, volName string) *volume.Spec
         podFunc    func() *api.Pod
         isInline   bool
         shouldFail bool
-        driverSpec *storage.CSIDriverSpec
+        disableFSGroupPolicyFeatureGate bool
+        driverSpec                      *storage.CSIDriverSpec
     }{
         {
             name: "PersistentVolume",
@@ -87,6 +90,26 @@ func TestCSI_VolumeAll(t *testing.T) {
             driverSpec: &storage.CSIDriverSpec{
                 // Required for the driver to be accepted for the persistent volume.
                 VolumeLifecycleModes: []storage.VolumeLifecycleMode{storage.VolumeLifecyclePersistent},
+                FSGroupPolicy: &defaultFSGroupPolicy,
+            },
+        },
+        {
+            name:     "PersistentVolume with driver info and FSGroup disabled",
+            specName: "pv2",
+            driver:   "simple-driver",
+            volName:  "vol2",
+            specFunc: func(specName, driver, volName string) *volume.Spec {
+                return volume.NewSpecFromPersistentVolume(makeTestPV(specName, 20, driver, volName), false)
+            },
+            podFunc: func() *api.Pod {
+                podUID := types.UID(fmt.Sprintf("%08X", rand.Uint64()))
+                return &api.Pod{ObjectMeta: meta.ObjectMeta{UID: podUID, Namespace: testns}}
+            },
+            disableFSGroupPolicyFeatureGate: true,
+            driverSpec: &storage.CSIDriverSpec{
+                // Required for the driver to be accepted for the persistent volume.
+                VolumeLifecycleModes: []storage.VolumeLifecycleMode{storage.VolumeLifecyclePersistent},
+                FSGroupPolicy: &defaultFSGroupPolicy,
             },
         },
         {
@@ -104,6 +127,7 @@ func TestCSI_VolumeAll(t *testing.T) {
             driverSpec: &storage.CSIDriverSpec{
                 // This will cause the volume to be rejected.
                 VolumeLifecycleModes: []storage.VolumeLifecycleMode{storage.VolumeLifecycleEphemeral},
+                FSGroupPolicy: &defaultFSGroupPolicy,
             },
             shouldFail: true,
         },
@@ -122,6 +146,7 @@ func TestCSI_VolumeAll(t *testing.T) {
             driverSpec: &storage.CSIDriverSpec{
                 // Required for the driver to be accepted for the inline volume.
                 VolumeLifecycleModes: []storage.VolumeLifecycleMode{storage.VolumeLifecycleEphemeral},
+                FSGroupPolicy: &defaultFSGroupPolicy,
             },
         },
         {
@@ -139,6 +164,7 @@ func TestCSI_VolumeAll(t *testing.T) {
             driverSpec: &storage.CSIDriverSpec{
                 // Required for the driver to be accepted for the inline volume.
                 VolumeLifecycleModes: []storage.VolumeLifecycleMode{storage.VolumeLifecyclePersistent, storage.VolumeLifecycleEphemeral},
+                FSGroupPolicy: &defaultFSGroupPolicy,
             },
         },
         {
@@ -221,6 +247,8 @@ func TestCSI_VolumeAll(t *testing.T) {
 
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
+            defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIInlineVolume, !test.disableFSGroupPolicyFeatureGate)()
+
             tmpDir, err := utiltesting.MkTmpdir("csi-test")
             if err != nil {
                 t.Fatalf("can't create temp dir: %v", err)
@@ -85,6 +85,7 @@ func makeTestVol(name string, driverName string) *api.Volume {
 }
 
 func getTestCSIDriver(name string, podInfoMount *bool, attachable *bool, volumeLifecycleModes []storagev1.VolumeLifecycleMode) *storagev1.CSIDriver {
+    defaultFSGroupPolicy := storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy
     return &storagev1.CSIDriver{
         ObjectMeta: meta.ObjectMeta{
             Name: name,
@@ -93,6 +94,7 @@ func getTestCSIDriver(name string, podInfoMount *bool, attachable *bool, volumeL
             PodInfoOnMount: podInfoMount,
             AttachRequired: attachable,
             VolumeLifecycleModes: volumeLifecycleModes,
+            FSGroupPolicy: &defaultFSGroupPolicy,
         },
     }
 }
@@ -22,6 +22,7 @@ import (
     "encoding/json"
     "errors"
     "fmt"
+    "math/rand"
     "strconv"
     "strings"
     "time"
@@ -49,6 +50,7 @@ import (
     e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
     e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
     e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
+    e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
     "k8s.io/kubernetes/test/e2e/storage/drivers"
     "k8s.io/kubernetes/test/e2e/storage/testpatterns"
     "k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -114,6 +116,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
         javascriptHooks map[string]string
         tokenRequests []storagev1.TokenRequest
         requiresRepublish *bool
+        fsGroupPolicy *storagev1.FSGroupPolicy
     }
 
     type mockDriverSetup struct {
@@ -155,6 +158,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
             JavascriptHooks: tp.javascriptHooks,
             TokenRequests: tp.tokenRequests,
             RequiresRepublish: tp.requiresRepublish,
+            FSGroupPolicy: tp.fsGroupPolicy,
         }
 
         // this just disable resizing on driver, keeping resizing on SC enabled.
@@ -229,6 +233,39 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
         return pod, err
     }
 
+    createPodWithFSGroup := func(fsGroup *int64) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
+        ginkgo.By("Creating pod with fsGroup")
+        nodeSelection := m.config.ClientNodeSelection
+        var sc *storagev1.StorageClass
+        if dDriver, ok := m.driver.(testsuites.DynamicPVTestDriver); ok {
+            sc = dDriver.GetDynamicProvisionStorageClass(m.config, "")
+        }
+        scTest := testsuites.StorageClassTest{
+            Name: m.driver.GetDriverInfo().Name,
+            Provisioner: sc.Provisioner,
+            Parameters: sc.Parameters,
+            ClaimSize: "1Gi",
+            ExpectedSize: "1Gi",
+            DelayBinding: m.tp.lateBinding,
+            AllowVolumeExpansion: m.tp.enableResizing,
+        }
+
+        class, claim, pod := startBusyBoxPod(f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, fsGroup)
+
+        if class != nil {
+            m.sc[class.Name] = class
+        }
+        if claim != nil {
+            m.pvcs = append(m.pvcs, claim)
+        }
+
+        if pod != nil {
+            m.pods = append(m.pods, pod)
+        }
+
+        return class, claim, pod
+    }
+
     cleanup := func() {
         cs := f.ClientSet
         var errs []error
@@ -1369,6 +1406,89 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
             })
         }
     })
+
+    // These tests *only* work on a cluster which has the CSIVolumeFSGroupPolicy feature enabled.
+    ginkgo.Context("CSI FSGroupPolicy [LinuxOnly]", func() {
+        tests := []struct {
+            name string
+            fsGroupPolicy storagev1.FSGroupPolicy
+            modified bool
+        }{
+            {
+                name: "should modify fsGroup if fsGroupPolicy=default",
+                fsGroupPolicy: storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy,
+                modified: true,
+            },
+            {
+                name: "should modify fsGroup if fsGroupPolicy=File",
+                fsGroupPolicy: storagev1.FileFSGroupPolicy,
+                modified: true,
+            },
+            {
+                name: "should not modify fsGroup if fsGroupPolicy=None",
+                fsGroupPolicy: storagev1.NoneFSGroupPolicy,
+                modified: false,
+            },
+        }
+        for _, t := range tests {
+            test := t
+            ginkgo.It(test.name, func() {
+                if framework.NodeOSDistroIs("windows") {
+                    e2eskipper.Skipf("FSGroupPolicy is only applied on linux nodes -- skipping")
+                }
+                init(testParameters{
+                    disableAttach: true,
+                    registerDriver: true,
+                    fsGroupPolicy: &test.fsGroupPolicy,
+                })
+                defer cleanup()
+
+                // kube-scheduler may need some time before it gets the CSIDriver object.
+                // Without them, scheduling doesn't run as expected by the test.
+                syncDelay := 5 * time.Second
+                time.Sleep(syncDelay)
+
+                fsGroupVal := int64(rand.Int63n(20000) + 1024)
+                fsGroup := &fsGroupVal
+
+                _, _, pod := createPodWithFSGroup(fsGroup) /* persistent volume */
+
+                mountPath := pod.Spec.Containers[0].VolumeMounts[0].MountPath
+                dirName := mountPath + "/" + f.UniqueName
+                fileName := dirName + "/" + f.UniqueName
+
+                err := e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
+                framework.ExpectNoError(err, "failed to start pod")
+
+                // Create the subdirectory to ensure that fsGroup propagates
+                createDirectory := fmt.Sprintf("mkdir %s", dirName)
+                _, _, err = utils.PodExec(f, pod, createDirectory)
+                framework.ExpectNoError(err, "failed: creating the directory: %s", err)
+
+                // Inject the contents onto the mount
+                createFile := fmt.Sprintf("echo '%s' > '%s'; sync", "filecontents", fileName)
+                _, _, err = utils.PodExec(f, pod, createFile)
+                framework.ExpectNoError(err, "failed: writing the contents: %s", err)
+
+                // Delete the created file. This step is mandatory, as the mock driver
+                // won't clean up the contents automatically.
+                defer func() {
+                    delete := fmt.Sprintf("rm -fr %s", dirName)
+                    _, _, err = utils.PodExec(f, pod, delete)
+                    framework.ExpectNoError(err, "failed: deleting the directory: %s", err)
+                }()
+
+                // Ensure that the fsGroup matches what we expect
+                if test.modified {
+                    utils.VerifyFSGroupInPod(f, fileName, strconv.FormatInt(*fsGroup, 10), pod)
+                } else {
+                    utils.VerifyFSGroupInPod(f, fileName, "root", pod)
+                }
+
+                // The created resources will be removed by the cleanup() function,
+                // so need to delete anything here.
+            })
+        }
+    })
 })
 
 // A lot of this code was copied from e2e/framework. It would be nicer
@@ -1505,7 +1625,7 @@ func checkCSINodeForLimits(nodeName string, driverName string, cs clientset.Inte
     return attachLimit, nil
 }
 
-func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
+func createClaim(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim) {
     class := newStorageClass(t, ns, "")
     if scName != "" {
         class.Name = scName
@@ -1530,9 +1650,21 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e
         _, err = e2epv.WaitForPVClaimBoundPhase(cs, pvcClaims, framework.ClaimProvisionTimeout)
         framework.ExpectNoError(err, "Failed waiting for PVC to be bound: %v", err)
     }
+    return class, claim
+}
+
+func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
+    class, claim := createClaim(cs, t, node, scName, ns)
+
     pod, err := startPausePodWithClaim(cs, claim, node, ns)
-    framework.ExpectNoError(err, "Failed to create pod: %v", err)
+    framework.ExpectNoError(err, "Failed to create pause pod: %v", err)
+    return class, claim, pod
+}
+
+func startBusyBoxPod(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string, fsGroup *int64) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
+    class, claim := createClaim(cs, t, node, scName, ns)
+    pod, err := startBusyBoxPodWithClaim(cs, claim, node, ns, fsGroup)
+    framework.ExpectNoError(err, "Failed to create busybox pod: %v", err)
     return class, claim, pod
 }
 
@@ -1557,6 +1689,17 @@ func startPausePodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClai
         node, ns)
 }
 
+func startBusyBoxPodWithClaim(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection, ns string, fsGroup *int64) (*v1.Pod, error) {
+    return startBusyBoxPodWithVolumeSource(cs,
+        v1.VolumeSource{
+            PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+                ClaimName: pvc.Name,
+                ReadOnly: false,
+            },
+        },
+        node, ns, fsGroup)
+}
+
 func startPausePodWithInlineVolume(cs clientset.Interface, inlineVolume *v1.CSIVolumeSource, node e2epod.NodeSelection, ns string) (*v1.Pod, error) {
     return startPausePodWithVolumeSource(cs,
         v1.VolumeSource{
@@ -1596,6 +1739,41 @@ func startPausePodWithVolumeSource(cs clientset.Interface, volumeSource v1.Volum
     return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
 }
 
+func startBusyBoxPodWithVolumeSource(cs clientset.Interface, volumeSource v1.VolumeSource, node e2epod.NodeSelection, ns string, fsGroup *int64) (*v1.Pod, error) {
+    pod := &v1.Pod{
+        ObjectMeta: metav1.ObjectMeta{
+            GenerateName: "pvc-volume-tester-",
+        },
+        Spec: v1.PodSpec{
+            Containers: []v1.Container{
+                {
+                    Name: "volume-tester",
+                    Image: framework.BusyBoxImage,
+                    VolumeMounts: []v1.VolumeMount{
+                        {
+                            Name: "my-volume",
+                            MountPath: "/mnt/test",
+                        },
+                    },
+                    Command: e2evolume.GenerateScriptCmd("while true ; do sleep 2; done"),
+                },
+            },
+            SecurityContext: &v1.PodSecurityContext{
+                FSGroup: fsGroup,
+            },
+            RestartPolicy: v1.RestartPolicyNever,
+            Volumes: []v1.Volume{
+                {
+                    Name: "my-volume",
+                    VolumeSource: volumeSource,
+                },
+            },
+        },
+    }
+    e2epod.SetNodeSelection(&pod.Spec, node)
+    return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
+}
+
 // Dummy structure that parses just volume_attributes and error code out of logged CSI call
 type mockCSICall struct {
     json string // full log entry
@@ -255,6 +255,7 @@ type mockCSIDriver struct {
     javascriptHooks map[string]string
     tokenRequests []storagev1.TokenRequest
     requiresRepublish *bool
+    fsGroupPolicy *storagev1.FSGroupPolicy
 }
 
 // CSIMockDriverOpts defines options used for csi driver
@@ -271,6 +272,7 @@ type CSIMockDriverOpts struct {
     JavascriptHooks map[string]string
     TokenRequests []storagev1.TokenRequest
     RequiresRepublish *bool
+    FSGroupPolicy *storagev1.FSGroupPolicy
 }
 
 var _ testsuites.TestDriver = &mockCSIDriver{}
@@ -330,6 +332,7 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) testsuites.TestDriver {
         javascriptHooks: driverOpts.JavascriptHooks,
         tokenRequests: driverOpts.TokenRequests,
         requiresRepublish: driverOpts.RequiresRepublish,
+        fsGroupPolicy: driverOpts.FSGroupPolicy,
     }
 }
 
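From here the new knob flows end to end: CSIMockDriverOpts carries it into mockCSIDriver, PrepareTest copies it into PatchCSIOptions, and PatchCSIDeployment stamps it onto the deployed CSIDriver object (see the following hunks). A short sketch of how a test opts in; everything outside the fields shown in this diff is assumed:

// Illustrative wiring: deploy the mock driver with an explicit policy.
policy := storagev1.FileFSGroupPolicy
driver := drivers.InitMockCSIDriver(drivers.CSIMockDriverOpts{
    FSGroupPolicy: &policy,
})
_ = driver // handed to the suite as a testsuites.TestDriver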
@@ -433,6 +436,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) (*testsuites.PerTest
         },
         TokenRequests: m.tokenRequests,
         RequiresRepublish: m.requiresRepublish,
+        FSGroupPolicy: m.fsGroupPolicy,
     }
     cleanup, err := utils.CreateFromManifests(f, driverNamespace, func(item interface{}) error {
         return utils.PatchCSIDeployment(f, o, item)
@@ -139,6 +139,9 @@ func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interf
         if o.RequiresRepublish != nil {
             object.Spec.RequiresRepublish = o.RequiresRepublish
         }
+        if o.FSGroupPolicy != nil {
+            object.Spec.FSGroupPolicy = o.FSGroupPolicy
+        }
     }
 
     return nil
@@ -194,4 +197,8 @@ type PatchCSIOptions struct {
     // field *if* the driver deploys a CSIDriver object. Ignored
     // otherwise.
     RequiresRepublish *bool
+    // If not nil, the value to use for the CSIDriver.Spec.FSGroupPolicy
+    // field *if* the driver deploys a CSIDriver object. Ignored
+    // otherwise.
+    FSGroupPolicy *storagev1.FSGroupPolicy
 }
@@ -94,6 +94,17 @@ func VerifyExecInPodSucceed(f *framework.Framework, pod *v1.Pod, shExec string)
     }
 }
 
+// VerifyFSGroupInPod verifies that the passed in filePath contains the expectedFSGroup
+func VerifyFSGroupInPod(f *framework.Framework, filePath, expectedFSGroup string, pod *v1.Pod) {
+    cmd := fmt.Sprintf("ls -l %s", filePath)
+    stdout, stderr, err := PodExec(f, pod, cmd)
+    framework.ExpectNoError(err)
+    framework.Logf("pod %s/%s exec for cmd %s, stdout: %s, stderr: %s", pod.Namespace, pod.Name, cmd, stdout, stderr)
+    fsGroupResult := strings.Fields(stdout)[3]
+    framework.ExpectEqual(expectedFSGroup, fsGroupResult,
+        "Expected fsGroup of %s, got %s", expectedFSGroup, fsGroupResult)
+}
+
 // VerifyExecInPodFail verifies shell cmd in target pod fail with certain exit code
 func VerifyExecInPodFail(f *framework.Framework, pod *v1.Pod, shExec string, exitCode int) {
     stdout, stderr, err := PodExec(f, pod, shExec)
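VerifyFSGroupInPod leans on the fixed column order of ls -l output: field 0 is the mode, 1 the link count, 2 the owner, and 3 the group, which is why strings.Fields(stdout)[3] is the value compared against the expected fsGroup. A self-contained illustration (the sample line is made up):

package main

import (
    "fmt"
    "strings"
)

func main() {
    // A representative `ls -l` line for a file created under fsGroup 20000.
    stdout := "-rw-r--r-- 1 root 20000 13 Nov 10 12:00 /mnt/test/data"
    fmt.Println(strings.Fields(stdout)[3]) // prints "20000", the group column
}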
@@ -48,10 +48,10 @@ spec:
         - mountPath: /registration
           name: registration-dir
       - name: mock
-        image: k8s.gcr.io/sig-storage/mock-driver:v3.1.0
+        image: k8s.gcr.io/sig-storage/mock-driver:v4.0.2
         args:
         - "--name=mock.storage.k8s.io"
-        - "--permissive-target-path" # because of https://github.com/kubernetes/kubernetes/issues/75535
+        - "-v=3" # enabled the gRPC call logging
         env:
         - name: CSI_ENDPOINT
           value: /csi/csi.sock