allow FSGroupPolicy and PodInfoOnMount to be mutable in CSIDriver.Spec
commit 531324cb1d (parent fc479f41b3)
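What the change enables, as a minimal client-go sketch (illustrative only: the driver name "hostpath.csi.k8s.io" and the kubeconfig lookup are placeholders, and on API servers without this change the update below is rejected because the two fields were validated as immutable):

package main

import (
	"context"
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(config)

	// Read the live CSIDriver object; the name is a placeholder.
	driver, err := cs.StorageV1().CSIDrivers().Get(context.TODO(), "hostpath.csi.k8s.io", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}

	// Flip the two fields this commit unfreezes and write the object back;
	// previously this required deleting and recreating the CSIDriver.
	fileFSGroupPolicy := storagev1.FileFSGroupPolicy
	podInfoOnMount := true
	driver.Spec.FSGroupPolicy = &fileFSGroupPolicy
	driver.Spec.PodInfoOnMount = &podInfoOnMount

	if _, err := cs.StorageV1().CSIDrivers().Update(context.TODO(), driver, metav1.UpdateOptions{}); err != nil {
		panic(err)
	}
	fmt.Println("CSIDriver spec updated in place")
}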
@@ -284,7 +284,7 @@ type CSIDriverSpec struct {
 	// permission of the volume before being mounted.
 	// Refer to the specific FSGroupPolicy values for additional details.
 	//
-	// This field is immutable.
+	// This field was immutable in Kubernetes < 1.29 and now is mutable.
 	//
 	// Defaults to ReadWriteOnceWithFSType, which will examine each volume
 	// to determine if Kubernetes should modify ownership and permissions of the volume.
@@ -318,7 +318,7 @@ type CSIDriverSpec struct {
 	// deployed on such a cluster and the deployment determines which mode that is, for example
 	// via a command line parameter of the driver.
 	//
-	// This field is immutable.
+	// This field was immutable in Kubernetes < 1.29 and now is mutable.
 	//
 	// +optional
 	PodInfoOnMount *bool
@@ -414,8 +414,6 @@ func ValidateCSIDriverUpdate(new, old *storage.CSIDriver) field.ErrorList {

 	// immutable fields should not be mutated.
 	allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(new.Spec.AttachRequired, old.Spec.AttachRequired, field.NewPath("spec", "attachedRequired"))...)
-	allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(new.Spec.FSGroupPolicy, old.Spec.FSGroupPolicy, field.NewPath("spec", "fsGroupPolicy"))...)
-	allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(new.Spec.PodInfoOnMount, old.Spec.PodInfoOnMount, field.NewPath("spec", "podInfoOnMount"))...)
 	allErrs = append(allErrs, apimachineryvalidation.ValidateImmutableField(new.Spec.VolumeLifecycleModes, old.Spec.VolumeLifecycleModes, field.NewPath("spec", "volumeLifecycleModes"))...)

 	allErrs = append(allErrs, validateTokenRequests(new.Spec.TokenRequests, field.NewPath("spec", "tokenRequests"))...)
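For context on why deleting those two lines is sufficient: ValidateImmutableField returns a "field is immutable" error whenever the old and new values differ, and ValidateCSIDriverUpdate simply aggregates such errors. A small standalone sketch of that behavior, using the same apimachinery helpers as the validation code above:

package main

import (
	"fmt"

	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	path := field.NewPath("spec", "fsGroupPolicy")

	// Differing values produce a "field is immutable" error.
	errs := apimachineryvalidation.ValidateImmutableField("File", "None", path)
	fmt.Println(len(errs), errs.ToAggregate())

	// Identical values pass, which is why updates that left the field
	// alone were always accepted.
	errs = apimachineryvalidation.ValidateImmutableField("None", "None", path)
	fmt.Println(len(errs)) // 0
}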
@@ -1715,6 +1715,17 @@ func TestCSIDriverValidationUpdate(t *testing.T) {
 		modify: func(new *storage.CSIDriver) {
 			new.Spec.SELinuxMount = &notSELinuxMount
 		},
+	}, {
+		name: "change PodInfoOnMount",
+		modify: func(new *storage.CSIDriver) {
+			new.Spec.PodInfoOnMount = &podInfoOnMount
+		},
+	}, {
+		name: "change FSGroupPolicy",
+		modify: func(new *storage.CSIDriver) {
+			fileFSGroupPolicy := storage.FileFSGroupPolicy
+			new.Spec.FSGroupPolicy = &fileFSGroupPolicy
+		},
 	}}
 	for _, test := range successCases {
 		t.Run(test.name, func(t *testing.T) {
@@ -1755,11 +1766,6 @@ func TestCSIDriverValidationUpdate(t *testing.T) {
 		modify: func(new *storage.CSIDriver) {
 			new.Spec.PodInfoOnMount = nil
 		},
-	}, {
-		name: "PodInfoOnMount changed",
-		modify: func(new *storage.CSIDriver) {
-			new.Spec.PodInfoOnMount = &podInfoOnMount
-		},
 	}, {
 		name: "invalid volume lifecycle mode",
 		modify: func(new *storage.CSIDriver) {
@@ -1792,12 +1798,6 @@ func TestCSIDriverValidationUpdate(t *testing.T) {
 			invalidFSGroupPolicy := storage.FSGroupPolicy("invalid")
 			new.Spec.FSGroupPolicy = &invalidFSGroupPolicy
 		},
-	}, {
-		name: "FSGroupPolicy changed",
-		modify: func(new *storage.CSIDriver) {
-			fileFSGroupPolicy := storage.FileFSGroupPolicy
-			new.Spec.FSGroupPolicy = &fileFSGroupPolicy
-		},
 	}, {
 		name: "TokenRequests invalidated",
 		modify: func(new *storage.CSIDriver) {
@@ -83,10 +83,8 @@ func (csiDriverStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.
 		newCSIDriver.Spec.SELinuxMount = nil
 	}

-	// Any changes to the mutable fields increment the generation number.
-	if !apiequality.Semantic.DeepEqual(oldCSIDriver.Spec.TokenRequests, newCSIDriver.Spec.TokenRequests) ||
-		!apiequality.Semantic.DeepEqual(oldCSIDriver.Spec.RequiresRepublish, newCSIDriver.Spec.RequiresRepublish) ||
-		!apiequality.Semantic.DeepEqual(oldCSIDriver.Spec.SELinuxMount, newCSIDriver.Spec.SELinuxMount) {
+	// Any changes to the spec increment the generation number.
+	if !apiequality.Semantic.DeepEqual(oldCSIDriver.Spec, newCSIDriver.Spec) {
 		newCSIDriver.Generation = oldCSIDriver.Generation + 1
 	}
 }
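The design point of this hunk: the old per-field comparison had to be extended by hand every time another spec field became mutable, while a semantic DeepEqual over the whole spec covers FSGroupPolicy and PodInfoOnMount (and any future field) automatically. A standalone restatement of the new predicate, with a hypothetical cut-down struct standing in for storage.CSIDriverSpec:

package main

import (
	"fmt"

	apiequality "k8s.io/apimachinery/pkg/api/equality"
)

// spec is a stand-in for storage.CSIDriverSpec, reduced to the two fields
// this commit makes mutable.
type spec struct {
	FSGroupPolicy  *string
	PodInfoOnMount *bool
}

func generationAfterUpdate(oldGen int64, oldSpec, newSpec spec) int64 {
	if !apiequality.Semantic.DeepEqual(oldSpec, newSpec) {
		return oldGen + 1 // any spec change bumps metadata.generation
	}
	return oldGen
}

func main() {
	file := "File"
	old := spec{}
	updated := spec{FSGroupPolicy: &file}
	fmt.Println(generationAfterUpdate(3, old, updated)) // 4
}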
@@ -88,7 +88,6 @@ func TestCSIDriverPrepareForUpdate(t *testing.T) {
 	})

 	attachRequired := true
-	podInfoOnMount := true
 	driverWithNothing := &storage.CSIDriver{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -100,7 +99,6 @@ func TestCSIDriverPrepareForUpdate(t *testing.T) {
 		},
 		Spec: storage.CSIDriverSpec{
 			AttachRequired: &attachRequired,
-			PodInfoOnMount: &podInfoOnMount,
 			VolumeLifecycleModes: []storage.VolumeLifecycleMode{
 				storage.VolumeLifecyclePersistent,
 			},
@@ -109,6 +107,49 @@ func TestCSIDriverPrepareForUpdate(t *testing.T) {
 	enabled := true
 	disabled := false
 	gcp := "gcp"
+	noneFsGroupPolicy := storage.NoneFSGroupPolicy
+	readWriteOnceWithFSTypeFSGroupPolicy := storage.ReadWriteOnceWithFSTypeFSGroupPolicy
+	fileFSGroupPolicy := storage.FileFSGroupPolicy
+	driverWithPodInfoOnMountEnabled := &storage.CSIDriver{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "foo",
+		},
+		Spec: storage.CSIDriverSpec{
+			PodInfoOnMount: &enabled,
+		},
+	}
+	driverWithPodInfoOnMountDisabled := &storage.CSIDriver{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "foo",
+		},
+		Spec: storage.CSIDriverSpec{
+			PodInfoOnMount: &disabled,
+		},
+	}
+	driverWithNoneFSGroupPolicy := &storage.CSIDriver{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "foo",
+		},
+		Spec: storage.CSIDriverSpec{
+			FSGroupPolicy: &noneFsGroupPolicy,
+		},
+	}
+	driverWithReadWriteOnceWithFSTypeFSGroupPolicy := &storage.CSIDriver{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "foo",
+		},
+		Spec: storage.CSIDriverSpec{
+			FSGroupPolicy: &readWriteOnceWithFSTypeFSGroupPolicy,
+		},
+	}
+	driverWithFileFSGroupPolicy := &storage.CSIDriver{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "foo",
+		},
+		Spec: storage.CSIDriverSpec{
+			FSGroupPolicy: &fileFSGroupPolicy,
+		},
+	}
 	driverWithCapacityEnabled := &storage.CSIDriver{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "foo",
@@ -164,23 +205,92 @@ func TestCSIDriverPrepareForUpdate(t *testing.T) {
 		wantGeneration   int64
 		wantSELinuxMount *bool
 	}{
+		{
+			name:           "podInfoOnMount feature enabled, before: none, update: enabled",
+			old:            driverWithNothing,
+			update:         driverWithPodInfoOnMountEnabled,
+			wantGeneration: 1,
+		},
+		{
+			name:           "podInfoOnMount feature enabled, before: enabled, update: disabled",
+			old:            driverWithPodInfoOnMountEnabled,
+			update:         driverWithPodInfoOnMountDisabled,
+			wantGeneration: 1,
+		},
+		{
+			name:           "fSGroupPolicy feature enabled, before: nil, update: none",
+			old:            driverWithNothing,
+			update:         driverWithNoneFSGroupPolicy,
+			wantGeneration: 1,
+		},
+		{
+			name:           "fSGroupPolicy feature enabled, before: nil, update: readWriteOnceWithFSType",
+			old:            driverWithNothing,
+			update:         driverWithReadWriteOnceWithFSTypeFSGroupPolicy,
+			wantGeneration: 1,
+		},
+		{
+			name:           "fSGroupPolicy feature enabled, before: nil, update: file",
+			old:            driverWithNothing,
+			update:         driverWithFileFSGroupPolicy,
+			wantGeneration: 1,
+		},
+		{
+			name:           "fSGroupPolicy feature enabled, before: none, update: readWriteOnceWithFSType",
+			old:            driverWithNoneFSGroupPolicy,
+			update:         driverWithReadWriteOnceWithFSTypeFSGroupPolicy,
+			wantGeneration: 1,
+		},
+		{
+			name:           "fSGroupPolicy feature enabled, before: none, update: file",
+			old:            driverWithNoneFSGroupPolicy,
+			update:         driverWithFileFSGroupPolicy,
+			wantGeneration: 1,
+		},
+		{
+			name:           "fSGroupPolicy feature enabled, before: readWriteOnceWithFSType, update: none",
+			old:            driverWithReadWriteOnceWithFSTypeFSGroupPolicy,
+			update:         driverWithNoneFSGroupPolicy,
+			wantGeneration: 1,
+		},
+		{
+			name:           "fSGroupPolicy feature enabled, before: readWriteOnceWithFSType, update: file",
+			old:            driverWithReadWriteOnceWithFSTypeFSGroupPolicy,
+			update:         driverWithFileFSGroupPolicy,
+			wantGeneration: 1,
+		},
+		{
+			name:           "fSGroupPolicy feature enabled, before: file, update: none",
+			old:            driverWithFileFSGroupPolicy,
+			update:         driverWithNoneFSGroupPolicy,
+			wantGeneration: 1,
+		},
+		{
+			name:           "fSGroupPolicy feature enabled, before: file, update: readWriteOnceWithFSType",
+			old:            driverWithFileFSGroupPolicy,
+			update:         driverWithReadWriteOnceWithFSTypeFSGroupPolicy,
+			wantGeneration: 1,
+		},
 		{
 			name:         "capacity feature enabled, before: none, update: enabled",
 			old:          driverWithNothing,
 			update:       driverWithCapacityEnabled,
 			wantCapacity: &enabled,
+			wantGeneration: 1,
 		},
 		{
 			name:         "capacity feature enabled, before: enabled, update: disabled",
 			old:          driverWithCapacityEnabled,
 			update:       driverWithCapacityDisabled,
 			wantCapacity: &disabled,
+			wantGeneration: 1,
 		},
 		{
 			name:      "inline feature enabled, before: none, update: persistent",
 			old:       driverWithNothing,
 			update:    driverWithPersistent,
 			wantModes: resultPersistent,
+			wantGeneration: 1,
 		},
 		{
 			name: "service account token feature enabled, before: none, update: audience=gcp",
@@ -306,7 +306,7 @@ type CSIDriverSpec struct {
 	// deployed on such a cluster and the deployment determines which mode that is, for example
 	// via a command line parameter of the driver.
 	//
-	// This field is immutable.
+	// This field was immutable in Kubernetes < 1.29 and now is mutable.
 	//
 	// +optional
 	PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"`
@@ -353,7 +353,7 @@ type CSIDriverSpec struct {
 	// permission of the volume before being mounted.
 	// Refer to the specific FSGroupPolicy values for additional details.
 	//
-	// This field is immutable.
+	// This field was immutable in Kubernetes < 1.29 and now is mutable.
 	//
 	// Defaults to ReadWriteOnceWithFSType, which will examine each volume
 	// to determine if Kubernetes should modify ownership and permissions of the volume.
@@ -248,6 +248,17 @@ func (m *mockDriverSetup) cleanup(ctx context.Context) {
 	framework.ExpectNoError(err, "while cleaning up after test")
 }

+func (m *mockDriverSetup) update(o utils.PatchCSIOptions) {
+	item, err := m.cs.StorageV1().CSIDrivers().Get(context.TODO(), m.config.GetUniqueDriverName(), metav1.GetOptions{})
+	framework.ExpectNoError(err, "Failed to get CSIDriver %v", m.config.GetUniqueDriverName())
+
+	err = utils.PatchCSIDeployment(nil, o, item)
+	framework.ExpectNoError(err, "Failed to apply %v to CSIDriver object %v", o, m.config.GetUniqueDriverName())
+
+	_, err = m.cs.StorageV1().CSIDrivers().Update(context.TODO(), item, metav1.UpdateOptions{})
+	framework.ExpectNoError(err, "Failed to update CSIDriver %v", m.config.GetUniqueDriverName())
+}
+
 func (m *mockDriverSetup) createPod(ctx context.Context, withVolume volumeType) (class *storagev1.StorageClass, claim *v1.PersistentVolumeClaim, pod *v1.Pod) {
 	ginkgo.By("Creating pod")
 	f := m.f
@@ -722,8 +733,8 @@ func checkPodLogs(ctx context.Context, getCalls func(ctx context.Context) ([]dri
 		switch call.Method {
 		case "NodePublishVolume":
 			numNodePublishVolume++
-			if numNodePublishVolume == 1 {
-				// Check that NodePublish had expected attributes for first volume
+			if numNodePublishVolume == expectedNumNodePublish {
+				// Check that NodePublish had expected attributes for last of expected volume
 				for k, v := range expectedAttributes {
 					vv, found := call.Request.VolumeContext[k]
 					if found && (v == vv || (v == "<nonempty>" && len(vv) != 0)) {
@@ -21,9 +21,9 @@ import (
 	"fmt"
 	"math/rand"
 	"strconv"
-	"time"

 	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
 	storagev1 "k8s.io/api/storage/v1"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -74,47 +74,67 @@ var _ = utils.SIGDescribe("CSI Mock volume fsgroup policies", func() {
 			})
 			ginkgo.DeferCleanup(m.cleanup)

-			// kube-scheduler may need some time before it gets the CSIDriver object.
-			// Without them, scheduling doesn't run as expected by the test.
-			syncDelay := 5 * time.Second
-			time.Sleep(syncDelay)
-
-			fsGroupVal := int64(rand.Int63n(20000) + 1024)
-			fsGroup := &fsGroupVal
-
-			_, _, pod := m.createPodWithFSGroup(ctx, fsGroup) /* persistent volume */
-
-			mountPath := pod.Spec.Containers[0].VolumeMounts[0].MountPath
-			dirName := mountPath + "/" + f.UniqueName
-			fileName := dirName + "/" + f.UniqueName
-
-			err := e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
-			framework.ExpectNoError(err, "failed to start pod")
-
-			// Create the subdirectory to ensure that fsGroup propagates
-			createDirectory := fmt.Sprintf("mkdir %s", dirName)
-			_, _, err = e2evolume.PodExec(f, pod, createDirectory)
-			framework.ExpectNoError(err, "failed: creating the directory: %s", err)
-
-			// Inject the contents onto the mount
-			createFile := fmt.Sprintf("echo '%s' > '%s'; sync", "filecontents", fileName)
-			_, _, err = e2evolume.PodExec(f, pod, createFile)
-			framework.ExpectNoError(err, "failed: writing the contents: %s", err)
-
-			// Delete the created file. This step is mandatory, as the mock driver
-			// won't clean up the contents automatically.
-			defer func() {
-				deleteDir := fmt.Sprintf("rm -fr %s", dirName)
-				_, _, err = e2evolume.PodExec(f, pod, deleteDir)
-				framework.ExpectNoError(err, "failed: deleting the directory: %s", err)
-			}()
-
-			// Ensure that the fsGroup matches what we expect
-			if test.modified {
-				utils.VerifyFSGroupInPod(f, fileName, strconv.FormatInt(*fsGroup, 10), pod)
-			} else {
-				utils.VerifyFSGroupInPod(f, fileName, "root", pod)
-			}
+			waitUtilFSGroupInPod(ctx, m, test.modified)
+
+			// The created resources will be removed by the cleanup() function,
+			// so need to delete anything here.
+		})
+	}
+})
+
+	ginkgo.Context("CSI FSGroupPolicy Update [LinuxOnly]", func() {
+		tests := []struct {
+			name             string
+			oldFSGroupPolicy storagev1.FSGroupPolicy
+			newFSGroupPolicy storagev1.FSGroupPolicy
+		}{
+			{
+				name:             "should update fsGroup if update from None to File",
+				oldFSGroupPolicy: storagev1.NoneFSGroupPolicy,
+				newFSGroupPolicy: storagev1.FileFSGroupPolicy,
+			},
+			{
+				name:             "should update fsGroup if update from None to default",
+				oldFSGroupPolicy: storagev1.NoneFSGroupPolicy,
+				newFSGroupPolicy: storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy,
+			},
+			{
+				name:             "should not update fsGroup if update from File to None",
+				oldFSGroupPolicy: storagev1.FileFSGroupPolicy,
+				newFSGroupPolicy: storagev1.NoneFSGroupPolicy,
+			},
+			{
+				name:             "should update fsGroup if update from File to default",
+				oldFSGroupPolicy: storagev1.FileFSGroupPolicy,
+				newFSGroupPolicy: storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy,
+			},
+			{
+				name:             "should not update fsGroup if update from detault to None",
+				oldFSGroupPolicy: storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy,
+				newFSGroupPolicy: storagev1.NoneFSGroupPolicy,
+			},
+			{
+				name:             "should update fsGroup if update from detault to File",
+				oldFSGroupPolicy: storagev1.ReadWriteOnceWithFSTypeFSGroupPolicy,
+				newFSGroupPolicy: storagev1.FileFSGroupPolicy,
+			},
+		}
+		for _, t := range tests {
+			test := t
+			ginkgo.It(test.name, func(ctx context.Context) {
+				if framework.NodeOSDistroIs("windows") {
+					e2eskipper.Skipf("FSGroupPolicy is only applied on linux nodes -- skipping")
+				}
+				m.init(ctx, testParameters{
+					disableAttach:  true,
+					registerDriver: true,
+					fsGroupPolicy:  &test.oldFSGroupPolicy,
+				})
+				ginkgo.DeferCleanup(m.cleanup)
+
+				waitUtilFSGroupInPod(ctx, m, test.oldFSGroupPolicy != storagev1.NoneFSGroupPolicy)
+				m.update(utils.PatchCSIOptions{FSGroupPolicy: &test.newFSGroupPolicy})
+				waitUtilFSGroupInPod(ctx, m, test.newFSGroupPolicy != storagev1.NoneFSGroupPolicy)

 				// The created resources will be removed by the cleanup() function,
 				// so need to delete anything here.
@@ -122,3 +142,56 @@ var _ = utils.SIGDescribe("CSI Mock volume fsgroup policies", func() {
 		}
 	})
 })
+
+func waitUtilFSGroupInPod(ctx context.Context, m *mockDriverSetup, modified bool) {
+	var err error
+
+	utils.WaitUntil(framework.Poll, framework.PodStartTimeout, func() bool {
+		err = gomega.InterceptGomegaFailure(func() {
+			fsGroupVal := int64(rand.Int63n(20000) + 1024)
+			fsGroup := &fsGroupVal
+
+			_, _, pod := m.createPodWithFSGroup(ctx, fsGroup) /* persistent volume */
+			defer func() {
+				err = e2epod.DeletePodWithWait(context.TODO(), m.f.ClientSet, pod)
+				framework.ExpectNoError(err, "failed: deleting the pod: %s", err)
+			}()
+
+			mountPath := pod.Spec.Containers[0].VolumeMounts[0].MountPath
+			dirName := mountPath + "/" + m.f.UniqueName
+			fileName := dirName + "/" + m.f.UniqueName
+
+			err = e2epod.WaitForPodNameRunningInNamespace(ctx, m.cs, pod.Name, pod.Namespace)
+			framework.ExpectNoError(err, "failed to start pod")
+
+			// Create the subdirectory to ensure that fsGroup propagates
+			createDirectory := fmt.Sprintf("mkdir %s", dirName)
+			_, _, err = e2evolume.PodExec(m.f, pod, createDirectory)
+			framework.ExpectNoError(err, "failed: creating the directory: %s", err)
+
+			// Inject the contents onto the mount
+			createFile := fmt.Sprintf("echo '%s' > '%s'; sync", "filecontents", fileName)
+			_, _, err = e2evolume.PodExec(m.f, pod, createFile)
+			framework.ExpectNoError(err, "failed: writing the contents: %s", err)
+
+			// Delete the created file. This step is mandatory, as the mock driver
+			// won't clean up the contents automatically.
+			defer func() {
+				deleteDir := fmt.Sprintf("rm -fr %s", dirName)
+				_, _, err = e2evolume.PodExec(m.f, pod, deleteDir)
+				framework.ExpectNoError(err, "failed: deleting the directory: %s", err)
+			}()
+
+			// Ensure that the fsGroup matches what we expect
+			if modified {
+				utils.VerifyFSGroupInPod(m.f, fileName, strconv.FormatInt(*fsGroup, 10), pod)
+			} else {
+				utils.VerifyFSGroupInPod(m.f, fileName, "root", pod)
+			}
+		})
+
+		return err == nil
+	})
+
+	framework.ExpectNoError(err, "failed: verifing fsgroup in pod: %s", err)
+}
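Both new helpers (waitUtilFSGroupInPod above and waitUntilPodInfoInLog below) share the same retry idiom: utils.WaitUntil polls, and gomega.InterceptGomegaFailure turns assertion failures into ordinary errors, so a failed attempt simply retries until the driver picks up the updated CSIDriver object. A self-contained sketch of that idiom, with a placeholder check standing in for the real pod-based verification and a plain loop in place of the framework's poller:

package main

import (
	"fmt"
	"time"

	"github.com/onsi/gomega"
)

func main() {
	deadline := time.Now().Add(30 * time.Second)
	var err error
	for time.Now().Before(deadline) {
		// Assertion failures inside the closure are returned as an error
		// instead of aborting, so the loop can keep retrying.
		err = gomega.InterceptGomegaFailure(func() {
			gomega.Expect(driverPickedUpUpdate()).To(gomega.BeTrue())
		})
		if err == nil {
			fmt.Println("condition reached")
			return
		}
		time.Sleep(2 * time.Second) // poll interval, like framework.Poll
	}
	fmt.Println("timed out:", err)
}

// driverPickedUpUpdate is a placeholder for the real check (creating a pod
// and inspecting mounted-volume ownership or the mock driver's logs).
func driverPickedUpUpdate() bool { return true }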
@@ -20,6 +20,7 @@ import (
 	"context"

 	"github.com/onsi/ginkgo/v2"
+	"github.com/onsi/gomega"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	"k8s.io/kubernetes/test/e2e/storage/testsuites"
@@ -34,7 +35,6 @@ var _ = utils.SIGDescribe("CSI Mock workload info", func() {
 	m := newMockDriverSetup(f)
 	ginkgo.Context("CSI workload information using mock driver", func() {
 		var (
-			err          error
 			podInfoTrue  = true
 			podInfoFalse = false
 		)
@@ -89,8 +89,69 @@ var _ = utils.SIGDescribe("CSI Mock workload info", func() {

 			ginkgo.DeferCleanup(m.cleanup)

+			waitUntilPodInfoInLog(ctx, m, test.expectPodInfo, test.expectEphemeral, 1)
+
+			})
+		}
+	})
+
+	ginkgo.Context("CSI PodInfoOnMount Update", func() {
+		var (
+			podInfoTrue  = true
+			podInfoFalse = false
+		)
+		tests := []struct {
+			name              string
+			oldPodInfoOnMount *bool
+			newPodInfoOnMount *bool
+		}{
+			{
+				name:              "should not be passed when update from true to false",
+				oldPodInfoOnMount: &podInfoTrue,
+				newPodInfoOnMount: &podInfoFalse,
+			},
+			{
+				name:              "should not be passed when update from true to nil",
+				oldPodInfoOnMount: &podInfoTrue,
+				newPodInfoOnMount: nil,
+			},
+			{
+				name:              "should be passed when update from false to true",
+				oldPodInfoOnMount: &podInfoFalse,
+				newPodInfoOnMount: &podInfoTrue,
+			},
+			{
+				name:              "should be passed when update from nil to true",
+				oldPodInfoOnMount: nil,
+				newPodInfoOnMount: &podInfoTrue,
+			},
+		}
+		for _, t := range tests {
+			test := t
+			ginkgo.It(t.name, func(ctx context.Context) {
+				m.init(ctx, testParameters{
+					registerDriver: true,
+					podInfo:        test.oldPodInfoOnMount})
+
+				ginkgo.DeferCleanup(m.cleanup)
+
+				waitUntilPodInfoInLog(ctx, m, test.oldPodInfoOnMount != nil && *test.oldPodInfoOnMount, false, 1)
+				m.update(utils.PatchCSIOptions{PodInfo: test.newPodInfoOnMount})
+				waitUntilPodInfoInLog(ctx, m, test.newPodInfoOnMount != nil && *test.newPodInfoOnMount, false, 2)
+
+			})
+		}
+	})
+
+})
+
+func waitUntilPodInfoInLog(ctx context.Context, m *mockDriverSetup, expectPodInfo, expectEphemeral bool, expectedNumNodePublish int) {
+	var err error
+
+	utils.WaitUntil(framework.Poll, framework.PodStartTimeout, func() bool {
+		err = gomega.InterceptGomegaFailure(func() {
 			withVolume := pvcReference
-			if test.expectEphemeral {
+			if expectEphemeral {
 				withVolume = csiEphemeral
 			}
 			_, _, pod := m.createPod(ctx, withVolume)
@@ -103,10 +164,10 @@ var _ = utils.SIGDescribe("CSI Mock workload info", func() {
 			// If we expect an ephemeral volume, the feature has to be enabled.
 			// Otherwise need to check if we expect pod info, because the content
 			// of that depends on whether the feature is enabled or not.
-			csiInlineVolumesEnabled := test.expectEphemeral
-			if test.expectPodInfo {
+			csiInlineVolumesEnabled := expectEphemeral
+			if expectPodInfo {
 				ginkgo.By("checking for CSIInlineVolumes feature")
-				csiInlineVolumesEnabled, err = testsuites.CSIInlineVolumesEnabled(ctx, m.cs, f.Timeouts, f.Namespace.Name)
+				csiInlineVolumesEnabled, err = testsuites.CSIInlineVolumesEnabled(ctx, m.cs, m.f.Timeouts, m.f.Namespace.Name)
 				framework.ExpectNoError(err, "failed to test for CSIInlineVolumes")
 			}

@@ -115,10 +176,12 @@ var _ = utils.SIGDescribe("CSI Mock workload info", func() {
 			framework.ExpectNoError(err, "while deleting")

 			ginkgo.By("Checking CSI driver logs")
-			err = checkPodLogs(ctx, m.driver.GetCalls, pod, test.expectPodInfo, test.expectEphemeral, csiInlineVolumesEnabled, false, 1)
+			err = checkPodLogs(ctx, m.driver.GetCalls, pod, expectPodInfo, expectEphemeral, csiInlineVolumesEnabled, false, expectedNumNodePublish)
 			framework.ExpectNoError(err)
 		})
-	}
+
+		return err == nil
 	})

-})
+	framework.ExpectNoError(err, "failed: verifing PodInfo: %s", err)
+}