commit da735b5415
Merge pull request #113596 from jsafrane/selinux-reconstruction

Reconstruct SELinux mount label
@@ -290,7 +290,7 @@ func (dsw *desiredStateOfWorld) AddPodToVolume(
 	if err != nil {
 		return "", err
 	}
-	klog.V(4).InfoS("volume final SELinux label decided", "volume", volumeSpec.Name(), "label", seLinuxFileLabel)
+	klog.V(4).InfoS("expected volume SELinux label context", "volume", volumeSpec.Name(), "label", seLinuxFileLabel)

 	if vol, volumeExists := dsw.volumesToMount[volumeName]; !volumeExists {
 		var sizeLimit *resource.Quantity
@@ -309,6 +309,7 @@ func (dsw *desiredStateOfWorld) AddPodToVolume(
 	}
 	if !util.VolumeSupportsSELinuxMount(volumeSpec) {
 		// Clear SELinux label for the volume with unsupported access modes.
+		klog.V(4).InfoS("volume does not support SELinux context mount, clearing the expected label", "volume", volumeSpec.Name())
 		seLinuxFileLabel = ""
 	}
 	if seLinuxFileLabel != "" {
@@ -160,7 +160,6 @@ func (rc *reconciler) updateStates(volumesNeedUpdate map[v1.UniqueVolumeName]*gl
 			klog.ErrorS(err, "Could not find device mount path for volume", "volumeName", gvl.volumeName)
 			continue
 		}
-		// TODO(jsafrane): add reconstructed SELinux context
 		err = rc.actualStateOfWorld.MarkDeviceAsMounted(gvl.volumeName, gvl.devicePath, deviceMountPath, "")
 		if err != nil {
 			klog.ErrorS(err, "Could not mark device is mounted to actual state of world", "volume", gvl.volumeName)
@@ -56,6 +56,7 @@ type reconstructedVolume struct {
 	mounter             volumepkg.Mounter
 	deviceMounter       volumepkg.DeviceMounter
 	blockVolumeMapper   volumepkg.BlockVolumeMapper
+	seLinuxMountContext string
 }

 // globalVolumeInfo stores reconstructed volume information
@@ -211,6 +212,9 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
 		return nil, err
 	}
 	volumeSpec := reconstructed.Spec
+	if volumeSpec == nil {
+		return nil, fmt.Errorf("failed to reconstruct volume for plugin %q (spec.Name: %q) pod %q (UID: %q): got nil", volume.pluginName, volume.volumeSpecName, volume.podName, pod.UID)
+	}

 	// We have to find the plugins by volume spec (NOT by plugin name) here
 	// in order to correctly reconstruct ephemeral volume types.
@@ -315,6 +319,7 @@ func (rc *reconciler) reconstructVolume(volume podVolume) (*reconstructedVolume,
 		devicePath:          "",
 		mounter:             volumeMounter,
 		blockVolumeMapper:   volumeMapper,
+		seLinuxMountContext: reconstructed.SELinuxMountContext,
 	}
 	return reconstructedVolume, nil
 }
@@ -112,6 +112,7 @@ func (rc *reconciler) updateStatesNew(reconstructedVolumes map[v1.UniqueVolumeNa
 			klog.ErrorS(err, "Could not add volume information to actual state of world", "volumeName", gvl.volumeName)
 			continue
 		}
+		var seLinuxMountContext string
 		for _, volume := range gvl.podVolumes {
 			markVolumeOpts := operationexecutor.MarkVolumeOpts{
 				PodName:             volume.podName,
@@ -123,6 +124,7 @@ func (rc *reconciler) updateStatesNew(reconstructedVolumes map[v1.UniqueVolumeNa
 				VolumeGidVolume:     volume.volumeGidValue,
 				VolumeSpec:          volume.volumeSpec,
 				VolumeMountState:    operationexecutor.VolumeMountUncertain,
+				SELinuxMountContext: volume.seLinuxMountContext,
 			}

 			_, err = rc.actualStateOfWorld.CheckAndMarkVolumeAsUncertainViaReconstruction(markVolumeOpts)
@@ -130,7 +132,8 @@ func (rc *reconciler) updateStatesNew(reconstructedVolumes map[v1.UniqueVolumeNa
 				klog.ErrorS(err, "Could not add pod to volume information to actual state of world", "pod", klog.KObj(volume.pod))
 				continue
 			}
-			klog.V(2).InfoS("Volume is marked as uncertain and added into the actual state", "pod", klog.KObj(volume.pod), "podName", volume.podName, "volumeName", volume.volumeName)
+			seLinuxMountContext = volume.seLinuxMountContext
+			klog.V(2).InfoS("Volume is marked as uncertain and added into the actual state", "pod", klog.KObj(volume.pod), "podName", volume.podName, "volumeName", volume.volumeName, "seLinuxMountContext", volume.seLinuxMountContext)
 		}
 		// If the volume has device to mount, we mark its device as uncertain.
 		if gvl.deviceMounter != nil || gvl.blockVolumeMapper != nil {
@@ -139,7 +142,7 @@ func (rc *reconciler) updateStatesNew(reconstructedVolumes map[v1.UniqueVolumeNa
 				klog.ErrorS(err, "Could not find device mount path for volume", "volumeName", gvl.volumeName)
 				continue
 			}
-			err = rc.actualStateOfWorld.MarkDeviceAsUncertain(gvl.volumeName, gvl.devicePath, deviceMountPath, "")
+			err = rc.actualStateOfWorld.MarkDeviceAsUncertain(gvl.volumeName, gvl.devicePath, deviceMountPath, seLinuxMountContext)
 			if err != nil {
 				klog.ErrorS(err, "Could not mark device is uncertain to actual state of world", "volumeName", gvl.volumeName, "deviceMountPath", deviceMountPath)
 				continue
@@ -320,6 +320,23 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
 		}
 	}

+	var mountOptions []string
+	if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.MountOptions != nil {
+		mountOptions = spec.PersistentVolume.Spec.MountOptions
+	}
+
+	var seLinuxSupported bool
+	if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
+		support, err := c.plugin.SupportsSELinuxContextMount(spec)
+		if err != nil {
+			return errors.New(log("failed to query for SELinuxMount support: %s", err))
+		}
+		if support && deviceMounterArgs.SELinuxLabel != "" {
+			mountOptions = util.AddSELinuxMountOption(mountOptions, deviceMounterArgs.SELinuxLabel)
+			seLinuxSupported = true
+		}
+	}
+
 	// Store volume metadata for UnmountDevice. Keep it around even if the
 	// driver does not support NodeStage, UnmountDevice still needs it.
 	if err = os.MkdirAll(deviceMountPath, 0750); err != nil {
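For reference, the -o context option that util.AddSELinuxMountOption contributes has the quoted shape asserted by the e2e tests near the end of this diff. A minimal sketch with an assumed label (this is not the helper's actual implementation):

    // Sketch: build the SELinux context mount option for a given label.
    label := "system_u:object_r:container_file_t:s0:c0,c1" // assumed example label
    opts := []string{"noexec", "noatime"}
    opts = append(opts, fmt.Sprintf("context=%q", label))
    // opts == []string{"noexec", "noatime", `context="system_u:object_r:container_file_t:s0:c0,c1"`}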
@@ -330,7 +347,10 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
 	data := map[string]string{
 		volDataKey.volHandle:  csiSource.VolumeHandle,
 		volDataKey.driverName: csiSource.Driver,
-		volDataKey.seLinuxMountContext: deviceMounterArgs.SELinuxLabel,
 	}
+
+	if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) && seLinuxSupported {
+		data[volDataKey.seLinuxMountContext] = deviceMounterArgs.SELinuxLabel
+	}

 	err = saveVolumeData(dataDir, volDataFileName, data)
@@ -364,21 +384,6 @@ func (c *csiAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMo
 		accessMode = spec.PersistentVolume.Spec.AccessModes[0]
 	}

-	var mountOptions []string
-	if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.MountOptions != nil {
-		mountOptions = spec.PersistentVolume.Spec.MountOptions
-	}
-
-	if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
-		support, err := c.plugin.SupportsSELinuxContextMount(spec)
-		if err != nil {
-			return errors.New(log("failed to query for SELinuxMount support: %s", err))
-		}
-		if support && deviceMounterArgs.SELinuxLabel != "" {
-			mountOptions = util.AddSELinuxMountOption(mountOptions, deviceMounterArgs.SELinuxLabel)
-		}
-	}
-
 	var nodeStageFSGroupArg *int64
 	driverSupportsCSIVolumeMountGroup, err := csi.NodeSupportsVolumeMountGroup(ctx)
 	if err != nil {
@@ -275,6 +275,10 @@ func (c *csiMountMgr) SetUpAt(dir string, mounterArgs volume.MounterArgs) error
 		volDataKey.attachmentID: getAttachmentName(volumeHandle, string(c.driverName), nodeName),
 	}

+	if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) && selinuxLabelMount {
+		volData[volDataKey.seLinuxMountContext] = mounterArgs.SELinuxLabel
+	}
+
 	err = saveVolumeData(parentDir, volDataFileName, volData)
 	defer func() {
 		// Only if there was an error and volume operation was considered
@@ -60,7 +60,7 @@ var (
 	testAccount = "test-service-account"
 )

-func prepareVolumeInfoFile(mountPath string, plug *csiPlugin, specVolumeName, volumeID, driverName, lifecycleMode string) error {
+func prepareVolumeInfoFile(mountPath string, plug *csiPlugin, specVolumeName, volumeID, driverName, lifecycleMode, seLinuxMountContext string) error {
 	nodeName := string(plug.host.GetNodeName())
 	volData := map[string]string{
 		volDataKey.specVolID: specVolumeName,
@@ -69,6 +69,7 @@ func prepareVolumeInfoFile(mountPath string, plug *csiPlugin, specVolumeName, vo
 		volDataKey.nodeName:            nodeName,
 		volDataKey.attachmentID:        getAttachmentName(volumeID, driverName, nodeName),
 		volDataKey.volumeLifecycleMode: lifecycleMode,
+		volDataKey.seLinuxMountContext: seLinuxMountContext,
 	}
 	if err := os.MkdirAll(mountPath, 0755); err != nil {
 		return fmt.Errorf("failed to create dir for volume info file: %s", err)
@@ -455,22 +455,23 @@ func (p *csiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (volume.Re
 	if err != nil {
 		return volume.ReconstructedVolume{}, errors.New(log("plugin.ConstructVolumeSpec failed loading volume data using [%s]: %v", mountPath, err))
 	}

 	klog.V(4).Info(log("plugin.ConstructVolumeSpec extracted [%#v]", volData))

-	var spec *volume.Spec
+	var ret volume.ReconstructedVolume
+	if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
+		ret.SELinuxMountContext = volData[volDataKey.seLinuxMountContext]
+	}

 	// If mode is VolumeLifecycleEphemeral, use constructVolSourceSpec
 	// to construct volume source spec. If mode is VolumeLifecyclePersistent,
 	// use constructPVSourceSpec to construct volume construct pv source spec.
 	if storage.VolumeLifecycleMode(volData[volDataKey.volumeLifecycleMode]) == storage.VolumeLifecycleEphemeral {
-		spec = p.constructVolSourceSpec(volData[volDataKey.specVolID], volData[volDataKey.driverName])
-		return volume.ReconstructedVolume{Spec: spec}, nil
+		ret.Spec = p.constructVolSourceSpec(volData[volDataKey.specVolID], volData[volDataKey.driverName])
+		return ret, nil
 	}

-	spec = p.constructPVSourceSpec(volData[volDataKey.specVolID], volData[volDataKey.driverName], volData[volDataKey.volHandle])
-	return volume.ReconstructedVolume{
-		Spec: spec,
-	}, nil
+	ret.Spec = p.constructPVSourceSpec(volData[volDataKey.specVolID], volData[volDataKey.driverName], volData[volDataKey.volHandle])
+	return ret, nil
 }

 // constructVolSourceSpec constructs volume.Spec with CSIVolumeSource
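Together with the attacher and mounter changes above, the SELinux context now round-trips through the per-volume data file. A condensed sketch of the flow (paraphrased; loadVolumeData stands in for whatever loader the plugin uses and is an assumed name):

    // Mount path (feature gate on, driver supports -o context):
    data[volDataKey.seLinuxMountContext] = deviceMounterArgs.SELinuxLabel
    saveVolumeData(dataDir, volDataFileName, data)

    // Reconstruction path after a kubelet restart:
    volData, _ := loadVolumeData(mountPath, volDataFileName) // assumed loader name
    ret.SELinuxMountContext = volData[volDataKey.seLinuxMountContext]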
@@ -29,9 +29,12 @@ import (
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/wait"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
 	fakeclient "k8s.io/client-go/kubernetes/fake"
 	utiltesting "k8s.io/client-go/util/testing"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"
 )
@@ -317,15 +320,19 @@ func TestPluginConstructVolumeSpec(t *testing.T) {

 	testCases := []struct {
 		name                      string
+		seLinuxMountEnabled       bool
 		originSpec                *volume.Spec
+		originSELinuxMountContext string
 		specVolID                 string
 		volHandle                 string
+		expectedSELinuxContext    string
 		podUID                    types.UID
 	}{
 		{
 			name:      "construct spec1 from original persistent spec",
 			specVolID: "test.vol.id",
 			volHandle: "testvol-handle1",

 			originSpec: volume.NewSpecFromPersistentVolume(makeTestPV("test.vol.id", 20, testDriver, "testvol-handle1"), true),
 			podUID:     types.UID(fmt.Sprintf("%08X", rand.Uint64())),
 		},
@@ -336,12 +343,35 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
 			originSpec: volume.NewSpecFromPersistentVolume(makeTestPV("spec2", 20, testDriver, "handle2"), true),
 			podUID:     types.UID(fmt.Sprintf("%08X", rand.Uint64())),
 		},
+		{
+			name:                      "construct SELinux context from original persistent spec when the feature is enabled",
+			seLinuxMountEnabled:       true,
+			specVolID:                 "spec3",
+			volHandle:                 "handle3",
+			originSELinuxMountContext: "system_u:object_r:container_file_t:s0:c314,c894",
+			originSpec:                volume.NewSpecFromPersistentVolume(makeTestPV("spec3", 20, testDriver, "handle3"), true),
+			expectedSELinuxContext:    "system_u:object_r:container_file_t:s0:c314,c894",
+			podUID:                    types.UID(fmt.Sprintf("%08X", rand.Uint64())),
+		},
+		{
+			name:                      "construct empty SELinux from original persistent spec when the feature is disabled",
+			seLinuxMountEnabled:       false,
+			specVolID:                 "spec4",
+			volHandle:                 "handle4",
+			originSELinuxMountContext: "system_u:object_r:container_file_t:s0:c314,c894",
+			originSpec:                volume.NewSpecFromPersistentVolume(makeTestPV("spec4", 20, testDriver, "handle4"), true),
+			expectedSELinuxContext:    "", // The context is cleared when the feature gate is off
+			podUID:                    types.UID(fmt.Sprintf("%08X", rand.Uint64())),
+		},
 	}

 	registerFakePlugin(testDriver, "endpoint", []string{"1.0.0"}, t)

 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
+			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ReadWriteOncePod, tc.seLinuxMountEnabled)()
+			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.SELinuxMountReadWriteOncePod, tc.seLinuxMountEnabled)()
+
 			mounter, err := plug.NewMounter(
 				tc.originSpec,
 				&api.Pod{ObjectMeta: meta.ObjectMeta{UID: tc.podUID, Namespace: testns}},
@@ -356,7 +386,7 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
 			csiMounter := mounter.(*csiMountMgr)

 			mountPath := filepath.Dir(csiMounter.GetPath())
-			err = prepareVolumeInfoFile(mountPath, plug, tc.originSpec.Name(), csiMounter.volumeID, testDriver, string(csiMounter.volumeLifecycleMode))
+			err = prepareVolumeInfoFile(mountPath, plug, tc.originSpec.Name(), csiMounter.volumeID, testDriver, string(csiMounter.volumeLifecycleMode), tc.originSELinuxMountContext)
 			if err != nil {
 				t.Fatalf("failed to save fake volume info file: %s", err)
 			}
@@ -395,6 +425,10 @@ func TestPluginConstructVolumeSpec(t *testing.T) {
 			if rec.Spec.Name() != tc.specVolID {
 				t.Errorf("Unexpected spec name constructed %s", rec.Spec.Name())
 			}
+
+			if rec.SELinuxMountContext != tc.expectedSELinuxContext {
+				t.Errorf("Expected SELinux context %q, got %q", tc.expectedSELinuxContext, rec.SELinuxMountContext)
+			}
 		})
 	}
 }
@@ -490,7 +524,7 @@ func TestPluginConstructVolumeSpecWithInline(t *testing.T) {
 			csiMounter := mounter.(*csiMountMgr)

 			mountPath := filepath.Dir(csiMounter.GetPath())
-			err = prepareVolumeInfoFile(mountPath, plug, tc.originSpec.Name(), csiMounter.volumeID, testDriver, string(csiMounter.volumeLifecycleMode))
+			err = prepareVolumeInfoFile(mountPath, plug, tc.originSpec.Name(), csiMounter.volumeID, testDriver, string(csiMounter.volumeLifecycleMode), "")
 			if err != nil {
 				t.Fatalf("failed to save fake volume info file: %s", err)
 			}
@@ -283,10 +283,25 @@ func (plugin *fcPlugin) ConstructVolumeSpec(volumeName, mountPath string) (volum
 			FC: &v1.FCVolumeSource{WWIDs: wwids, Lun: &lun, TargetWWNs: wwns},
 		},
 	}

+	var mountContext string
+	if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
+		kvh, ok := plugin.host.(volume.KubeletVolumeHost)
+		if !ok {
+			return volume.ReconstructedVolume{}, fmt.Errorf("plugin volume host does not implement KubeletVolumeHost interface")
+		}
+		hu := kvh.GetHostUtil()
+		mountContext, err = hu.GetSELinuxMountContext(mountPath)
+		if err != nil {
+			return volume.ReconstructedVolume{}, err
+		}
+	}
+
 	klog.V(5).Infof("ConstructVolumeSpec: TargetWWNs: %v, Lun: %v, WWIDs: %v",
 		fcVolume.VolumeSource.FC.TargetWWNs, *fcVolume.VolumeSource.FC.Lun, fcVolume.VolumeSource.FC.WWIDs)
 	return volume.ReconstructedVolume{
 		Spec: volume.NewSpecFromVolume(fcVolume),
+		SELinuxMountContext: mountContext,
 	}, nil
 }
@@ -279,8 +279,23 @@ func (plugin *iscsiPlugin) ConstructVolumeSpec(volumeName, mountPath string) (vo
 			},
 		},
 	}

+	var mountContext string
+	if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
+		kvh, ok := plugin.host.(volume.KubeletVolumeHost)
+		if !ok {
+			return volume.ReconstructedVolume{}, fmt.Errorf("plugin volume host does not implement KubeletVolumeHost interface")
+		}
+		hu := kvh.GetHostUtil()
+		mountContext, err = hu.GetSELinuxMountContext(mountPath)
+		if err != nil {
+			return volume.ReconstructedVolume{}, err
+		}
+	}
+
 	return volume.ReconstructedVolume{
 		Spec: volume.NewSpecFromVolume(iscsiVolume),
+		SELinuxMountContext: mountContext,
 	}, nil
 }
@@ -573,7 +573,11 @@ type VolumeConfig struct {
 // ReconstructedVolume contains information about a volume reconstructed by
 // ConstructVolumeSpec().
 type ReconstructedVolume struct {
+	// Spec is the volume spec of a mounted volume
 	Spec *Spec
+	// SELinuxMountContext is value of -o context=XYZ mount option.
+	// If empty, no such mount option is used.
+	SELinuxMountContext string
 }

 // NewSpecFromVolume creates an Spec from an v1.Volume
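An illustrative value of the extended struct, with a label in the format used by the tests in this PR (the label itself is an assumed example):

    rv := volume.ReconstructedVolume{
        Spec:                spec, // *volume.Spec returned by ConstructVolumeSpec
        SELinuxMountContext: "system_u:object_r:container_file_t:s0:c314,c894",
    }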
@@ -430,8 +430,18 @@ func (plugin *rbdPlugin) ConstructVolumeSpec(volumeName, mountPath string) (volu
 			},
 		},
 	}

+	var mountContext string
+	if utilfeature.DefaultFeatureGate.Enabled(features.SELinuxMountReadWriteOncePod) {
+		mountContext, err = hu.GetSELinuxMountContext(mountPath)
+		if err != nil {
+			return volume.ReconstructedVolume{}, err
+		}
+	}
+
 	return volume.ReconstructedVolume{
 		Spec: volume.NewSpecFromVolume(rbdVolume),
+		SELinuxMountContext: mountContext,
 	}, nil
 }
@@ -116,3 +116,9 @@ func (hu *FakeHostUtil) GetSELinuxSupport(pathname string) (bool, error) {
 func (hu *FakeHostUtil) GetMode(pathname string) (os.FileMode, error) {
 	return 0, errors.New("not implemented")
 }
+
+// GetSELinuxMountContext returns value of -o context=XYZ mount option on
+// given mount point.
+func (hu *FakeHostUtil) GetSELinuxMountContext(pathname string) (string, error) {
+	return "", errors.New("not implemented")
+}
@@ -68,6 +68,9 @@ type HostUtils interface {
 	GetSELinuxSupport(pathname string) (bool, error)
 	// GetMode returns permissions of the path.
 	GetMode(pathname string) (os.FileMode, error)
+	// GetSELinuxMountContext returns value of -o context=XYZ mount option on
+	// given mount point.
+	GetSELinuxMountContext(pathname string) (string, error)
 }

 // Compile-time check to ensure all HostUtil implementations satisfy
@@ -299,3 +299,35 @@ func GetModeLinux(pathname string) (os.FileMode, error) {
 	}
 	return info.Mode(), nil
 }
+
+// GetSELinuxMountContext returns value of -o context=XYZ mount option on
+// given mount point.
+func (hu *HostUtil) GetSELinuxMountContext(pathname string) (string, error) {
+	return getSELinuxMountContext(pathname, procMountInfoPath, selinux.GetEnabled)
+}
+
+// getSELinuxMountContext is the common implementation of GetSELinuxMountContext
+// on Linux. Using an extra function for unit tests.
+func getSELinuxMountContext(path string, mountInfoFilename string, selinuxEnabled seLinuxEnabledFunc) (string, error) {
+	// Skip /proc/mounts parsing if SELinux is disabled.
+	if !selinuxEnabled() {
+		return "", nil
+	}
+
+	info, err := findMountInfo(path, mountInfoFilename)
+	if err != nil {
+		return "", err
+	}
+
+	for _, opt := range info.SuperOptions {
+		if !strings.HasPrefix(opt, "context=") {
+			continue
+		}
+		// Remove the "context=" prefix.
+		context := strings.TrimPrefix(opt, "context=")
+		// Remove double quotes.
+		context = strings.Trim(context, "\"")
+		return context, nil
+	}
+	return "", nil
+}
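A hypothetical caller of the new helper (not part of this PR); the path and the returned label mirror the test fixture that follows:

    // hu is a hostutil.HostUtil; the pod UID in the path is illustrative.
    context, err := hu.GetSELinuxMountContext("/var/lib/kubelet/pods/d4f3b306-ad4c-4f7a-8983-b5b228039a8c/volumes/kubernetes.io~iscsi/mypv")
    if err != nil {
        return err // mountinfo could not be read or parsed
    }
    // context == "system_u:object_r:container_file_t:s0:c314,c894" when the volume was
    // mounted with -o context=..., and "" otherwise (including when SELinux is disabled).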
@@ -331,3 +331,60 @@ func writeFile(content string) (string, string, error) {
 	}
 	return tempDir, filename, nil
 }
+
+func TestGetSELinuxMountContext(t *testing.T) {
+	info :=
+		`840 60 8:0 / /var/lib/kubelet/pods/d4f3b306-ad4c-4f7a-8983-b5b228039a8c/volumes/kubernetes.io~iscsi/mypv rw,relatime shared:421 - ext4 /dev/sda rw,context="system_u:object_r:container_file_t:s0:c314,c894"
+224 62 253:0 /var/lib/docker/devicemapper/test/shared /var/lib/docker/devicemapper/test/shared rw,relatime master:1 shared:44 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered
+82 62 0:43 / /var/lib/foo rw,relatime shared:32 - tmpfs tmpfs rw
+83 63 0:44 / /var/lib/bar rw,relatime - tmpfs tmpfs rw
+`
+	tempDir, filename, err := writeFile(info)
+	if err != nil {
+		t.Fatalf("cannot create temporary file: %v", err)
+	}
+	defer os.RemoveAll(tempDir)
+
+	tests := []struct {
+		name            string
+		mountPoint      string
+		seLinuxEnabled  bool
+		expectedContext string
+	}{
+		{
+			"no context",
+			"/var/lib/foo",
+			true,
+			"",
+		},
+		{
+			"with context with SELinux",
+			"/var/lib/kubelet/pods/d4f3b306-ad4c-4f7a-8983-b5b228039a8c/volumes/kubernetes.io~iscsi/mypv",
+			true,
+			"system_u:object_r:container_file_t:s0:c314,c894",
+		},
+		{
+			"with context with no SELinux",
+			"/var/lib/kubelet/pods/d4f3b306-ad4c-4f7a-8983-b5b228039a8c/volumes/kubernetes.io~iscsi/mypv",
+			false,
+			"",
+		},
+		{
+			"no context with seclabel",
+			"/var/lib/docker/devicemapper/test/shared",
+			true,
+			"",
+		},
+	}
+
+	for _, test := range tests {
+		out, err := getSELinuxMountContext(test.mountPoint, filename, func() bool { return test.seLinuxEnabled })
+		if err != nil {
+			t.Errorf("Test %s failed with error: %s", test.name, err)
+		}
+		if test.expectedContext != out {
+			t.Errorf("Test %s failed: expected %v, got %v", test.name, test.expectedContext, out)
+		}
+	}
+}
@@ -101,3 +101,9 @@ func (hu *HostUtil) GetMode(pathname string) (os.FileMode, error) {
 func getDeviceNameFromMount(mounter mount.Interface, mountPath, pluginMountDir string) (string, error) {
 	return "", errUnsupported
 }
+
+// GetSELinuxMountContext returns value of -o context=XYZ mount option on
+// given mount point.
+func (hu *HostUtil) GetSELinuxMountContext(pathname string) (string, error) {
+	return "", errUnsupported
+}
@@ -123,3 +123,9 @@ func (hu *HostUtil) GetMode(pathname string) (os.FileMode, error) {
 	}
 	return info.Mode(), nil
 }
+
+// GetSELinuxMountContext returns value of -o context=XYZ mount option on
+// given mount point.
+func (hu *HostUtil) GetSELinuxMountContext(pathname string) (string, error) {
+	return "", nil
+}
@@ -112,6 +112,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		tokenRequests      []storagev1.TokenRequest
 		requiresRepublish  *bool
 		fsGroupPolicy      *storagev1.FSGroupPolicy
+		enableSELinuxMount *bool
 	}

 	type mockDriverSetup struct {
@@ -155,6 +156,7 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			TokenRequests:      tp.tokenRequests,
 			RequiresRepublish:  tp.requiresRepublish,
 			FSGroupPolicy:      tp.fsGroupPolicy,
+			EnableSELinuxMount: tp.enableSELinuxMount,
 		}

 		// At the moment, only tests which need hooks are
@@ -270,7 +272,6 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			DelayBinding:         m.tp.lateBinding,
 			AllowVolumeExpansion: m.tp.enableResizing,
 		}
-
 		class, claim, pod := startBusyBoxPod(f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, fsGroup)

 		if class != nil {
@@ -287,6 +288,38 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 		return class, claim, pod
 	}

+	createPodWithSELinux := func(accessModes []v1.PersistentVolumeAccessMode, mountOptions []string, seLinuxOpts *v1.SELinuxOptions) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
+		ginkgo.By("Creating pod with SELinux context")
+		nodeSelection := m.config.ClientNodeSelection
+		sc := m.driver.GetDynamicProvisionStorageClass(m.config, "")
+		scTest := testsuites.StorageClassTest{
+			Name:                 m.driver.GetDriverInfo().Name,
+			Provisioner:          sc.Provisioner,
+			Parameters:           sc.Parameters,
+			ClaimSize:            "1Gi",
+			ExpectedSize:         "1Gi",
+			DelayBinding:         m.tp.lateBinding,
+			AllowVolumeExpansion: m.tp.enableResizing,
+			MountOptions:         mountOptions,
+		}
+		class, claim := createClaim(f.ClientSet, scTest, nodeSelection, m.tp.scName, f.Namespace.Name, accessModes)
+		pod, err := startPausePodWithSELinuxOptions(f.ClientSet, claim, nodeSelection, f.Namespace.Name, seLinuxOpts)
+		framework.ExpectNoError(err, "Failed to create pause pod with SELinux context %s: %v", seLinuxOpts, err)
+
+		if class != nil {
+			m.sc[class.Name] = class
+		}
+		if claim != nil {
+			m.pvcs = append(m.pvcs, claim)
+		}
+
+		if pod != nil {
+			m.pods = append(m.pods, pod)
+		}
+
+		return class, claim, pod
+	}
+
 	cleanup := func() {
 		cs := f.ClientSet
 		var errs []error
@@ -1978,6 +2011,94 @@ var _ = utils.SIGDescribe("CSI mock volume", func() {
 			})
 		}
 	})

+	ginkgo.Context("SELinuxMount [LinuxOnly][Feature:SELinuxMountReadWriteOncePod]", func() {
+		// Make sure all options are set so system specific defaults are not used.
+		seLinuxOpts := v1.SELinuxOptions{
+			User:  "system_u",
+			Role:  "object_r",
+			Type:  "container_file_t",
+			Level: "s0:c0,c1",
+		}
+		seLinuxMountOption := "context=\"system_u:object_r:container_file_t:s0:c0,c1\""
+
+		tests := []struct {
+			name                 string
+			seLinuxEnabled       bool
+			seLinuxSetInPod      bool
+			mountOptions         []string
+			volumeMode           v1.PersistentVolumeAccessMode
+			expectedMountOptions []string
+		}{
+			{
+				name:                 "should pass SELinux mount option for RWOP volume and Pod with SELinux context set",
+				seLinuxEnabled:       true,
+				seLinuxSetInPod:      true,
+				volumeMode:           v1.ReadWriteOncePod,
+				expectedMountOptions: []string{seLinuxMountOption},
+			},
+			{
+				name:                 "should add SELinux mount option to existing mount options",
+				seLinuxEnabled:       true,
+				seLinuxSetInPod:      true,
+				mountOptions:         []string{"noexec", "noatime"},
+				volumeMode:           v1.ReadWriteOncePod,
+				expectedMountOptions: []string{"noexec", "noatime", seLinuxMountOption},
+			},
+			{
+				name:                 "should not pass SELinux mount option for RWO volume",
+				seLinuxEnabled:       true,
+				seLinuxSetInPod:      true,
+				volumeMode:           v1.ReadWriteOnce,
+				expectedMountOptions: nil,
+			},
+			{
+				name:                 "should not pass SELinux mount option for Pod without SELinux context",
+				seLinuxEnabled:       true,
+				seLinuxSetInPod:      false,
+				volumeMode:           v1.ReadWriteOncePod,
+				expectedMountOptions: nil,
+			},
+			{
+				name:                 "should not pass SELinux mount option for CSI driver that does not support SELinux mount",
+				seLinuxEnabled:       false,
+				seLinuxSetInPod:      true,
+				volumeMode:           v1.ReadWriteOncePod,
+				expectedMountOptions: nil,
+			},
+		}
+		for _, t := range tests {
+			t := t
+			ginkgo.It(t.name, func() {
+				if framework.NodeOSDistroIs("windows") {
+					e2eskipper.Skipf("SELinuxMount is only applied on linux nodes -- skipping")
+				}
+				var nodeStageMountOpts, nodePublishMountOpts []string
+				init(testParameters{
+					disableAttach:      true,
+					registerDriver:     true,
+					enableSELinuxMount: &t.seLinuxEnabled,
+					hooks:              createSELinuxMountPreHook(&nodeStageMountOpts, &nodePublishMountOpts),
+				})
+				defer cleanup()
+
+				accessModes := []v1.PersistentVolumeAccessMode{t.volumeMode}
+				var podSELinuxOpts *v1.SELinuxOptions
+				if t.seLinuxSetInPod {
+					// Make sure all options are set so system specific defaults are not used.
+					podSELinuxOpts = &seLinuxOpts
+				}
+
+				_, _, pod := createPodWithSELinux(accessModes, t.mountOptions, podSELinuxOpts)
+				err := e2epod.WaitForPodNameRunningInNamespace(m.cs, pod.Name, pod.Namespace)
+				framework.ExpectNoError(err, "failed to start pod")
+
+				framework.ExpectEqual(nodeStageMountOpts, t.expectedMountOptions, "Expect NodeStageVolumeRequest.VolumeCapability.MountVolume.MountFlags to equal %q; got: %q", t.expectedMountOptions, nodeStageMountOpts)
+				framework.ExpectEqual(nodePublishMountOpts, t.expectedMountOptions, "Expect NodePublishVolumeRequest.VolumeCapability.MountVolume.MountFlags to equal %q; got: %q", t.expectedMountOptions, nodePublishMountOpts)
+			})
+		}
+	})
+
 })

 func deleteSnapshot(cs clientset.Interface, config *storageframework.PerTestConfig, snapshot *unstructured.Unstructured) {
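The enableSELinuxMount knob above ends up on the mock driver's CSIDriver object (see the PatchCSIDeployment hunk below). A sketch of the equivalent storage/v1 API field, assuming the standard CSIDriver type:

    seLinuxMount := true
    csiDriver := storagev1.CSIDriver{
        Spec: storagev1.CSIDriverSpec{
            // SELinuxMount tells the kubelet the driver supports -o context mounts.
            SELinuxMount: &seLinuxMount,
        },
    }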
@@ -2122,12 +2243,13 @@ func createSC(cs clientset.Interface, t testsuites.StorageClassTest, scName, ns
 	return class
 }

-func createClaim(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim) {
+func createClaim(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string, accessModes []v1.PersistentVolumeAccessMode) (*storagev1.StorageClass, *v1.PersistentVolumeClaim) {
 	class := createSC(cs, t, scName, ns)
 	claim := e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
 		ClaimSize:        t.ClaimSize,
 		StorageClassName: &(class.Name),
 		VolumeMode:       &t.VolumeMode,
+		AccessModes:      accessModes,
 	}, ns)
 	claim, err := cs.CoreV1().PersistentVolumeClaims(ns).Create(context.TODO(), claim, metav1.CreateOptions{})
 	framework.ExpectNoError(err, "Failed to create claim: %v", err)
@@ -2141,7 +2263,7 @@ func createClaim(cs clientset.Interface, t testsuites.StorageClassTest, node e2e
 }

 func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
-	class, claim := createClaim(cs, t, node, scName, ns)
+	class, claim := createClaim(cs, t, node, scName, ns, nil)

 	pod, err := startPausePodWithClaim(cs, claim, node, ns)
 	framework.ExpectNoError(err, "Failed to create pause pod: %v", err)
@@ -2149,7 +2271,7 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, node e
 }

 func startBusyBoxPod(cs clientset.Interface, t testsuites.StorageClassTest, node e2epod.NodeSelection, scName, ns string, fsGroup *int64) (*storagev1.StorageClass, *v1.PersistentVolumeClaim, *v1.Pod) {
-	class, claim := createClaim(cs, t, node, scName, ns)
+	class, claim := createClaim(cs, t, node, scName, ns, nil)
 	pod, err := startBusyBoxPodWithClaim(cs, claim, node, ns, fsGroup)
 	framework.ExpectNoError(err, "Failed to create busybox pod: %v", err)
 	return class, claim, pod
@@ -2276,6 +2398,45 @@ func startBusyBoxPodWithVolumeSource(cs clientset.Interface, volumeSource v1.Vol
 	return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
 }

+func startPausePodWithSELinuxOptions(cs clientset.Interface, pvc *v1.PersistentVolumeClaim, node e2epod.NodeSelection, ns string, seLinuxOpts *v1.SELinuxOptions) (*v1.Pod, error) {
+	pod := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			GenerateName: "pvc-volume-tester-",
+		},
+		Spec: v1.PodSpec{
+			SecurityContext: &v1.PodSecurityContext{
+				SELinuxOptions: seLinuxOpts,
+			},
+			Containers: []v1.Container{
+				{
+					Name:  "volume-tester",
+					Image: imageutils.GetE2EImage(imageutils.Pause),
+					VolumeMounts: []v1.VolumeMount{
+						{
+							Name:      "my-volume",
+							MountPath: "/mnt/test",
+						},
+					},
+				},
+			},
+			RestartPolicy: v1.RestartPolicyNever,
+			Volumes: []v1.Volume{
+				{
+					Name: "my-volume",
+					VolumeSource: v1.VolumeSource{
+						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+							ClaimName: pvc.Name,
+							ReadOnly:  false,
+						},
+					},
+				},
+			},
+		},
+	}
+	e2epod.SetNodeSelection(&pod.Spec, node)
+	return cs.CoreV1().Pods(ns).Create(context.TODO(), pod, metav1.CreateOptions{})
+}
+
 // checkPodLogs tests that NodePublish was called with expected volume_context and (for ephemeral inline volumes)
 // has the matching NodeUnpublish
 func checkPodLogs(getCalls func() ([]drivers.MockCSICall, error), pod *v1.Pod, expectPodInfo, ephemeralVolume, csiInlineVolumesEnabled, csiServiceAccountTokenEnabled bool, expectedNumNodePublish int) error {
@@ -2501,6 +2662,30 @@ func createFSGroupRequestPreHook(nodeStageFsGroup, nodePublishFsGroup *string) *
 	}
 }

+// createSELinuxMountPreHook creates a hook that records the mountOptions passed in
+// through NodeStageVolume and NodePublishVolume calls.
+func createSELinuxMountPreHook(nodeStageMountOpts, nodePublishMountOpts *[]string) *drivers.Hooks {
+	return &drivers.Hooks{
+		Pre: func(ctx context.Context, fullMethod string, request interface{}) (reply interface{}, err error) {
+			nodeStageRequest, ok := request.(*csipbv1.NodeStageVolumeRequest)
+			if ok {
+				mountVolume := nodeStageRequest.GetVolumeCapability().GetMount()
+				if mountVolume != nil {
+					*nodeStageMountOpts = mountVolume.MountFlags
+				}
+			}
+			nodePublishRequest, ok := request.(*csipbv1.NodePublishVolumeRequest)
+			if ok {
+				mountVolume := nodePublishRequest.GetVolumeCapability().GetMount()
+				if mountVolume != nil {
+					*nodePublishMountOpts = mountVolume.MountFlags
+				}
+			}
+			return nil, nil
+		},
+	}
+}
+
 type snapshotMetricsTestConfig struct {
 	// expected values
 	metricName string
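Usage, as wired into the SELinuxMount tests earlier in this diff (repeated here for orientation):

    var nodeStageMountOpts, nodePublishMountOpts []string
    init(testParameters{
        disableAttach:      true,
        registerDriver:     true,
        enableSELinuxMount: &t.seLinuxEnabled,
        hooks:              createSELinuxMountPreHook(&nodeStageMountOpts, &nodePublishMountOpts),
    })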
@@ -309,6 +309,7 @@ type mockCSIDriver struct {
 	embedded           bool
 	calls              MockCSICalls
 	embeddedCSIDriver  *mockdriver.CSIDriver
+	enableSELinuxMount *bool

 	// Additional values set during PrepareTest
 	clientSet clientset.Interface
@@ -355,6 +356,7 @@ type CSIMockDriverOpts struct {
 	TokenRequests      []storagev1.TokenRequest
 	RequiresRepublish  *bool
 	FSGroupPolicy      *storagev1.FSGroupPolicy
+	EnableSELinuxMount *bool

 	// Embedded defines whether the CSI mock driver runs
 	// inside the cluster (false, the default) or just a proxy
@@ -507,6 +509,7 @@ func InitMockCSIDriver(driverOpts CSIMockDriverOpts) MockCSITestDriver {
 		requiresRepublish:      driverOpts.RequiresRepublish,
 		fsGroupPolicy:          driverOpts.FSGroupPolicy,
 		enableVolumeMountGroup: driverOpts.EnableVolumeMountGroup,
+		enableSELinuxMount:     driverOpts.EnableSELinuxMount,
 		embedded:               driverOpts.Embedded,
 		hooks:                  driverOpts.Hooks,
 	}
@@ -657,6 +660,7 @@ func (m *mockCSIDriver) PrepareTest(f *framework.Framework) *storageframework.Pe
 		TokenRequests:     m.tokenRequests,
 		RequiresRepublish: m.requiresRepublish,
 		FSGroupPolicy:     m.fsGroupPolicy,
+		SELinuxMount:      m.enableSELinuxMount,
 	}
 	cleanup, err := utils.CreateFromManifests(f, m.driverNamespace, func(item interface{}) error {
 		if err := utils.PatchCSIDeployment(config.Framework, o, item); err != nil {
@@ -18,7 +18,6 @@ package testsuites

 import (
 	"github.com/onsi/ginkgo/v2"
-
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/errors"
 	clientset "k8s.io/client-go/kubernetes"
@@ -90,7 +89,7 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa
 	f := framework.NewFrameworkWithCustomTimeouts("disruptive", storageframework.GetDriverTimeouts(driver))
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged

-	init := func() {
+	init := func(accessModes []v1.PersistentVolumeAccessMode) {
 		l = local{}
 		l.ns = f.Namespace
 		l.cs = f.ClientSet
@@ -99,7 +98,20 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa
 		l.config = driver.PrepareTest(f)

 		testVolumeSizeRange := s.GetTestSuiteInfo().SupportedSizeRange
-		l.resource = storageframework.CreateVolumeResource(driver, l.config, pattern, testVolumeSizeRange)
+		if accessModes == nil {
+			l.resource = storageframework.CreateVolumeResource(
+				driver,
+				l.config,
+				pattern,
+				testVolumeSizeRange)
+		} else {
+			l.resource = storageframework.CreateVolumeResourceWithAccessModes(
+				driver,
+				l.config,
+				pattern,
+				testVolumeSizeRange,
+				accessModes)
+		}
 	}

 	cleanup := func() {
@@ -120,13 +132,13 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa
 		framework.ExpectNoError(errors.NewAggregate(errs), "while cleaning up resource")
 	}

-	type testBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod)
-	type disruptiveTest struct {
+	type singlePodTestBody func(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod)
+	type singlePodTest struct {
 		testItStmt string
-		runTestFile testBody
-		runTestBlock testBody
+		runTestFile singlePodTestBody
+		runTestBlock singlePodTestBody
 	}
-	disruptiveTestTable := []disruptiveTest{
+	singlePodTests := []singlePodTest{
 		{
 			testItStmt: "Should test that pv written before kubelet restart is readable after restart.",
 			runTestFile: storageutils.TestKubeletRestartsAndRestoresMount,
@@ -144,12 +156,12 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa
 		},
 	}

-	for _, test := range disruptiveTestTable {
-		func(t disruptiveTest) {
+	for _, test := range singlePodTests {
+		func(t singlePodTest) {
 			if (pattern.VolMode == v1.PersistentVolumeBlock && t.runTestBlock != nil) ||
 				(pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil) {
 				ginkgo.It(t.testItStmt, func() {
-					init()
+					init(nil)
 					defer cleanup()

 					var err error
@ -182,4 +194,80 @@ func (s *disruptiveTestSuite) DefineTests(driver storageframework.TestDriver, pa
|
|||||||
}
|
}
|
||||||
}(test)
|
}(test)
|
||||||
}
|
}
|
||||||
+	type multiplePodTestBody func(c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod)
+	type multiplePodTest struct {
+		testItStmt            string
+		changeSELinuxContexts bool
+		runTestFile           multiplePodTestBody
+	}
+	multiplePodTests := []multiplePodTest{
+		{
+			testItStmt: "Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod when kubelet returns.",
+			runTestFile: func(c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) {
+				storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, false, false, pod2)
+			},
+		},
+		{
+			testItStmt: "Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod when kubelet returns.",
+			runTestFile: func(c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) {
+				storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, true, false, pod2)
+			},
+		},
+		{
+			testItStmt: "Should test that pv used in a pod that is deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinuxMountReadWriteOncePod].",
+			changeSELinuxContexts: true,
+			runTestFile: func(c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) {
+				storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, false, false, pod2)
+			},
+		},
+		{
+			testItStmt: "Should test that pv used in a pod that is force deleted while the kubelet is down is usable by a new pod with a different SELinux context when kubelet returns [Feature:SELinuxMountReadWriteOncePod].",
+			changeSELinuxContexts: true,
+			runTestFile: func(c clientset.Interface, f *framework.Framework, pod1, pod2 *v1.Pod) {
+				storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, pod1, true, false, pod2)
+			},
+		},
+	}
+
+	for _, test := range multiplePodTests {
+		func(t multiplePodTest) {
+			if pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil {
+				ginkgo.It(t.testItStmt, func() {
+					init([]v1.PersistentVolumeAccessMode{v1.ReadWriteOncePod})
+					defer cleanup()
+
+					var err error
+					var pvcs []*v1.PersistentVolumeClaim
+					var inlineSources []*v1.VolumeSource
+					if pattern.VolType == storageframework.InlineVolume {
+						inlineSources = append(inlineSources, l.resource.VolSource)
+					} else {
+						pvcs = append(pvcs, l.resource.Pvc)
+					}
+					ginkgo.By("Creating a pod with pvc")
+					podConfig := e2epod.Config{
+						NS:                  l.ns.Name,
+						PVCs:                pvcs,
+						InlineVolumeSources: inlineSources,
+						SeLinuxLabel:        e2epv.SELinuxLabel,
+						NodeSelection:       l.config.ClientNodeSelection,
+						ImageID:             e2epod.GetDefaultTestImageID(),
+					}
+					l.pod, err = e2epod.CreateSecPodWithNodeSelection(l.cs, &podConfig, f.Timeouts.PodStart)
+					framework.ExpectNoError(err, "While creating pods for kubelet restart test")
+					if t.changeSELinuxContexts {
+						// Different than e2epv.SELinuxLabel
+						podConfig.SeLinuxLabel = &v1.SELinuxOptions{Level: "s0:c98,c99"}
+					}
+					pod2, err := e2epod.MakeSecPod(&podConfig)
+					// Instantly schedule the second pod on the same node as the first one.
+					pod2.Spec.NodeName = l.pod.Spec.NodeName
+					framework.ExpectNoError(err, "While creating second pod for kubelet restart test")
+					if pattern.VolMode == v1.PersistentVolumeFilesystem && t.runTestFile != nil {
+						t.runTestFile(l.cs, l.config.Framework, l.pod, pod2)
+					}
+				})
+			}
+		}(test)
+	}
 }
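The loop over multiplePodTests reuses the suite's existing idiom of passing each entry through an immediately invoked func(t multiplePodTest), so that every ginkgo.It closure captures its own copy of the table entry rather than the shared loop variable. A minimal, self-contained sketch of the same capture pattern, using the standard testing package instead of the e2e framework (all names here are illustrative):

package storage

import "testing"

type tableEntry struct {
	name                  string
	changeSELinuxContexts bool
}

func TestCapturePattern(t *testing.T) {
	entries := []tableEntry{
		{name: "same SELinux context"},
		{name: "different SELinux context", changeSELinuxContexts: true},
	}
	for _, entry := range entries {
		// Before Go 1.22 the loop variable is shared across iterations, so
		// capture a copy; the suite does the same via func(t multiplePodTest){...}(test).
		entry := entry
		t.Run(entry.name, func(t *testing.T) {
			t.Logf("changeSELinuxContexts=%v", entry.changeSELinuxContexts)
		})
	}
}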
@@ -66,6 +66,7 @@ type StorageClassTest struct {
 	VolumeMode           v1.PersistentVolumeMode
 	AllowVolumeExpansion bool
 	NodeSelection        e2epod.NodeSelection
+	MountOptions         []string
 }
 
 type provisioningTestSuite struct {
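MountOptions is new here: a StorageClassTest can now carry explicit mount options into the StorageClass it provisions. The field is wired through newStorageClass and getStorageClass in the hunks near the end of this change; a short sketch of the resulting object follows those hunks.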
@@ -1002,7 +1002,7 @@ func testSubpathReconstruction(f *framework.Framework, hostExec storageutils.Hos
 	}
 	framework.ExpectNotEqual(podNode, nil, "pod node should exist in schedulable nodes")
 
-	storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true)
+	storageutils.TestVolumeUnmountsFromDeletedPodWithForceOption(f.ClientSet, f, pod, forceDelete, true, nil)
 
 	if podNode != nil {
 		mountPoints := globalMountPointsByNode[podNode.Name]
@@ -152,6 +152,9 @@ func PatchCSIDeployment(f *e2eframework.Framework, o PatchCSIOptions, object int
 		if o.FSGroupPolicy != nil {
 			object.Spec.FSGroupPolicy = o.FSGroupPolicy
 		}
+		if o.SELinuxMount != nil {
+			object.Spec.SELinuxMount = o.SELinuxMount
+		}
 	}
 
 	return nil
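The new SELinuxMount block mirrors the FSGroupPolicy nil-guard right above it: a field is patched onto the deployed CSIDriver only when the test explicitly sets it, and nil leaves the driver's manifest untouched. A self-contained sketch of the pattern against the real storagev1.CSIDriver type (the helper name is illustrative, not from the patch):

package utils

import storagev1 "k8s.io/api/storage/v1"

// applyDriverOverrides patches only the optional fields the caller set;
// nil means "keep whatever the driver's manifest already declares".
func applyDriverOverrides(driver *storagev1.CSIDriver, fsGroupPolicy *storagev1.FSGroupPolicy, seLinuxMount *bool) {
	if fsGroupPolicy != nil {
		driver.Spec.FSGroupPolicy = fsGroupPolicy
	}
	if seLinuxMount != nil {
		driver.Spec.SELinuxMount = seLinuxMount
	}
}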
@@ -211,4 +214,8 @@ type PatchCSIOptions struct {
 	// field *if* the driver deploys a CSIDriver object. Ignored
 	// otherwise.
 	FSGroupPolicy *storagev1.FSGroupPolicy
+	// If not nil, the value to use for the CSIDriver.Spec.SELinuxMount
+	// field *if* the driver deploys a CSIDriver object. Ignored
+	// otherwise.
+	SELinuxMount *bool
 }
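A test opts in by setting the pointer, for example via k8s.io/utils/pointer; a hedged usage sketch, assuming it sits in the same package as PatchCSIOptions (only the SELinuxMount field itself is from this change):

package utils

import utilpointer "k8s.io/utils/pointer"

func exampleOptions() PatchCSIOptions {
	return PatchCSIOptions{
		// Deploy the test driver with SELinuxMount advertised in its CSIDriver object.
		SELinuxMount: utilpointer.Bool(true),
	}
}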
@@ -133,7 +133,8 @@ func TestKubeletRestartsAndRestoresMap(c clientset.Interface, f *framework.Frame
 // TestVolumeUnmountsFromDeletedPodWithForceOption tests that a volume unmounts if the client pod was deleted while the kubelet was down.
 // forceDelete is true indicating whether the pod is forcefully deleted.
 // checkSubpath is true indicating whether the subpath should be checked.
-func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool) {
+// If secondPod is set, it is started when kubelet is down to check that the volume is usable while the old pod is being deleted and the new pod is starting.
+func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, forceDelete bool, checkSubpath bool, secondPod *v1.Pod) {
 	nodeIP, err := getHostAddress(c, clientPod)
 	framework.ExpectNoError(err)
 	nodeIP = nodeIP + ":22"
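The trailing secondPod parameter keeps existing behavior intact: every pre-existing call site in this change passes an explicit nil and runs exactly as before, and only the new multiple-pod disruptive tests hand in a real pod to be started while the kubelet is down.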
@@ -152,6 +153,12 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
 		framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
 	}
 
+	ginkgo.By("Writing to the volume.")
+	path := "/mnt/volume1"
+	byteLen := 64
+	seed := time.Now().UTC().UnixNano()
+	CheckWriteToPath(f, clientPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
+
 	// This command is to make sure kubelet is started after test finishes no matter it fails or not.
 	defer func() {
 		KubeletCommand(KStart, c, clientPod)
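The write records only a length and a time-based seed, so the later read in the second pod can verify the exact bytes without shipping them out of band. Assuming CheckWriteToPath and CheckReadFromPath derive the data from the seed with a deterministic PRNG (an assumption; the helpers' bodies are not part of this hunk), the core idea reduces to:

package main

import (
	"bytes"
	"fmt"
	"math/rand"
)

// genData regenerates the same byte sequence from (seed, length), so a reader
// that knows both can verify a writer's data without a side channel.
func genData(seed int64, n int) []byte {
	buf := make([]byte, n)
	rand.New(rand.NewSource(seed)).Read(buf)
	return buf
}

func main() {
	const byteLen = 64
	seed := int64(1234567890)         // stands in for time.Now().UTC().UnixNano()
	written := genData(seed, byteLen) // what the writer puts at /mnt/volume1
	reread := genData(seed, byteLen)  // what the verifier expects to read back
	fmt.Println("data verified:", bytes.Equal(written, reread))
}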
@@ -159,6 +166,12 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
 	ginkgo.By("Stopping the kubelet.")
 	KubeletCommand(KStop, c, clientPod)
 
+	if secondPod != nil {
+		ginkgo.By("Starting the second pod")
+		_, err = c.CoreV1().Pods(clientPod.Namespace).Create(context.TODO(), secondPod, metav1.CreateOptions{})
+		framework.ExpectNoError(err, "when starting the second pod")
+	}
+
 	ginkgo.By(fmt.Sprintf("Deleting Pod %q", clientPod.Name))
 	if forceDelete {
 		err = c.CoreV1().Pods(clientPod.Namespace).Delete(context.TODO(), clientPod.Name, *metav1.NewDeleteOptions(0))
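Creating the second pod succeeds even though the kubelet is stopped: the Create call only talks to the API server, and because pod2.Spec.NodeName was set up front, the pod is bound to the node without going through the scheduler. It simply stays Pending until the kubelet comes back, which is exactly the window the test wants to exercise.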
@@ -180,6 +193,29 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
 		time.Sleep(30 * time.Second)
 	}
 
+	if secondPod != nil {
+		ginkgo.By("Waiting for the second pod.")
+		err = e2epod.WaitForPodRunningInNamespace(c, secondPod)
+		framework.ExpectNoError(err, "while waiting for the second pod Running")
+
+		ginkgo.By("Getting the second pod uuid.")
+		secondPod, err := c.CoreV1().Pods(secondPod.Namespace).Get(context.TODO(), secondPod.Name, metav1.GetOptions{})
+		framework.ExpectNoError(err, "getting the second UID")
+
+		ginkgo.By("Expecting the volume mount to be found in the second pod.")
+		result, err := e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", secondPod.UID), nodeIP, framework.TestContext.Provider)
+		e2essh.LogResult(result)
+		framework.ExpectNoError(err, "Encountered SSH error when checking the second pod.")
+		framework.ExpectEqual(result.Code, 0, fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
+
+		ginkgo.By("Testing that written file is accessible in the second pod.")
+		CheckReadFromPath(f, secondPod, v1.PersistentVolumeFilesystem, false, path, byteLen, seed)
+		err = c.CoreV1().Pods(secondPod.Namespace).Delete(context.TODO(), secondPod.Name, metav1.DeleteOptions{})
+		framework.ExpectNoError(err, "when deleting the second pod")
+		err = e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, secondPod.Name, f.Namespace.Name, f.Timeouts.PodDelete)
+		framework.ExpectNoError(err, "when waiting for the second pod to disappear")
+	}
+
 	ginkgo.By("Expecting the volume mount not to be found.")
 	result, err = e2essh.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", clientPod.UID), nodeIP, framework.TestContext.Provider)
 	e2essh.LogResult(result)
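Both the second-pod check above and the original check below lean on the same observation: the kubelet mounts pod volumes under /var/lib/kubelet/pods/<pod UID>/volumes/..., so a pod's UID appears in the node's mount table exactly as long as one of its volumes is mounted. A small sketch of the command the test runs over SSH:

package main

import "fmt"

// mountCheckCommand builds the pipeline used by the test: grep exits 0 when a
// mount for the pod still exists and 1 when none is left. Subpath bind mounts
// (volume-subpaths) are filtered out because they are verified separately.
func mountCheckCommand(podUID string) string {
	return fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", podUID)
}

func main() {
	fmt.Println(mountCheckCommand("11111111-2222-3333-4444-555555555555"))
}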
@@ -195,16 +231,17 @@ func TestVolumeUnmountsFromDeletedPodWithForceOption(c clientset.Interface, f *f
 		gomega.Expect(result.Stdout).To(gomega.BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
 		framework.Logf("Subpath volume unmounted on node %s", clientPod.Spec.NodeName)
 	}
 
 }
 
 // TestVolumeUnmountsFromDeletedPod tests that a volume unmounts if the client pod was deleted while the kubelet was down.
 func TestVolumeUnmountsFromDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
-	TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false)
+	TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, false, false, nil)
 }
 
 // TestVolumeUnmountsFromForceDeletedPod tests that a volume unmounts if the client pod was forcefully deleted while the kubelet was down.
 func TestVolumeUnmountsFromForceDeletedPod(c clientset.Interface, f *framework.Framework, clientPod *v1.Pod) {
-	TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false)
+	TestVolumeUnmountsFromDeletedPodWithForceOption(c, f, clientPod, true, false, nil)
 }
 
 // TestVolumeUnmapsFromDeletedPodWithForceOption tests that a volume unmaps if the client pod was deleted while the kubelet was down.
@@ -808,7 +808,7 @@ func newStorageClass(t testsuites.StorageClassTest, ns string, prefix string) *s
 		}
 	}
 
-	sc := getStorageClass(pluginName, t.Parameters, &bindingMode, ns, prefix)
+	sc := getStorageClass(pluginName, t.Parameters, &bindingMode, t.MountOptions, ns, prefix)
 	if t.AllowVolumeExpansion {
 		sc.AllowVolumeExpansion = &t.AllowVolumeExpansion
 	}
@@ -819,6 +819,7 @@ func getStorageClass(
 	provisioner string,
 	parameters map[string]string,
 	bindingMode *storagev1.VolumeBindingMode,
+	mountOptions []string,
 	ns string,
 	prefix string,
 ) *storagev1.StorageClass {
@@ -837,6 +838,7 @@ func getStorageClass(
 		Provisioner:       provisioner,
 		Parameters:        parameters,
 		VolumeBindingMode: bindingMode,
+		MountOptions:      mountOptions,
 	}
 }
 
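This is the end of the MountOptions plumbing: whatever the StorageClassTest carries is copied verbatim into the provisioned StorageClass. For an SELinux-focused test this allows provisioning a class whose volumes mount with an explicit context option. A minimal construction sketch using the real storagev1 type; only MountOptions reflects this change, the metadata and option value are illustrative:

package testsuites

import (
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleStorageClass shows the new field in use.
func exampleStorageClass(provisioner, ns, prefix string) *storagev1.StorageClass {
	return &storagev1.StorageClass{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: ns + "-" + prefix + "-",
		},
		Provisioner: provisioner,
		// A CSI driver that supports SELinux mounts would see this as
		// "-o context=..." when the volume is mounted.
		MountOptions: []string{`context="system_u:object_r:container_file_t:s0"`},
	}
}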