Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-27 13:37:30 +00:00)

Commit e4265cebbc
Merge pull request #28980 from kubernetes/revert-28691-pv-gid-squash2

Revert "Remove pod mutation for PVs with supplemental GIDs"
@@ -54,7 +54,6 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/types"
     "k8s.io/kubernetes/pkg/kubelet/util/cache"
     "k8s.io/kubernetes/pkg/kubelet/util/format"
-    "k8s.io/kubernetes/pkg/kubelet/volumemanager"
     "k8s.io/kubernetes/pkg/runtime"
     "k8s.io/kubernetes/pkg/securitycontext"
     kubetypes "k8s.io/kubernetes/pkg/types"
@@ -139,9 +138,6 @@ type DockerManager struct {
     // Network plugin.
     networkPlugin network.NetworkPlugin

-    // Kubelet Volume Manager.
-    volumeManager volumemanager.VolumeManager
-
     // Health check results.
     livenessManager proberesults.Manager

@@ -214,7 +210,6 @@ func NewDockerManager(
     containerLogsDir string,
     osInterface kubecontainer.OSInterface,
     networkPlugin network.NetworkPlugin,
-    volumeManager volumemanager.VolumeManager,
     runtimeHelper kubecontainer.RuntimeHelper,
     httpClient types.HttpGetter,
     execHandler ExecHandler,
@@ -253,7 +248,6 @@ func NewDockerManager(
         dockerRoot: dockerRoot,
         containerLogsDir: containerLogsDir,
         networkPlugin: networkPlugin,
-        volumeManager: volumeManager,
         livenessManager: livenessManager,
         runtimeHelper: runtimeHelper,
         execHandler: execHandler,
@@ -696,12 +690,9 @@ func (dm *DockerManager) runContainer(

     glog.V(3).Infof("Container %v/%v/%v: setting entrypoint \"%v\" and command \"%v\"", pod.Namespace, pod.Name, container.Name, dockerOpts.Config.Entrypoint, dockerOpts.Config.Cmd)

-    // todo: query volume manager for supplemental GIDs
-    supplementalGids := dm.volumeManager.GetExtraSupplementalGroupsForPod(pod)
-
     securityContextProvider := securitycontext.NewSimpleSecurityContextProvider()
     securityContextProvider.ModifyContainerConfig(pod, container, dockerOpts.Config)
-    securityContextProvider.ModifyHostConfig(pod, container, dockerOpts.HostConfig, supplementalGids)
+    securityContextProvider.ModifyHostConfig(pod, container, dockerOpts.HostConfig)
     createResp, err := dm.client.CreateContainer(dockerOpts)
     if err != nil {
         dm.recorder.Eventf(ref, api.EventTypeWarning, kubecontainer.FailedToCreateContainer, "Failed to create docker container with error: %v", err)
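The runContainer hunk above is the consumer side of the revert: the docker manager no longer asks the volume manager for extra GIDs and instead relies on the pod's security context alone. A standalone sketch of the two call shapes, using simplified stand-in types (hostConfig, modifyHostConfig, and the GID values are illustrative only, not the real kubelet API):

package main

import "fmt"

type hostConfig struct {
    GroupAdd []string
}

// Pre-revert shape: GIDs resolved by the volume manager are passed explicitly
// alongside the pod's own supplemental groups.
func modifyHostConfigWithExtraGids(podGids, extraGids []int64, cfg *hostConfig) {
    for _, g := range append(append([]int64{}, podGids...), extraGids...) {
        cfg.GroupAdd = append(cfg.GroupAdd, fmt.Sprint(g))
    }
}

// Post-revert shape: only the pod's (already mutated) security context is read.
func modifyHostConfig(podGids []int64, cfg *hostConfig) {
    for _, g := range podGids {
        cfg.GroupAdd = append(cfg.GroupAdd, fmt.Sprint(g))
    }
}

func main() {
    a, b := &hostConfig{}, &hostConfig{}
    modifyHostConfigWithExtraGids([]int64{555}, []int64{1234}, a)
    // With the revert, 1234 is assumed to have been appended to the pod's
    // SupplementalGroups by the volume manager before this point.
    modifyHostConfig([]int64{555, 1234}, b)
    fmt.Println(a.GroupAdd, b.GroupAdd) // [555 1234] [555 1234]
}

Either shape ends with the same GroupAdd list; the difference is whether the caller or the mutated pod object carries the annotation-derived GIDs.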
@@ -47,7 +47,6 @@ import (
     nettest "k8s.io/kubernetes/pkg/kubelet/network/testing"
     proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
     "k8s.io/kubernetes/pkg/kubelet/types"
-    "k8s.io/kubernetes/pkg/kubelet/volumemanager"
     "k8s.io/kubernetes/pkg/runtime"
     kubetypes "k8s.io/kubernetes/pkg/types"
     "k8s.io/kubernetes/pkg/util"
@@ -110,13 +109,7 @@ func createTestDockerManager(fakeHTTPClient *fakeHTTP, fakeDocker *FakeDockerCli
     }
     fakeRecorder := &record.FakeRecorder{}
     containerRefManager := kubecontainer.NewRefManager()
-    networkPlugin, _ := network.InitNetworkPlugin(
-        []network.NetworkPlugin{},
-        "",
-        nettest.NewFakeHost(nil),
-        componentconfig.HairpinNone,
-        "10.0.0.0/8")
-
+    networkPlugin, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, "10.0.0.0/8")
     dockerManager := NewFakeDockerManager(
         fakeDocker,
         fakeRecorder,
@@ -127,7 +120,6 @@ func createTestDockerManager(fakeHTTPClient *fakeHTTP, fakeDocker *FakeDockerCli
         0, 0, "",
         &containertest.FakeOS{},
         networkPlugin,
-        volumemanager.NewFakeVolumeManager(),
         &fakeRuntimeHelper{},
         fakeHTTPClient,
         flowcontrol.NewBackOff(time.Second, 300*time.Second))
@@ -653,7 +653,7 @@ func TestFindContainersByPod(t *testing.T) {
     fakeClient := NewFakeDockerClient()
     np, _ := network.InitNetworkPlugin([]network.NetworkPlugin{}, "", nettest.NewFakeHost(nil), componentconfig.HairpinNone, "10.0.0.0/8")
     // image back-off is set to nil, this test should not pull images
-    containerManager := NewFakeDockerManager(fakeClient, &record.FakeRecorder{}, nil, nil, &cadvisorapi.MachineInfo{}, options.GetDefaultPodInfraContainerImage(), 0, 0, "", &containertest.FakeOS{}, np, nil, nil, nil, nil)
+    containerManager := NewFakeDockerManager(fakeClient, &record.FakeRecorder{}, nil, nil, &cadvisorapi.MachineInfo{}, options.GetDefaultPodInfraContainerImage(), 0, 0, "", &containertest.FakeOS{}, np, nil, nil, nil)
     for i, test := range tests {
         fakeClient.RunningContainerList = test.runningContainerList
         fakeClient.ExitedContainerList = test.exitedContainerList
@@ -25,7 +25,6 @@ import (
     proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
     kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
     "k8s.io/kubernetes/pkg/kubelet/util/cache"
-    volumemanager "k8s.io/kubernetes/pkg/kubelet/volumemanager"
     "k8s.io/kubernetes/pkg/types"
     "k8s.io/kubernetes/pkg/util/flowcontrol"
     "k8s.io/kubernetes/pkg/util/oom"
@@ -44,7 +43,6 @@ func NewFakeDockerManager(
     containerLogsDir string,
     osInterface kubecontainer.OSInterface,
     networkPlugin network.NetworkPlugin,
-    volumeManager volumemanager.VolumeManager,
    runtimeHelper kubecontainer.RuntimeHelper,
     httpClient kubetypes.HttpGetter, imageBackOff *flowcontrol.Backoff) *DockerManager {

@@ -52,7 +50,7 @@ func NewFakeDockerManager(
     fakeProcFs := procfs.NewFakeProcFS()
     fakePodGetter := &fakePodGetter{}
     dm := NewDockerManager(client, recorder, livenessManager, containerRefManager, fakePodGetter, machineInfo, podInfraContainerImage, qps,
-        burst, containerLogsDir, osInterface, networkPlugin, volumeManager, runtimeHelper, httpClient, &NativeExecHandler{},
+        burst, containerLogsDir, osInterface, networkPlugin, runtimeHelper, httpClient, &NativeExecHandler{},
         fakeOOMAdjuster, fakeProcFs, false, imageBackOff, false, false, true, "/var/lib/kubelet/seccomp")
     dm.dockerPuller = &FakeDockerPuller{}

@@ -403,20 +403,6 @@ func NewMainKubelet(
     klet.podCache = kubecontainer.NewCache()
     klet.podManager = kubepod.NewBasicPodManager(kubepod.NewBasicMirrorClient(klet.kubeClient))

-    klet.volumePluginMgr, err =
-        NewInitializedVolumePluginMgr(klet, volumePlugins)
-    if err != nil {
-        return nil, err
-    }
-
-    klet.volumeManager, err = volumemanager.NewVolumeManager(
-        enableControllerAttachDetach,
-        hostname,
-        klet.podManager,
-        klet.kubeClient,
-        klet.volumePluginMgr,
-        klet.containerRuntime)
-
     // Initialize the runtime.
     switch containerRuntime {
     case "docker":
@@ -434,7 +420,6 @@ func NewMainKubelet(
             containerLogsDir,
             osInterface,
             klet.networkPlugin,
-            klet.volumeManager,
             klet,
             klet.httpClient,
             dockerExecHandler,
@@ -511,6 +496,20 @@ func NewMainKubelet(
         containerRefManager,
         recorder)

+    klet.volumePluginMgr, err =
+        NewInitializedVolumePluginMgr(klet, volumePlugins)
+    if err != nil {
+        return nil, err
+    }
+
+    klet.volumeManager, err = volumemanager.NewVolumeManager(
+        enableControllerAttachDetach,
+        hostname,
+        klet.podManager,
+        klet.kubeClient,
+        klet.volumePluginMgr,
+        klet.containerRuntime)
+
     runtimeCache, err := kubecontainer.NewRuntimeCache(klet.containerRuntime)
     if err != nil {
         return nil, err
@@ -1,55 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package volumemanager
-
-import (
-    "k8s.io/kubernetes/pkg/api"
-    "k8s.io/kubernetes/pkg/kubelet/container"
-    "k8s.io/kubernetes/pkg/volume/util/types"
-)
-
-func NewFakeVolumeManager() *FakeVolumeManager {
-    return &FakeVolumeManager{}
-}
-
-type FakeVolumeManager struct{}
-
-func (f *FakeVolumeManager) Run(stopCh <-chan struct{}) {
-}
-
-func (f *FakeVolumeManager) WaitForAttachAndMount(pod *api.Pod) error {
-    return nil
-}
-
-func (f *FakeVolumeManager) GetMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap {
-    return container.VolumeMap{}
-}
-
-func (f *FakeVolumeManager) GetExtraSupplementalGroupsForPod(pod *api.Pod) []int64 {
-    return nil
-}
-
-func (f *FakeVolumeManager) GetVolumesInUse() []api.UniqueVolumeName {
-    return nil
-}
-
-func (f *FakeVolumeManager) MarkVolumesAsReportedInUse(volumesReportedAsInUse []api.UniqueVolumeName) {
-}
-
-func (f *FakeVolumeManager) VolumeIsAttached(volumeName api.UniqueVolumeName) bool {
-    return false
-}
@@ -98,10 +98,16 @@ type VolumeManager interface {
     // volumes.
     GetMountedVolumesForPod(podName types.UniquePodName) container.VolumeMap

-    // GetExtraSupplementalGroupsForPod returns a list of the extra
-    // supplemental groups for the Pod. These extra supplemental groups come
-    // from annotations on persistent volumes that the pod depends on.
-    GetExtraSupplementalGroupsForPod(pod *api.Pod) []int64
+    // GetVolumesForPodAndApplySupplementalGroups, like GetVolumesForPod returns
+    // a VolumeMap containing the volumes referenced by the specified pod that
+    // are successfully attached and mounted. The key in the map is the
+    // OuterVolumeSpecName (i.e. pod.Spec.Volumes[x].Name).
+    // It returns an empty VolumeMap if pod has no volumes.
+    // In addition for every volume that specifies a VolumeGidValue, it appends
+    // the SecurityContext.SupplementalGroups for the specified pod.
+    // XXX: https://github.com/kubernetes/kubernetes/issues/27197 mutating the
+    // pod object is bad, and should be avoided.
+    GetVolumesForPodAndAppendSupplementalGroups(pod *api.Pod) container.VolumeMap

     // Returns a list of all volumes that implement the volume.Attacher
     // interface and are currently in use according to the actual and desired
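The interface hunk swaps the query-style GetExtraSupplementalGroupsForPod for the restored, pod-mutating GetVolumesForPodAndAppendSupplementalGroups. A minimal compilable sketch of the two shapes, using simplified stand-in types rather than the real kubelet packages:

package main

import "fmt"

// Simplified stand-ins for api.Pod and container.VolumeMap.
type Pod struct{ SupplementalGroups []int64 }
type VolumeMap map[string]struct{} // keyed by OuterVolumeSpecName

// Shape removed by the revert: a read-only query; the caller applies the
// annotation-derived GIDs itself.
type queryStyleVolumeManager interface {
    GetExtraSupplementalGroupsForPod(pod *Pod) []int64
}

// Shape restored by the revert: returns the pod's mounted volumes and, as a
// side effect, appends annotation-derived GIDs to pod.SupplementalGroups.
type mutatingStyleVolumeManager interface {
    GetVolumesForPodAndAppendSupplementalGroups(pod *Pod) VolumeMap
}

func main() {
    var q queryStyleVolumeManager
    var m mutatingStyleVolumeManager
    fmt.Println(q == nil, m == nil) // true true; declarations only
}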
@@ -218,34 +224,12 @@ func (vm *volumeManager) Run(stopCh <-chan struct{}) {

 func (vm *volumeManager) GetMountedVolumesForPod(
     podName types.UniquePodName) container.VolumeMap {
-    podVolumes := make(container.VolumeMap)
-    for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
-        podVolumes[mountedVolume.OuterVolumeSpecName] = container.VolumeInfo{Mounter: mountedVolume.Mounter}
-    }
-    return podVolumes
+    return vm.getVolumesForPodHelper(podName, nil /* pod */)
 }

-func (vm *volumeManager) GetExtraSupplementalGroupsForPod(pod *api.Pod) []int64 {
-    podName := volumehelper.GetUniquePodName(pod)
-    supplementalGroups := sets.NewString()
-
-    for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
-        if mountedVolume.VolumeGidValue != "" {
-            supplementalGroups.Insert(mountedVolume.VolumeGidValue)
-        }
-    }
-
-    result := make([]int64, 0, supplementalGroups.Len())
-    for _, group := range supplementalGroups.List() {
-        iGroup, extra := getExtraSupplementalGid(group, pod)
-        if !extra {
-            continue
-        }
-
-        result = append(result, int64(iGroup))
-    }
-
-    return result
+func (vm *volumeManager) GetVolumesForPodAndAppendSupplementalGroups(
+    pod *api.Pod) container.VolumeMap {
+    return vm.getVolumesForPodHelper("" /* podName */, pod)
 }

 func (vm *volumeManager) GetVolumesInUse() []api.UniqueVolumeName {
@@ -292,6 +276,33 @@ func (vm *volumeManager) MarkVolumesAsReportedInUse(
     vm.desiredStateOfWorld.MarkVolumesReportedInUse(volumesReportedAsInUse)
 }

+// getVolumesForPodHelper is a helper method implements the common logic for
+// the GetVolumesForPod methods.
+// XXX: https://github.com/kubernetes/kubernetes/issues/27197 mutating the pod
+// object is bad, and should be avoided.
+func (vm *volumeManager) getVolumesForPodHelper(
+    podName types.UniquePodName, pod *api.Pod) container.VolumeMap {
+    if pod != nil {
+        podName = volumehelper.GetUniquePodName(pod)
+    }
+    podVolumes := make(container.VolumeMap)
+    for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
+        podVolumes[mountedVolume.OuterVolumeSpecName] =
+            container.VolumeInfo{Mounter: mountedVolume.Mounter}
+        if pod != nil {
+            err := applyPersistentVolumeAnnotations(
+                mountedVolume.VolumeGidValue, pod)
+            if err != nil {
+                glog.Errorf("applyPersistentVolumeAnnotations failed for pod %q volume %q with: %v",
+                    podName,
+                    mountedVolume.VolumeName,
+                    err)
+            }
+        }
+    }
+    return podVolumes
+}
+
 func (vm *volumeManager) WaitForAttachAndMount(pod *api.Pod) error {
     expectedVolumes := getExpectedVolumes(pod)
     if len(expectedVolumes) == 0 {
@@ -381,26 +392,32 @@ func getExpectedVolumes(pod *api.Pod) []string {
     return expectedVolumes
 }

-// getExtraSupplementalGid returns the value of an extra supplemental GID as
-// defined by an annotation on a volume and a boolean indicating whether the
-// volume defined a GID.
-func getExtraSupplementalGid(volumeGidValue string, pod *api.Pod) (int64, bool) {
-    if volumeGidValue == "" {
-        return 0, false
-    }
-
-    gid, err := strconv.ParseInt(volumeGidValue, 10, 64)
-    if err != nil {
-        return 0, false
-    }
-
-    if pod.Spec.SecurityContext != nil {
-        for _, existingGid := range pod.Spec.SecurityContext.SupplementalGroups {
-            if gid == existingGid {
-                return 0, false
-            }
-        }
-    }
-
-    return gid, true
+// applyPersistentVolumeAnnotations appends a pod
+// SecurityContext.SupplementalGroups if a GID annotation is provided.
+// XXX: https://github.com/kubernetes/kubernetes/issues/27197 mutating the pod
+// object is bad, and should be avoided.
+func applyPersistentVolumeAnnotations(
+    volumeGidValue string, pod *api.Pod) error {
+    if volumeGidValue != "" {
+        gid, err := strconv.ParseInt(volumeGidValue, 10, 64)
+        if err != nil {
+            return fmt.Errorf(
+                "Invalid value for %s %v",
+                volumehelper.VolumeGidAnnotationKey,
+                err)
+        }
+
+        if pod.Spec.SecurityContext == nil {
+            pod.Spec.SecurityContext = &api.PodSecurityContext{}
+        }
+        for _, existingGid := range pod.Spec.SecurityContext.SupplementalGroups {
+            if gid == existingGid {
+                return nil
+            }
+        }
+        pod.Spec.SecurityContext.SupplementalGroups =
+            append(pod.Spec.SecurityContext.SupplementalGroups, gid)
+    }
+
+    return nil
 }
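The restored applyPersistentVolumeAnnotations is where the pod mutation flagged in issue #27197 happens: the PV's GID annotation is parsed and appended to the pod's SupplementalGroups unless it is already present. A standalone sketch of the same parse-and-append logic, with simplified types and a hypothetical applyVolumeGid name (not the kubelet's API):

package main

import (
    "fmt"
    "strconv"
)

type podSecurityContext struct{ SupplementalGroups []int64 }
type pod struct{ SecurityContext *podSecurityContext }

func applyVolumeGid(volumeGidValue string, p *pod) error {
    if volumeGidValue == "" {
        return nil
    }
    gid, err := strconv.ParseInt(volumeGidValue, 10, 64)
    if err != nil {
        return fmt.Errorf("invalid GID annotation value %q: %v", volumeGidValue, err)
    }
    if p.SecurityContext == nil {
        p.SecurityContext = &podSecurityContext{}
    }
    for _, existing := range p.SecurityContext.SupplementalGroups {
        if gid == existing {
            return nil // already requested by the pod; nothing to do
        }
    }
    p.SecurityContext.SupplementalGroups = append(p.SecurityContext.SupplementalGroups, gid)
    return nil
}

func main() {
    p := &pod{SecurityContext: &podSecurityContext{SupplementalGroups: []int64{555}}}
    _ = applyVolumeGid("1234", p) // appended
    _ = applyVolumeGid("555", p)  // duplicate, skipped
    fmt.Println(p.SecurityContext.SupplementalGroups) // [555 1234]
}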
@@ -1,274 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package volumemanager
-
-import (
-    "os"
-    "reflect"
-    "strconv"
-    "testing"
-    "time"
-
-    "k8s.io/kubernetes/pkg/api"
-    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
-    containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
-    "k8s.io/kubernetes/pkg/kubelet/pod"
-    kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
-    podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
-    utiltesting "k8s.io/kubernetes/pkg/util/testing"
-    "k8s.io/kubernetes/pkg/volume"
-    volumetest "k8s.io/kubernetes/pkg/volume/testing"
-    "k8s.io/kubernetes/pkg/volume/util/types"
-    "k8s.io/kubernetes/pkg/volume/util/volumehelper"
-)
-
-const (
-    testHostname = "test-hostname"
-)
-
-func TestGetMountedVolumesForPodAndGetVolumesInUse(t *testing.T) {
-    tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest")
-    if err != nil {
-        t.Fatalf("can't make a temp dir: %v", err)
-    }
-    defer os.RemoveAll(tmpDir)
-    podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient())
-
-    node, pod, pv, claim := createObjects()
-    kubeClient := fake.NewSimpleClientset(node, pod, pv, claim)
-
-    manager, err := newTestVolumeManager(tmpDir, podManager, kubeClient)
-    if err != nil {
-        t.Fatalf("Failed to initialize volume manager: %v", err)
-    }
-
-    stopCh := make(chan struct{})
-    go manager.Run(stopCh)
-    defer close(stopCh)
-
-    podManager.SetPods([]*api.Pod{pod})
-
-    // Fake node status update
-    go simulateVolumeInUseUpdate(
-        api.UniqueVolumeName(node.Status.VolumesAttached[0].Name),
-        stopCh,
-        manager)
-
-    err = manager.WaitForAttachAndMount(pod)
-    if err != nil {
-        t.Errorf("Expected success: %v", err)
-    }
-
-    expectedMounted := pod.Spec.Volumes[0].Name
-    actualMounted := manager.GetMountedVolumesForPod(types.UniquePodName(pod.ObjectMeta.UID))
-    if _, ok := actualMounted[expectedMounted]; !ok || (len(actualMounted) != 1) {
-        t.Errorf("Expected %v to be mounted to pod but got %v", expectedMounted, actualMounted)
-    }
-
-    expectedInUse := []api.UniqueVolumeName{api.UniqueVolumeName(node.Status.VolumesAttached[0].Name)}
-    actualInUse := manager.GetVolumesInUse()
-    if !reflect.DeepEqual(expectedInUse, actualInUse) {
-        t.Errorf("Expected %v to be in use but got %v", expectedInUse, actualInUse)
-    }
-}
-
-func TestGetExtraSupplementalGroupsForPod(t *testing.T) {
-    tmpDir, err := utiltesting.MkTmpdir("volumeManagerTest")
-    if err != nil {
-        t.Fatalf("can't make a temp dir: %v", err)
-    }
-    defer os.RemoveAll(tmpDir)
-    podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient())
-
-    node, pod, _, claim := createObjects()
-
-    existingGid := pod.Spec.SecurityContext.SupplementalGroups[0]
-
-    cases := []struct {
-        gidAnnotation string
-        expected      []int64
-    }{
-        {
-            gidAnnotation: "666",
-            expected:      []int64{666},
-        },
-        {
-            gidAnnotation: strconv.FormatInt(existingGid, 10),
-            expected:      []int64{},
-        },
-        {
-            gidAnnotation: "a",
-            expected:      []int64{},
-        },
-        {
-            gidAnnotation: "",
-            expected:      []int64{},
-        },
-    }
-
-    for _, tc := range cases {
-        pv := &api.PersistentVolume{
-            ObjectMeta: api.ObjectMeta{
-                Name: "pvA",
-                Annotations: map[string]string{
-                    volumehelper.VolumeGidAnnotationKey: tc.gidAnnotation,
-                },
-            },
-            Spec: api.PersistentVolumeSpec{
-                PersistentVolumeSource: api.PersistentVolumeSource{
-                    GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
-                        PDName: "fake-device",
-                    },
-                },
-                ClaimRef: &api.ObjectReference{
-                    Name: claim.ObjectMeta.Name,
-                },
-            },
-        }
-        kubeClient := fake.NewSimpleClientset(node, pod, pv, claim)
-
-        manager, err := newTestVolumeManager(tmpDir, podManager, kubeClient)
-        if err != nil {
-            t.Fatalf("Failed to initialize volume manager: %v", err)
-        }
-
-        stopCh := make(chan struct{})
-        go manager.Run(stopCh)
-
-        podManager.SetPods([]*api.Pod{pod})
-
-        // Fake node status update
-        go simulateVolumeInUseUpdate(
-            api.UniqueVolumeName(node.Status.VolumesAttached[0].Name),
-            stopCh,
-            manager)
-
-        err = manager.WaitForAttachAndMount(pod)
-        if err != nil {
-            t.Errorf("Expected success: %v", err)
-        }
-
-        actual := manager.GetExtraSupplementalGroupsForPod(pod)
-        if !reflect.DeepEqual(tc.expected, actual) {
-            t.Errorf("Expected supplemental groups %v, got %v", tc.expected, actual)
-        }
-
-        close(stopCh)
-    }
-}
-
-func newTestVolumeManager(
-    tmpDir string,
-    podManager pod.Manager,
-    kubeClient internalclientset.Interface) (VolumeManager, error) {
-    plug := &volumetest.FakeVolumePlugin{PluginName: "fake", Host: nil}
-    plugMgr := &volume.VolumePluginMgr{}
-    plugMgr.InitPlugins([]volume.VolumePlugin{plug}, volumetest.NewFakeVolumeHost(tmpDir, kubeClient, nil, "" /* rootContext */))
-
-    vm, err := NewVolumeManager(
-        true,
-        testHostname,
-        podManager,
-        kubeClient,
-        plugMgr,
-        &containertest.FakeRuntime{})
-    return vm, err
-}
-
-// createObjects returns objects for making a fake clientset. The pv is
-// already attached to the node and bound to the claim used by the pod.
-func createObjects() (*api.Node, *api.Pod, *api.PersistentVolume, *api.PersistentVolumeClaim) {
-    node := &api.Node{
-        ObjectMeta: api.ObjectMeta{Name: testHostname},
-        Status: api.NodeStatus{
-            VolumesAttached: []api.AttachedVolume{
-                {
-                    Name:       "fake/pvA",
-                    DevicePath: "fake/path",
-                },
-            }},
-        Spec: api.NodeSpec{ExternalID: testHostname},
-    }
-    pod := &api.Pod{
-        ObjectMeta: api.ObjectMeta{
-            Name:      "abc",
-            Namespace: "nsA",
-            UID:       "1234",
-        },
-        Spec: api.PodSpec{
-            Volumes: []api.Volume{
-                {
-                    Name: "vol1",
-                    VolumeSource: api.VolumeSource{
-                        PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
-                            ClaimName: "claimA",
-                        },
-                    },
-                },
-            },
-            SecurityContext: &api.PodSecurityContext{
-                SupplementalGroups: []int64{555},
-            },
-        },
-    }
-    pv := &api.PersistentVolume{
-        ObjectMeta: api.ObjectMeta{
-            Name: "pvA",
-        },
-        Spec: api.PersistentVolumeSpec{
-            PersistentVolumeSource: api.PersistentVolumeSource{
-                GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
-                    PDName: "fake-device",
-                },
-            },
-            ClaimRef: &api.ObjectReference{
-                Name: "claimA",
-            },
-        },
-    }
-    claim := &api.PersistentVolumeClaim{
-        ObjectMeta: api.ObjectMeta{
-            Name:      "claimA",
-            Namespace: "nsA",
-        },
-        Spec: api.PersistentVolumeClaimSpec{
-            VolumeName: "pvA",
-        },
-        Status: api.PersistentVolumeClaimStatus{
-            Phase: api.ClaimBound,
-        },
-    }
-    return node, pod, pv, claim
-}
-
-func simulateVolumeInUseUpdate(
-    volumeName api.UniqueVolumeName,
-    stopCh <-chan struct{},
-    volumeManager VolumeManager) {
-    ticker := time.NewTicker(100 * time.Millisecond)
-    defer ticker.Stop()
-    for {
-        select {
-        case <-ticker.C:
-            volumeManager.MarkVolumesAsReportedInUse(
-                []api.UniqueVolumeName{volumeName})
-        case <-stopCh:
-            return
-        }
-    }
-}
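The deleted test above exercised four annotation cases: a new GID ("666"), a GID the pod already requests, a non-numeric value, and an empty annotation. A standalone sketch reproducing that table with a hypothetical gidsAfterAnnotation helper (simplified, not the kubelet's API):

package main

import (
    "fmt"
    "strconv"
)

// gidsAfterAnnotation reports which extra GIDs a pod would pick up from a PV
// GID annotation, given the GIDs the pod already requests.
func gidsAfterAnnotation(annotation string, existing []int64) []int64 {
    extra := []int64{}
    if annotation == "" {
        return extra
    }
    gid, err := strconv.ParseInt(annotation, 10, 64)
    if err != nil {
        return extra // not a numeric GID
    }
    for _, e := range existing {
        if e == gid {
            return extra // already requested by the pod
        }
    }
    return append(extra, gid)
}

func main() {
    existing := []int64{555}
    cases := []struct {
        gidAnnotation string
        expected      []int64
    }{
        {"666", []int64{666}},
        {"555", []int64{}}, // duplicate of an existing group
        {"a", []int64{}},   // not a number
        {"", []int64{}},    // no annotation
    }
    for _, tc := range cases {
        got := gidsAfterAnnotation(tc.gidAnnotation, existing)
        fmt.Printf("annotation=%q expected=%v got=%v\n", tc.gidAnnotation, tc.expected, got)
    }
}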
@@ -41,5 +41,5 @@ type FakeSecurityContextProvider struct{}

 func (p FakeSecurityContextProvider) ModifyContainerConfig(pod *api.Pod, container *api.Container, config *dockercontainer.Config) {
 }
-func (p FakeSecurityContextProvider) ModifyHostConfig(pod *api.Pod, container *api.Container, hostConfig *dockercontainer.HostConfig, supplementalGids []int64) {
+func (p FakeSecurityContextProvider) ModifyHostConfig(pod *api.Pod, container *api.Container, hostConfig *dockercontainer.HostConfig) {
 }
@@ -47,12 +47,12 @@ func (p SimpleSecurityContextProvider) ModifyContainerConfig(pod *api.Pod, conta
     }
 }

-// ModifyHostConfig is called before the Docker runContainer call. The
-// security context provider can make changes to the HostConfig, affecting
+// ModifyHostConfig is called before the Docker runContainer call.
+// The security context provider can make changes to the HostConfig, affecting
 // security options, whether the container is privileged, volume binds, etc.
-func (p SimpleSecurityContextProvider) ModifyHostConfig(pod *api.Pod, container *api.Container, hostConfig *dockercontainer.HostConfig, supplementalGids []int64) {
-    // Apply supplemental groups
-    if container.Name != leaky.PodInfraContainerName {
+func (p SimpleSecurityContextProvider) ModifyHostConfig(pod *api.Pod, container *api.Container, hostConfig *dockercontainer.HostConfig) {
+    // Apply pod security context
+    if container.Name != leaky.PodInfraContainerName && pod.Spec.SecurityContext != nil {
         // TODO: We skip application of supplemental groups to the
         // infra container to work around a runc issue which
         // requires containers to have the '/etc/group'. For
@@ -60,17 +60,14 @@ func (p SimpleSecurityContextProvider) ModifyHostConfig(pod *api.Pod, container
         // https://github.com/opencontainers/runc/pull/313
         // This can be removed once the fix makes it into the
         // required version of docker.
-        if pod.Spec.SecurityContext != nil {
-            for _, group := range pod.Spec.SecurityContext.SupplementalGroups {
-                hostConfig.GroupAdd = append(hostConfig.GroupAdd, strconv.Itoa(int(group)))
+        if pod.Spec.SecurityContext.SupplementalGroups != nil {
+            hostConfig.GroupAdd = make([]string, len(pod.Spec.SecurityContext.SupplementalGroups))
+            for i, group := range pod.Spec.SecurityContext.SupplementalGroups {
+                hostConfig.GroupAdd[i] = strconv.Itoa(int(group))
             }
         }

-        for _, group := range supplementalGids {
-            hostConfig.GroupAdd = append(hostConfig.GroupAdd, strconv.Itoa(int(group)))
-        }
-
-        if pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.FSGroup != nil {
+        if pod.Spec.SecurityContext.FSGroup != nil {
             hostConfig.GroupAdd = append(hostConfig.GroupAdd, strconv.Itoa(int(*pod.Spec.SecurityContext.FSGroup)))
         }
     }
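With the revert, ModifyHostConfig builds docker's HostConfig.GroupAdd from the pod security context alone: one entry per supplemental group, plus the FSGroup when set, and no separately passed GIDs. A standalone sketch with simplified types (groupAddFor is a hypothetical name, not the kubelet's API):

package main

import (
    "fmt"
    "strconv"
)

type podSecurityContext struct {
    SupplementalGroups []int64
    FSGroup            *int64
}

func groupAddFor(sc *podSecurityContext) []string {
    var groupAdd []string
    if sc == nil {
        return groupAdd
    }
    if sc.SupplementalGroups != nil {
        groupAdd = make([]string, len(sc.SupplementalGroups))
        for i, group := range sc.SupplementalGroups {
            groupAdd[i] = strconv.Itoa(int(group))
        }
    }
    if sc.FSGroup != nil {
        groupAdd = append(groupAdd, strconv.Itoa(int(*sc.FSGroup)))
    }
    return groupAdd
}

func main() {
    fsGroup := int64(1234)
    fmt.Println(groupAddFor(&podSecurityContext{
        SupplementalGroups: []int64{2222},
        FSGroup:            &fsGroup,
    })) // [2222 1234]
}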
@@ -166,7 +166,7 @@ func TestModifyHostConfig(t *testing.T) {
         dummyContainer.SecurityContext = tc.sc
         dockerCfg := &dockercontainer.HostConfig{}

-        provider.ModifyHostConfig(pod, dummyContainer, dockerCfg, nil)
+        provider.ModifyHostConfig(pod, dummyContainer, dockerCfg)

         if e, a := tc.expected, dockerCfg; !reflect.DeepEqual(e, a) {
             t.Errorf("%v: unexpected modification of host config\nExpected:\n\n%#v\n\nGot:\n\n%#v", tc.name, e, a)
@@ -181,32 +181,25 @@ func TestModifyHostConfigPodSecurityContext(t *testing.T) {
     supplementalGroupHC.GroupAdd = []string{"2222"}
     fsGroupHC := fullValidHostConfig()
     fsGroupHC.GroupAdd = []string{"1234"}
-    extraSupplementalGroupHC := fullValidHostConfig()
-    extraSupplementalGroupHC.GroupAdd = []string{"1234"}
     bothHC := fullValidHostConfig()
     bothHC.GroupAdd = []string{"2222", "1234"}
     fsGroup := int64(1234)
-    extraSupplementalGroup := []int64{1234}

     testCases := map[string]struct {
         securityContext *api.PodSecurityContext
         expected        *dockercontainer.HostConfig
-        extra           []int64
     }{
         "nil": {
             securityContext: nil,
             expected:        fullValidHostConfig(),
-            extra:           nil,
         },
         "SupplementalGroup": {
             securityContext: supplementalGroupsSC,
             expected:        supplementalGroupHC,
-            extra:           nil,
         },
         "FSGroup": {
             securityContext: &api.PodSecurityContext{FSGroup: &fsGroup},
             expected:        fsGroupHC,
-            extra:           nil,
         },
         "FSGroup + SupplementalGroups": {
             securityContext: &api.PodSecurityContext{
@@ -214,17 +207,6 @@ func TestModifyHostConfigPodSecurityContext(t *testing.T) {
                 FSGroup:            &fsGroup,
             },
             expected: bothHC,
-            extra:    nil,
-        },
-        "ExtraSupplementalGroup": {
-            securityContext: nil,
-            expected:        extraSupplementalGroupHC,
-            extra:           extraSupplementalGroup,
-        },
-        "ExtraSupplementalGroup + SupplementalGroups": {
-            securityContext: supplementalGroupsSC,
-            expected:        bothHC,
-            extra:           extraSupplementalGroup,
         },
     }

@@ -238,7 +220,7 @@ func TestModifyHostConfigPodSecurityContext(t *testing.T) {
     for k, v := range testCases {
         dummyPod.Spec.SecurityContext = v.securityContext
         dockerCfg := &dockercontainer.HostConfig{}
-        provider.ModifyHostConfig(dummyPod, dummyContainer, dockerCfg, v.extra)
+        provider.ModifyHostConfig(dummyPod, dummyContainer, dockerCfg)
         if !reflect.DeepEqual(v.expected, dockerCfg) {
             t.Errorf("unexpected modification of host config for %s. Expected: %#v Got: %#v", k, v.expected, dockerCfg)
         }
@@ -33,11 +33,7 @@ type SecurityContextProvider interface {
     // security options, whether the container is privileged, volume binds, etc.
     // An error is returned if it's not possible to secure the container as requested
     // with a security context.
-    //
-    // - pod: the pod to modify the docker hostconfig for
-    // - container: the container to modify the hostconfig for
-    // - supplementalGids: additional supplemental GIDs associated with the pod's volumes
-    ModifyHostConfig(pod *api.Pod, container *api.Container, hostConfig *dockercontainer.HostConfig, supplementalGids []int64)
+    ModifyHostConfig(pod *api.Pod, container *api.Container, hostConfig *dockercontainer.HostConfig)
 }

 const (
@@ -46,7 +46,7 @@ func dockerRuntime() kubecontainer.Runtime {
         dockerClient,
         nil, nil, nil, pm, nil,
         "", 0, 0, "",
-        nil, nil, nil, nil, nil, nil, nil, nil,
+        nil, nil, nil, nil, nil, nil, nil,
         false, nil, true, false, false, "",
     )
