Merge pull request #89193 from zshihang/master

set proper file permission for projected service account volume
This commit is contained in:
Kubernetes Prow Robot 2020-05-04 20:05:07 -07:00 committed by GitHub
commit 3365ed5625
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
13 changed files with 510 additions and 72 deletions

View File

@ -32,6 +32,7 @@ go_test(
"//pkg/apis/core:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)

View File

@ -17,7 +17,7 @@ limitations under the License.
package securitycontext
import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
)
// HasPrivilegedRequest returns the value of SecurityContext.Privileged, taking into account
@ -124,6 +124,25 @@ func DetermineEffectiveSecurityContext(pod *v1.Pod, container *v1.Container) *v1
return effectiveSc
}
// DetermineEffectiveRunAsUser returns a pointer to the UID resolved from the
// pod's and the container's security contexts, together with a bool that
// reports whether a UID was present at all. The container's runAsUser, when
// set, takes precedence over the pod's.
func DetermineEffectiveRunAsUser(pod *v1.Pod, container *v1.Container) (*int64, bool) {
	var uid *int64
	// Start from the pod-level setting, if any.
	if sc := pod.Spec.SecurityContext; sc != nil && sc.RunAsUser != nil {
		u := *sc.RunAsUser
		uid = &u
	}
	// A container-level setting overrides the pod-level one.
	if sc := container.SecurityContext; sc != nil && sc.RunAsUser != nil {
		u := *sc.RunAsUser
		uid = &u
	}
	return uid, uid != nil
}
func securityContextFromPodSecurityContext(pod *v1.Pod) *v1.SecurityContext {
if pod.Spec.SecurityContext == nil {
return nil

View File

@ -20,7 +20,8 @@ import (
"reflect"
"testing"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
utilptr "k8s.io/utils/pointer"
)
func TestAddNoNewPrivileges(t *testing.T) {
@ -120,3 +121,92 @@ func TestConvertToRuntimeReadonlyPaths(t *testing.T) {
}
}
}
// TestDetermineEffectiveRunAsUser verifies how the effective UID is resolved
// from the pod-level and container-level security contexts, including the
// precedence of the container's runAsUser over the pod's.
func TestDetermineEffectiveRunAsUser(t *testing.T) {
	tests := []struct {
		desc          string
		pod           *v1.Pod
		container     *v1.Container
		wantRunAsUser *int64
	}{
		{
			desc: "no securityContext in pod, no securityContext in container",
			pod: &v1.Pod{
				Spec: v1.PodSpec{},
			},
			container:     &v1.Container{},
			wantRunAsUser: nil,
		},
		{
			desc: "no runAsUser in pod, no runAsUser in container",
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					SecurityContext: &v1.PodSecurityContext{},
				},
			},
			container: &v1.Container{
				SecurityContext: &v1.SecurityContext{},
			},
			wantRunAsUser: nil,
		},
		{
			desc: "runAsUser in pod, no runAsUser in container",
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					SecurityContext: &v1.PodSecurityContext{
						RunAsUser: new(int64),
					},
				},
			},
			container: &v1.Container{
				SecurityContext: &v1.SecurityContext{},
			},
			wantRunAsUser: new(int64),
		},
		{
			desc: "no runAsUser in pod, runAsUser in container",
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					SecurityContext: &v1.PodSecurityContext{},
				},
			},
			container: &v1.Container{
				SecurityContext: &v1.SecurityContext{
					RunAsUser: new(int64),
				},
			},
			wantRunAsUser: new(int64),
		},
		{
			// Fixed description: this case sets runAsUser in BOTH the pod
			// (0) and the container (1); the container value must win.
			desc: "runAsUser in pod and in container, container takes precedence",
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					SecurityContext: &v1.PodSecurityContext{
						RunAsUser: new(int64),
					},
				},
			},
			container: &v1.Container{
				SecurityContext: &v1.SecurityContext{
					RunAsUser: utilptr.Int64Ptr(1),
				},
			},
			wantRunAsUser: utilptr.Int64Ptr(1),
		},
	}

	for _, test := range tests {
		t.Run(test.desc, func(t *testing.T) {
			runAsUser, ok := DetermineEffectiveRunAsUser(test.pod, test.container)
			if !ok && test.wantRunAsUser != nil {
				t.Errorf("DetermineEffectiveRunAsUser(%v, %v) = %v, want %d", test.pod, test.container, runAsUser, *test.wantRunAsUser)
			}
			if ok && test.wantRunAsUser == nil {
				t.Errorf("DetermineEffectiveRunAsUser(%v, %v) = %d, want %v", test.pod, test.container, *runAsUser, test.wantRunAsUser)
			}
			if ok && test.wantRunAsUser != nil && *runAsUser != *test.wantRunAsUser {
				t.Errorf("DetermineEffectiveRunAsUser(%v, %v) = %d, want %d", test.pod, test.container, *runAsUser, *test.wantRunAsUser)
			}
		})
	}
}

View File

@ -22,11 +22,12 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
"//staging/src/k8s.io/client-go/testing:go_default_library",
"//vendor/github.com/google/go-cmp/cmp:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
],
)

View File

@ -196,7 +196,7 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterA
return err
}
data, err := s.collectData()
data, err := s.collectData(mounterArgs)
if err != nil {
klog.Errorf("Error preparing data for projected volume %v for pod %v/%v: %s", s.volName, s.pod.Namespace, s.pod.Name, err.Error())
return err
@ -248,7 +248,7 @@ func (s *projectedVolumeMounter) SetUpAt(dir string, mounterArgs volume.MounterA
return nil
}
func (s *projectedVolumeMounter) collectData() (map[string]volumeutil.FileProjection, error) {
func (s *projectedVolumeMounter) collectData(mounterArgs volume.MounterArgs) (map[string]volumeutil.FileProjection, error) {
if s.source.DefaultMode == nil {
return nil, fmt.Errorf("No defaultMode used, not even the default value for it")
}
@ -328,6 +328,13 @@ func (s *projectedVolumeMounter) collectData() (map[string]volumeutil.FileProjec
}
tp := source.ServiceAccountToken
// When FsGroup is set, we depend on SetVolumeOwnership to
// change from 0600 to 0640.
mode := *s.source.DefaultMode
if mounterArgs.FsUser != nil || mounterArgs.FsGroup != nil {
mode = 0600
}
var auds []string
if len(tp.Audience) != 0 {
auds = []string{tp.Audience}
@ -349,8 +356,9 @@ func (s *projectedVolumeMounter) collectData() (map[string]volumeutil.FileProjec
continue
}
payload[tp.Path] = volumeutil.FileProjection{
Data: []byte(tr.Status.Token),
Mode: 0600,
Data: []byte(tr.Status.Token),
Mode: mode,
FsUser: mounterArgs.FsUser,
}
}
}

View File

@ -25,12 +25,12 @@ import (
"strings"
"testing"
"github.com/google/go-cmp/cmp"
authenticationv1 "k8s.io/api/authentication/v1"
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/diff"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/fake"
@ -41,6 +41,7 @@ import (
"k8s.io/kubernetes/pkg/volume/emptydir"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"k8s.io/kubernetes/pkg/volume/util"
utilptr "k8s.io/utils/pointer"
)
func TestCollectDataWithSecret(t *testing.T) {
@ -253,7 +254,7 @@ func TestCollectDataWithSecret(t *testing.T) {
Name: tc.name,
}
source := makeProjection(tc.name, tc.mode, "secret")
source := makeProjection(tc.name, utilptr.Int32Ptr(tc.mode), "secret")
source.Sources[0].Secret.Items = tc.mappings
source.Sources[0].Secret.Optional = &tc.optional
@ -275,7 +276,7 @@ func TestCollectDataWithSecret(t *testing.T) {
pod: pod,
}
actualPayload, err := myVolumeMounter.collectData()
actualPayload, err := myVolumeMounter.collectData(volume.MounterArgs{})
if err != nil && tc.success {
t.Errorf("%v: unexpected failure making payload: %v", tc.name, err)
return
@ -502,7 +503,7 @@ func TestCollectDataWithConfigMap(t *testing.T) {
Name: tc.name,
}
source := makeProjection(tc.name, tc.mode, "configMap")
source := makeProjection(tc.name, utilptr.Int32Ptr(tc.mode), "configMap")
source.Sources[0].ConfigMap.Items = tc.mappings
source.Sources[0].ConfigMap.Optional = &tc.optional
@ -524,7 +525,7 @@ func TestCollectDataWithConfigMap(t *testing.T) {
pod: pod,
}
actualPayload, err := myVolumeMounter.collectData()
actualPayload, err := myVolumeMounter.collectData(volume.MounterArgs{})
if err != nil && tc.success {
t.Errorf("%v: unexpected failure making payload: %v", tc.name, err)
return
@ -676,7 +677,7 @@ func TestCollectDataWithDownwardAPI(t *testing.T) {
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
source := makeProjection("", tc.mode, "downwardAPI")
source := makeProjection("", utilptr.Int32Ptr(tc.mode), "downwardAPI")
source.Sources[0].DownwardAPI.Items = tc.volumeFile
client := fake.NewSimpleClientset(tc.pod)
@ -694,7 +695,7 @@ func TestCollectDataWithDownwardAPI(t *testing.T) {
pod: tc.pod,
}
actualPayload, err := myVolumeMounter.collectData()
actualPayload, err := myVolumeMounter.collectData(volume.MounterArgs{})
if err != nil && tc.success {
t.Errorf("%v: unexpected failure making payload: %v", tc.name, err)
return
@ -721,50 +722,100 @@ func TestCollectDataWithServiceAccountToken(t *testing.T) {
minute := int64(60)
cases := []struct {
name string
svcacct string
audience string
expiration *int64
path string
name string
svcacct string
audience string
defaultMode *int32
fsUser *int64
fsGroup *int64
expiration *int64
path string
payload map[string]util.FileProjection
wantPayload map[string]util.FileProjection
wantErr error
}{
{
name: "test good service account",
audience: "https://example.com",
path: "token",
expiration: &minute,
name: "good service account",
audience: "https://example.com",
defaultMode: utilptr.Int32Ptr(0644),
path: "token",
expiration: &minute,
payload: map[string]util.FileProjection{
"token": {Data: []byte("test_projected_namespace:foo:60:[https://example.com]"), Mode: 0600},
wantPayload: map[string]util.FileProjection{
"token": {Data: []byte("test_projected_namespace:foo:60:[https://example.com]"), Mode: 0644},
},
},
{
name: "test good service account other path",
audience: "https://example.com",
path: "other-token",
expiration: &minute,
payload: map[string]util.FileProjection{
"other-token": {Data: []byte("test_projected_namespace:foo:60:[https://example.com]"), Mode: 0600},
name: "good service account other path",
audience: "https://example.com",
defaultMode: utilptr.Int32Ptr(0644),
path: "other-token",
expiration: &minute,
wantPayload: map[string]util.FileProjection{
"other-token": {Data: []byte("test_projected_namespace:foo:60:[https://example.com]"), Mode: 0644},
},
},
{
name: "test good service account defaults audience",
path: "token",
expiration: &minute,
name: "good service account defaults audience",
defaultMode: utilptr.Int32Ptr(0644),
path: "token",
expiration: &minute,
payload: map[string]util.FileProjection{
"token": {Data: []byte("test_projected_namespace:foo:60:[https://api]"), Mode: 0600},
wantPayload: map[string]util.FileProjection{
"token": {Data: []byte("test_projected_namespace:foo:60:[https://api]"), Mode: 0644},
},
},
{
name: "test good service account defaults expiration",
audience: "https://example.com",
path: "token",
name: "good service account defaults expiration",
defaultMode: utilptr.Int32Ptr(0644),
path: "token",
payload: map[string]util.FileProjection{
"token": {Data: []byte("test_projected_namespace:foo:3600:[https://example.com]"), Mode: 0600},
wantPayload: map[string]util.FileProjection{
"token": {Data: []byte("test_projected_namespace:foo:3600:[https://api]"), Mode: 0644},
},
},
{
name: "no default mode",
path: "token",
wantErr: fmt.Errorf("No defaultMode used, not even the default value for it"),
},
{
name: "fsUser != nil",
defaultMode: utilptr.Int32Ptr(0644),
fsUser: utilptr.Int64Ptr(1000),
path: "token",
wantPayload: map[string]util.FileProjection{
"token": {
Data: []byte("test_projected_namespace:foo:3600:[https://api]"),
Mode: 0600,
FsUser: utilptr.Int64Ptr(1000),
},
},
},
{
name: "fsGroup != nil",
defaultMode: utilptr.Int32Ptr(0644),
fsGroup: utilptr.Int64Ptr(1000),
path: "token",
wantPayload: map[string]util.FileProjection{
"token": {
Data: []byte("test_projected_namespace:foo:3600:[https://api]"),
Mode: 0600,
},
},
},
{
name: "fsUser != nil && fsGroup != nil",
defaultMode: utilptr.Int32Ptr(0644),
fsGroup: utilptr.Int64Ptr(1000),
fsUser: utilptr.Int64Ptr(1000),
path: "token",
wantPayload: map[string]util.FileProjection{
"token": {
Data: []byte("test_projected_namespace:foo:3600:[https://api]"),
Mode: 0600,
FsUser: utilptr.Int64Ptr(1000),
},
},
},
}
@ -772,7 +823,7 @@ func TestCollectDataWithServiceAccountToken(t *testing.T) {
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
testNamespace := "test_projected_namespace"
source := makeProjection(tc.name, 0600, "serviceAccountToken")
source := makeProjection(tc.name, tc.defaultMode, "serviceAccountToken")
source.Sources[0].ServiceAccountToken.Audience = tc.audience
source.Sources[0].ServiceAccountToken.ExpirationSeconds = tc.expiration
source.Sources[0].ServiceAccountToken.Path = tc.path
@ -811,12 +862,12 @@ func TestCollectDataWithServiceAccountToken(t *testing.T) {
pod: pod,
}
actualPayload, err := myVolumeMounter.collectData()
if err != nil {
t.Fatalf("unexpected failure making payload: %v", err)
gotPayload, err := myVolumeMounter.collectData(volume.MounterArgs{FsUser: tc.fsUser, FsGroup: tc.fsGroup})
if err != nil && (tc.wantErr == nil || tc.wantErr.Error() != err.Error()) {
t.Fatalf("collectData() = unexpected err: %v", err)
}
if e, a := tc.payload, actualPayload; !reflect.DeepEqual(e, a) {
t.Errorf("expected and actual payload do not match:\n%s", diff.ObjectReflectDiff(e, a))
if diff := cmp.Diff(tc.wantPayload, gotPayload); diff != "" {
t.Errorf("collectData() = unexpected diff (-want +got):\n%s", diff)
}
})
}
@ -1184,7 +1235,7 @@ func makeVolumeSpec(volumeName, name string, defaultMode int32) *v1.Volume {
return &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Projected: makeProjection(name, defaultMode, "secret"),
Projected: makeProjection(name, utilptr.Int32Ptr(defaultMode), "secret"),
},
}
}
@ -1203,7 +1254,7 @@ func makeSecret(namespace, name string) v1.Secret {
}
}
func makeProjection(name string, defaultMode int32, kind string) *v1.ProjectedVolumeSource {
func makeProjection(name string, defaultMode *int32, kind string) *v1.ProjectedVolumeSource {
var item v1.VolumeProjection
switch kind {
@ -1231,7 +1282,7 @@ func makeProjection(name string, defaultMode int32, kind string) *v1.ProjectedVo
return &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{item},
DefaultMode: &defaultMode,
DefaultMode: defaultMode,
}
}

View File

@ -22,6 +22,7 @@ go_library(
"//pkg/api/legacyscheme:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/securitycontext:go_default_library",
"//pkg/util/resizefs:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util/types:go_default_library",
@ -71,6 +72,7 @@ go_test(
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
"//vendor/k8s.io/utils/pointer:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"//staging/src/k8s.io/client-go/util/testing:go_default_library",

View File

@ -63,8 +63,9 @@ type AtomicWriter struct {
// FileProjection contains file Data and access Mode
type FileProjection struct {
Data []byte
Mode int32
Data []byte
Mode int32
FsUser *int64
}
// NewAtomicWriter creates a new AtomicWriter configured to write to the given
@ -378,14 +379,12 @@ func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir
fullPath := filepath.Join(dir, userVisiblePath)
baseDir, _ := filepath.Split(fullPath)
err := os.MkdirAll(baseDir, os.ModePerm)
if err != nil {
if err := os.MkdirAll(baseDir, os.ModePerm); err != nil {
klog.Errorf("%s: unable to create directory %s: %v", w.logContext, baseDir, err)
return err
}
err = ioutil.WriteFile(fullPath, content, mode)
if err != nil {
if err := ioutil.WriteFile(fullPath, content, mode); err != nil {
klog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err)
return err
}
@ -393,9 +392,17 @@ func (w *AtomicWriter) writePayloadToDir(payload map[string]FileProjection, dir
// open(2) to create the file, so the final mode used is "mode &
// ~umask". But we want to make sure the specified mode is used
// in the file no matter what the umask is.
err = os.Chmod(fullPath, mode)
if err != nil {
klog.Errorf("%s: unable to write file %s with mode %v: %v", w.logContext, fullPath, mode, err)
if err := os.Chmod(fullPath, mode); err != nil {
klog.Errorf("%s: unable to change file %s with mode %v: %v", w.logContext, fullPath, mode, err)
return err
}
if fileProjection.FsUser == nil {
continue
}
if err := os.Chown(fullPath, int(*fileProjection.FsUser), -1); err != nil {
klog.Errorf("%s: unable to change file %s with owner %v: %v", w.logContext, fullPath, int(*fileProjection.FsUser), err)
return err
}
}

View File

@ -627,6 +627,7 @@ func (og *operationGenerator) GenerateMountVolumeFunc(
// Execute mount
mountErr := volumeMounter.SetUp(volume.MounterArgs{
FsUser: ioutil.FsUserFrom(volumeToMount.Pod),
FsGroup: fsGroup,
DesiredSize: volumeToMount.DesiredSizeLimit,
FSGroupChangePolicy: fsGroupChangePolicy,

View File

@ -43,6 +43,7 @@ import (
"k8s.io/kubernetes/pkg/api/legacyscheme"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/securitycontext"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/types"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
@ -608,6 +609,27 @@ func GetPodVolumeNames(pod *v1.Pod) (mounts sets.String, devices sets.String) {
return
}
// FsUserFrom derives a single fsUser for the pod from the effective
// runAsUser of its containers. It returns a non-nil UID only when every
// init and regular container resolves to the same runAsUser; in any other
// situation (a container without a UID, or two differing UIDs) it returns
// nil.
func FsUserFrom(pod *v1.Pod) *int64 {
	var fsUser *int64
	// Ephemeral containers are excluded because SecurityContext is not
	// allowed on them.
	podutil.VisitContainers(&pod.Spec, podutil.InitContainers|podutil.Containers, func(container *v1.Container, containerType podutil.ContainerType) bool {
		runAsUser, ok := securitycontext.DetermineEffectiveRunAsUser(pod, container)
		if !ok {
			// This container does not specify a user: no single owner.
			fsUser = nil
			return false
		}
		if fsUser != nil && *fsUser != *runAsUser {
			// More than one distinct non-root UID: no single owner.
			fsUser = nil
			return false
		}
		if fsUser == nil {
			fsUser = runAsUser
		}
		return true
	})
	return fsUser
}
// HasMountRefs checks if the given mountPath has mountRefs.
// TODO: this is a workaround for the unmount device issue caused by gci mounter.
// In GCI cluster, if gci mounter is used for mounting, the container started by mounter

View File

@ -19,25 +19,22 @@ package util
import (
"io/ioutil"
"os"
"reflect"
"runtime"
"strings"
"testing"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
_ "k8s.io/kubernetes/pkg/apis/core/install"
"k8s.io/kubernetes/pkg/features"
"reflect"
"strings"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/util/slice"
"k8s.io/kubernetes/pkg/volume"
utilptr "k8s.io/utils/pointer"
)
var nodeLabels = map[string]string{
@ -344,6 +341,119 @@ func TestCalculateTimeoutForVolume(t *testing.T) {
}
}
// TestFsUserFrom verifies that a single fsUser is derived from a pod only
// when every init and regular container resolves to the same runAsUser, and
// that nil is returned when any container omits runAsUser or the UIDs differ.
func TestFsUserFrom(t *testing.T) {
	tests := []struct {
		desc       string
		pod        *v1.Pod
		wantFsUser *int64
	}{
		{
			desc: "no runAsUser specified",
			pod: &v1.Pod{
				Spec: v1.PodSpec{},
			},
			wantFsUser: nil,
		},
		{
			// One container omits runAsUser, so no single owner exists.
			desc: "some have runAsUser specified",
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					SecurityContext: &v1.PodSecurityContext{},
					InitContainers: []v1.Container{
						{
							SecurityContext: &v1.SecurityContext{
								RunAsUser: utilptr.Int64Ptr(1000),
							},
						},
					},
					Containers: []v1.Container{
						{
							SecurityContext: &v1.SecurityContext{
								RunAsUser: utilptr.Int64Ptr(1000),
							},
						},
						{
							SecurityContext: &v1.SecurityContext{},
						},
					},
				},
			},
			wantFsUser: nil,
		},
		{
			// Conflicting UIDs (999 vs 1000) must yield nil.
			desc: "all have runAsUser specified but not the same",
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					SecurityContext: &v1.PodSecurityContext{},
					InitContainers: []v1.Container{
						{
							SecurityContext: &v1.SecurityContext{
								RunAsUser: utilptr.Int64Ptr(999),
							},
						},
					},
					Containers: []v1.Container{
						{
							SecurityContext: &v1.SecurityContext{
								RunAsUser: utilptr.Int64Ptr(1000),
							},
						},
						{
							SecurityContext: &v1.SecurityContext{
								RunAsUser: utilptr.Int64Ptr(1000),
							},
						},
					},
				},
			},
			wantFsUser: nil,
		},
		{
			// All containers agree on 1000, so 1000 is the fsUser.
			desc: "all have runAsUser specified and the same",
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					SecurityContext: &v1.PodSecurityContext{},
					InitContainers: []v1.Container{
						{
							SecurityContext: &v1.SecurityContext{
								RunAsUser: utilptr.Int64Ptr(1000),
							},
						},
					},
					Containers: []v1.Container{
						{
							SecurityContext: &v1.SecurityContext{
								RunAsUser: utilptr.Int64Ptr(1000),
							},
						},
						{
							SecurityContext: &v1.SecurityContext{
								RunAsUser: utilptr.Int64Ptr(1000),
							},
						},
					},
				},
			},
			wantFsUser: utilptr.Int64Ptr(1000),
		},
	}

	for _, test := range tests {
		t.Run(test.desc, func(t *testing.T) {
			fsUser := FsUserFrom(test.pod)
			// Compare pointer presence first, then the pointed-to values.
			if fsUser == nil && test.wantFsUser != nil {
				t.Errorf("FsUserFrom(%v) = %v, want %d", test.pod, fsUser, *test.wantFsUser)
			}
			if fsUser != nil && test.wantFsUser == nil {
				t.Errorf("FsUserFrom(%v) = %d, want %v", test.pod, *fsUser, test.wantFsUser)
			}
			if fsUser != nil && test.wantFsUser != nil && *fsUser != *test.wantFsUser {
				t.Errorf("FsUserFrom(%v) = %d, want %d", test.pod, *fsUser, *test.wantFsUser)
			}
		})
	}
}
func TestGenerateVolumeName(t *testing.T) {
// Normal operation, no truncate

View File

@ -103,6 +103,10 @@ type Attributes struct {
// MounterArgs provides more easily extensible arguments to Mounter
type MounterArgs struct {
// When FsUser is set, the ownership of the volume will be modified to be
// owned and writable by FsUser. Otherwise, there is no side effects.
// Currently only supported with projected service account tokens.
FsUser *int64
FsGroup *int64
FSGroupChangePolicy *v1.PodFSGroupChangePolicy
DesiredSize *resource.Quantity
@ -126,7 +130,7 @@ type Mounter interface {
// SetUp prepares and mounts/unpacks the volume to a
// self-determined directory path. The mount point and its
// content should be owned by 'fsGroup' so that it can be
// content should be owned by `fsUser` or 'fsGroup' so that it can be
// accessed by the pod. This may be called more than once, so
// implementations must be idempotent.
// It could return following types of errors:
@ -137,7 +141,7 @@ type Mounter interface {
// SetUpAt prepares and mounts/unpacks the volume to the
// specified directory path, which may or may not exist yet.
// The mount point and its content should be owned by
// The mount point and its content should be owned by `fsUser`
// 'fsGroup' so that it can be accessed by the pod. This may
// be called more than once, so implementations must be
// idempotent.

View File

@ -39,6 +39,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
imageutils "k8s.io/kubernetes/test/utils/image"
"github.com/onsi/ginkgo"
@ -417,6 +418,127 @@ var _ = SIGDescribe("ServiceAccounts", func() {
}
})
/*
	Testname: Projected service account token file ownership and permission.
	Description: Ensure that a projected service account token is mounted
	with the correct file ownership and permissions. We test the
	following scenarios here:
	1. RunAsUser is set,
	2. FsGroup is set,
	3. RunAsUser and FsGroup are set,
	4. Default, neither RunAsUser nor FsGroup is set.
	Containers MUST verify that the projected service account token can be
	read and has the correct file mode set, including ownership and permission.
*/
ginkgo.It("should set ownership and permission when RunAsUser or FsGroup is present [LinuxOnly] [NodeFeature:FSGroup] [Feature:TokenRequestProjection]", func() {
	// File ownership/permission semantics are POSIX-specific.
	e2eskipper.SkipIfNodeOSDistroIs("windows")

	var (
		podName         = "test-pod-" + string(uuid.NewUUID())
		containerName   = "test-container"
		volumeName      = "test-volume"
		volumeMountPath = "/test-volume"
		tokenVolumePath = "/test-volume/token"
		int64p          = func(i int64) *int64 { return &i }
	)

	// Base pod: a projected service account token volume mounted at
	// /test-volume, and a mounttest container that prints the token file's
	// permissions, owner, and content for verification.
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
		},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					Name: volumeName,
					VolumeSource: v1.VolumeSource{
						Projected: &v1.ProjectedVolumeSource{
							Sources: []v1.VolumeProjection{
								{
									ServiceAccountToken: &v1.ServiceAccountTokenProjection{
										Path:              "token",
										ExpirationSeconds: int64p(60 * 60),
									},
								},
							},
						},
					},
				},
			},
			Containers: []v1.Container{
				{
					Name:  containerName,
					Image: imageutils.GetE2EImage(imageutils.Agnhost),
					Args: []string{
						"mounttest",
						fmt.Sprintf("--file_perm=%v", tokenVolumePath),
						fmt.Sprintf("--file_owner=%v", tokenVolumePath),
						fmt.Sprintf("--file_content=%v", tokenVolumePath),
					},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      volumeName,
							MountPath: volumeMountPath,
						},
					},
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}

	// Expected mode/owner for each combination of RunAsUser and FsGroup:
	// with only RunAsUser the file is 0600 owned by that UID; with FsGroup
	// it becomes group-readable (0640) under that GID; with neither, the
	// world-readable default (0644, root-owned) applies.
	testcases := []struct {
		runAsUser bool
		fsGroup   bool
		wantPerm  string
		wantUID   int64
		wantGID   int64
	}{
		{
			runAsUser: true,
			wantPerm:  "-rw-------",
			wantUID:   1000,
			wantGID:   0,
		},
		{
			fsGroup:  true,
			wantPerm: "-rw-r-----",
			wantUID:  0,
			wantGID:  10000,
		},
		{
			runAsUser: true,
			fsGroup:   true,
			wantPerm:  "-rw-r-----",
			wantUID:   1000,
			wantGID:   10000,
		},
		{
			wantPerm: "-rw-r--r--",
			wantUID:  0,
			wantGID:  0,
		},
	}

	for _, tc := range testcases {
		// Reset the security context per case, then apply the case's knobs.
		pod.Spec.SecurityContext = &v1.PodSecurityContext{}
		if tc.runAsUser {
			pod.Spec.SecurityContext.RunAsUser = &tc.wantUID
		}
		if tc.fsGroup {
			pod.Spec.SecurityContext.FSGroup = &tc.wantGID
		}

		// Regexps matched against the mounttest container's output.
		output := []string{
			fmt.Sprintf("perms of file \"%v\": %s", tokenVolumePath, tc.wantPerm),
			fmt.Sprintf("content of file \"%v\": %s", tokenVolumePath, ".+"),
			fmt.Sprintf("owner UID of \"%v\": %d", tokenVolumePath, tc.wantUID),
			fmt.Sprintf("owner GID of \"%v\": %d", tokenVolumePath, tc.wantGID),
		}
		f.TestContainerOutputRegexp("service account token: ", pod, 0, output)
	}
})
ginkgo.It("should support InClusterConfig with token rotation [Slow] [Feature:TokenRequestProjection]", func() {
cfg, err := framework.LoadConfig()
framework.ExpectNoError(err)