Merge pull request #130210 from everpeace/KEP-3619-promote-SupplementalGroupsPolicy-to-Beta
KEP-3619: Promote SupplementalGroupsPolicy feature to Beta
Commit 451d032915
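For orientation, the field this PR graduates is pod-level and looks as follows from a client's point of view. This is a minimal illustrative sketch, not part of the diff; the pod and image names are placeholders, while the types and values mirror the ones exercised throughout the changes below.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

func main() {
	// Strict attaches only the explicitly requested groups to the container
	// processes; Merge (the default) also merges in the groups that the
	// image's /etc/group defines for the container's primary UID.
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "strict-groups-demo"}, // placeholder name
		Spec: v1.PodSpec{
			SecurityContext: &v1.PodSecurityContext{
				RunAsUser:                ptr.To(int64(1000)),
				SupplementalGroups:       []int64{60000},
				SupplementalGroupsPolicy: ptr.To(v1.SupplementalGroupsPolicyStrict),
			},
			Containers: []v1.Container{{
				Name:  "main",
				Image: "example.com/app:latest", // placeholder image
			}},
		},
	}
	fmt.Println(*pod.Spec.SecurityContext.SupplementalGroupsPolicy) // Strict
}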
@@ -327,6 +327,18 @@ func SetContainerStatuses(containerStatuses ...api.ContainerStatus) TweakPodStatus {
 	}
 }
 
+func SetInitContainerStatuses(containerStatuses ...api.ContainerStatus) TweakPodStatus {
+	return func(podstatus *api.PodStatus) {
+		podstatus.InitContainerStatuses = containerStatuses
+	}
+}
+
+func SetEphemeralContainerStatuses(containerStatuses ...api.ContainerStatus) TweakPodStatus {
+	return func(podstatus *api.PodStatus) {
+		podstatus.EphemeralContainerStatuses = containerStatuses
+	}
+}
+
 func MakeContainerStatus(name string, allocatedResources api.ResourceList) api.ContainerStatus {
 	cs := api.ContainerStatus{
 		Name: name,
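The two new tweaks mirror the existing SetContainerStatuses and compose with MakePodStatus, which is how the validation tests later in this diff consume them. A small usage sketch; the podtest import path and the MakePodStatus signature are assumed from how the tests below use them:

package example

import (
	podtest "k8s.io/kubernetes/pkg/api/pod/testing" // assumed path of the helpers above
	api "k8s.io/kubernetes/pkg/apis/core"
)

// buildStatus composes all three status tweaks into a single api.PodStatus.
func buildStatus() api.PodStatus {
	return podtest.MakePodStatus(
		podtest.SetContainerStatuses(api.ContainerStatus{Name: "app"}),
		podtest.SetInitContainerStatuses(api.ContainerStatus{Name: "init"}),
		podtest.SetEphemeralContainerStatuses(api.ContainerStatus{Name: "debugger"}),
	)
}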
@@ -8624,10 +8624,10 @@ func validateContainerStatusUsers(containerStatuses []core.ContainerStatus, fldPath
 		switch osName {
 		case core.Windows:
 			if containerUser.Linux != nil {
-				allErrors = append(allErrors, field.Forbidden(fldPath.Index(i).Child("linux"), "cannot be set for a windows pod"))
+				allErrors = append(allErrors, field.Forbidden(fldPath.Index(i).Child("user").Child("linux"), "cannot be set for a windows pod"))
 			}
 		case core.Linux:
-			allErrors = append(allErrors, validateLinuxContainerUser(containerUser.Linux, fldPath.Index(i).Child("linux"))...)
+			allErrors = append(allErrors, validateLinuxContainerUser(containerUser.Linux, fldPath.Index(i).Child("user").Child("linux"))...)
 		}
 	}
 	return allErrors
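The validation these error paths hang off enforces that uid, gid, and each supplemental group reported in status fit in an int32, as the messages in the tests below spell out. A standalone sketch of that bounds check; the real helper is validateLinuxContainerUser in this file, whose body is not shown in this diff:

package main

import (
	"fmt"
	"math"
)

// validStatusID reports whether an ID reported in pod status is acceptable:
// between 0 and 2147483647 (math.MaxInt32), inclusive.
func validStatusID(id int64) error {
	if id < 0 || id > math.MaxInt32 {
		return fmt.Errorf("must be between 0 and %d, inclusive", math.MaxInt32)
	}
	return nil
}

func main() {
	fmt.Println(validStatusID(-1)) // rejected, as in the -1 test cases below
	fmt.Println(validStatusID(0))  // <nil>: 0 is a valid uid/gid
}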
@@ -8279,6 +8279,8 @@ func TestValidateEphemeralContainers(t *testing.T) {
 func TestValidateWindowsPodSecurityContext(t *testing.T) {
 	validWindowsSC := &core.PodSecurityContext{WindowsOptions: &core.WindowsSecurityContextOptions{RunAsUserName: ptr.To("dummy")}}
 	invalidWindowsSC := &core.PodSecurityContext{SELinuxOptions: &core.SELinuxOptions{Role: "dummyRole"}}
+	invalidWindowsSC2 := &core.PodSecurityContext{SupplementalGroupsPolicy: ptr.To(core.SupplementalGroupsPolicyStrict)}
+
 	cases := map[string]struct {
 		podSec    *core.PodSpec
 		expectErr bool
@@ -8289,12 +8291,18 @@ func TestValidateWindowsPodSecurityContext(t *testing.T) {
 			podSec:    &core.PodSpec{SecurityContext: validWindowsSC},
 			expectErr: false,
 		},
-		"invalid SC, windows, error": {
+		"invalid SC(by SELinuxOptions), windows, error": {
 			podSec:      &core.PodSpec{SecurityContext: invalidWindowsSC},
 			errorType:   "FieldValueForbidden",
 			errorDetail: "cannot be set for a windows pod",
 			expectErr:   true,
 		},
+		"invalid SC(by SupplementalGroupsPolicy), windows, error": {
+			podSec:      &core.PodSpec{SecurityContext: invalidWindowsSC2},
+			errorType:   "FieldValueForbidden",
+			errorDetail: "cannot be set for a windows pod",
+			expectErr:   true,
+		},
 	}
 	for k, v := range cases {
 		t.Run(k, func(t *testing.T) {
@@ -10097,6 +10105,9 @@ func TestValidatePodSpec(t *testing.T) {
 	goodfsGroupChangePolicy := core.FSGroupChangeAlways
 	badfsGroupChangePolicy1 := core.PodFSGroupChangePolicy("invalid")
 	badfsGroupChangePolicy2 := core.PodFSGroupChangePolicy("")
+	goodSupplementalGroupsPolicy := core.SupplementalGroupsPolicyStrict
+	badSupplementalGroupsPolicy1 := core.SupplementalGroupsPolicy("invalid")
+	badSupplementalGroupsPolicy2 := core.SupplementalGroupsPolicy("")
 
 	successCases := map[string]*core.Pod{
 		"populate basic fields, leave defaults for most": podtest.MakePod(""),
@@ -10205,6 +10216,11 @@ func TestValidatePodSpec(t *testing.T) {
 				core.ContainerResizePolicy{ResourceName: "cpu", RestartPolicy: "NotRequired"}),
 			)),
 		),
+		"populate SupplementalGroupsPolicy": podtest.MakePod("",
+			podtest.SetSecurityContext(&core.PodSecurityContext{
+				SupplementalGroupsPolicy: &goodSupplementalGroupsPolicy,
+			}),
+		),
 	}
 	for k, v := range successCases {
 		t.Run(k, func(t *testing.T) {
@@ -10344,6 +10360,16 @@ func TestValidatePodSpec(t *testing.T) {
 				FSGroupChangePolicy: &badfsGroupChangePolicy1,
 			}),
 		),
+		"bad empty SupplementalGroupsPolicy": *podtest.MakePod("",
+			podtest.SetSecurityContext(&core.PodSecurityContext{
+				SupplementalGroupsPolicy: &badSupplementalGroupsPolicy2,
+			}),
+		),
+		"bad invalid SupplementalGroupsPolicy": *podtest.MakePod("",
+			podtest.SetSecurityContext(&core.PodSecurityContext{
+				SupplementalGroupsPolicy: &badSupplementalGroupsPolicy1,
+			}),
+		),
 	}
 	for k, v := range failureCases {
 		t.Run(k, func(t *testing.T) {
@@ -14745,6 +14771,253 @@ func TestValidatePodStatusUpdate(t *testing.T) {
 		),
 		"",
 		"qosClass no change",
+	}, {
+		*podtest.MakePod("foo",
+			podtest.SetStatus(
+				podtest.MakePodStatus(
+					podtest.SetContainerStatuses(core.ContainerStatus{}),
+				),
+			),
+		),
+		*podtest.MakePod("foo"),
+		"",
+		"nil containerUser in containerStatuses",
+	}, {
+		*podtest.MakePod("foo",
+			podtest.SetStatus(
+				podtest.MakePodStatus(
+					podtest.SetContainerStatuses(core.ContainerStatus{
+						User: &core.ContainerUser{},
+					}),
+				),
+			),
+		),
+		*podtest.MakePod("foo"),
+		"",
+		"empty containerUser in containerStatuses",
+	}, {
+		*podtest.MakePod("foo",
+			podtest.SetStatus(
+				podtest.MakePodStatus(
+					podtest.SetContainerStatuses(core.ContainerStatus{
+						User: &core.ContainerUser{
+							Linux: &core.LinuxContainerUser{
+								UID:                0,
+								GID:                0,
+								SupplementalGroups: []int64{0, 100},
+							},
+						},
+					}),
+				),
+			),
+		),
+		*podtest.MakePod("foo"),
+		"",
+		"containerUser with valid ids in containerStatuses",
+	}, {
+		*podtest.MakePod("foo",
+			podtest.SetStatus(
+				podtest.MakePodStatus(
+					podtest.SetContainerStatuses(core.ContainerStatus{
+						User: &core.ContainerUser{
+							Linux: &core.LinuxContainerUser{
+								UID:                -1,
+								GID:                -1,
+								SupplementalGroups: []int64{-1},
+							},
+						},
+					}),
+				),
+			),
+		),
+		*podtest.MakePod("foo"),
+		`status.containerStatuses[0].user.linux.uid: Invalid value: -1: must be between 0 and 2147483647, inclusive` +
+			`, status.containerStatuses[0].user.linux.gid: Invalid value: -1: must be between 0 and 2147483647, inclusive` +
+			`, status.containerStatuses[0].user.linux.supplementalGroups[0]: Invalid value: -1: must be between 0 and 2147483647, inclusive`,
+		"containerUser with invalid uids/gids/supplementalGroups in containerStatuses",
+	}, {
+		*podtest.MakePod("foo",
+			podtest.SetOS(core.Windows),
+			podtest.SetStatus(
+				podtest.MakePodStatus(
+					podtest.SetContainerStatuses(core.ContainerStatus{
+						User: &core.ContainerUser{
+							Linux: &core.LinuxContainerUser{},
+						},
+					}),
+				),
+			),
+		),
+		*podtest.MakePod("foo",
+			podtest.SetOS(core.Windows),
+		),
+		`status.containerStatuses[0].user.linux: Forbidden: cannot be set for a windows pod`,
+		"containerUser cannot be set for windows pod in containerStatuses",
+	}, {
+
+		*podtest.MakePod("foo",
+			podtest.SetStatus(
+				podtest.MakePodStatus(
+					podtest.SetInitContainerStatuses(core.ContainerStatus{}),
+				),
+			),
+		),
+		*podtest.MakePod("foo"),
+		"",
+		"nil containerUser in initContainerStatuses",
+	}, {
+		*podtest.MakePod("foo",
+			podtest.SetStatus(
+				podtest.MakePodStatus(
+					podtest.SetInitContainerStatuses(core.ContainerStatus{
+						User: &core.ContainerUser{},
+					}),
+				),
+			),
+		),
+		*podtest.MakePod("foo"),
+		"",
+		"empty containerUser in initContainerStatuses",
+	}, {
+		*podtest.MakePod("foo",
+			podtest.SetStatus(
+				podtest.MakePodStatus(
+					podtest.SetInitContainerStatuses(core.ContainerStatus{
+						User: &core.ContainerUser{
+							Linux: &core.LinuxContainerUser{
+								UID:                0,
+								GID:                0,
+								SupplementalGroups: []int64{0, 100},
+							},
+						},
+					}),
+				),
+			),
+		),
+		*podtest.MakePod("foo"),
+		"",
+		"containerUser with valid ids in initContainerStatuses",
+	}, {
+		*podtest.MakePod("foo",
+			podtest.SetStatus(
+				podtest.MakePodStatus(
+					podtest.SetInitContainerStatuses(core.ContainerStatus{
+						User: &core.ContainerUser{
+							Linux: &core.LinuxContainerUser{
+								UID:                -1,
+								GID:                -1,
+								SupplementalGroups: []int64{-1},
+							},
+						},
+					}),
+				),
+			),
+		),
+		*podtest.MakePod("foo"),
+		`status.initContainerStatuses[0].user.linux.uid: Invalid value: -1: must be between 0 and 2147483647, inclusive` +
+			`, status.initContainerStatuses[0].user.linux.gid: Invalid value: -1: must be between 0 and 2147483647, inclusive` +
+			`, status.initContainerStatuses[0].user.linux.supplementalGroups[0]: Invalid value: -1: must be between 0 and 2147483647, inclusive`,
+		"containerUser with invalid uids/gids/supplementalGroups in initContainerStatuses",
+	}, {
+		*podtest.MakePod("foo",
+			podtest.SetOS(core.Windows),
+			podtest.SetStatus(
+				podtest.MakePodStatus(
+					podtest.SetInitContainerStatuses(core.ContainerStatus{
+						User: &core.ContainerUser{
+							Linux: &core.LinuxContainerUser{},
+						},
+					}),
+				),
+			),
+		),
+		*podtest.MakePod("foo",
+			podtest.SetOS(core.Windows),
+		),
+		`status.initContainerStatuses[0].user.linux: Forbidden: cannot be set for a windows pod`,
+		"containerUser cannot be set for windows pod in initContainerStatuses",
+	}, {
+		*podtest.MakePod("foo",
+			podtest.SetStatus(
+				podtest.MakePodStatus(
+					podtest.SetEphemeralContainerStatuses(core.ContainerStatus{}),
+				),
+			),
+		),
+		*podtest.MakePod("foo"),
+		"",
+		"nil containerUser in ephemeralContainerStatuses",
+	}, {
+		*podtest.MakePod("foo",
+			podtest.SetStatus(
+				podtest.MakePodStatus(
+					podtest.SetEphemeralContainerStatuses(core.ContainerStatus{
+						User: &core.ContainerUser{},
+					}),
+				),
+			),
+		),
+		*podtest.MakePod("foo"),
+		"",
+		"empty containerUser in ephemeralContainerStatuses",
+	}, {
+		*podtest.MakePod("foo",
+			podtest.SetStatus(
+				podtest.MakePodStatus(
+					podtest.SetEphemeralContainerStatuses(core.ContainerStatus{
+						User: &core.ContainerUser{
+							Linux: &core.LinuxContainerUser{
+								UID:                0,
+								GID:                0,
+								SupplementalGroups: []int64{0, 100},
+							},
+						},
+					}),
+				),
+			),
+		),
+		*podtest.MakePod("foo"),
+		"",
+		"containerUser with valid ids in ephemeralContainerStatuses",
+	}, {
+		*podtest.MakePod("foo",
+			podtest.SetStatus(
+				podtest.MakePodStatus(
+					podtest.SetEphemeralContainerStatuses(core.ContainerStatus{
+						User: &core.ContainerUser{
+							Linux: &core.LinuxContainerUser{
+								UID:                -1,
+								GID:                -1,
+								SupplementalGroups: []int64{-1},
+							},
+						},
+					}),
+				),
+			),
+		),
+		*podtest.MakePod("foo"),
+		`status.ephemeralContainerStatuses[0].user.linux.uid: Invalid value: -1: must be between 0 and 2147483647, inclusive` +
+			`, status.ephemeralContainerStatuses[0].user.linux.gid: Invalid value: -1: must be between 0 and 2147483647, inclusive` +
+			`, status.ephemeralContainerStatuses[0].user.linux.supplementalGroups[0]: Invalid value: -1: must be between 0 and 2147483647, inclusive`,
+		"containerUser with invalid uids/gids/supplementalGroups in ephemeralContainerStatuses",
+	}, {
+		*podtest.MakePod("foo",
+			podtest.SetOS(core.Windows),
+			podtest.SetStatus(
+				podtest.MakePodStatus(
+					podtest.SetEphemeralContainerStatuses(core.ContainerStatus{
+						User: &core.ContainerUser{
+							Linux: &core.LinuxContainerUser{},
+						},
+					}),
+				),
+			),
+		),
+		*podtest.MakePod("foo",
+			podtest.SetOS(core.Windows),
+		),
+		`status.ephemeralContainerStatuses[0].user.linux: Forbidden: cannot be set for a windows pod`,
+		"containerUser cannot be set for windows pod in ephemeralContainerStatuses",
 	},
 	}
 
@@ -25553,221 +25826,6 @@ func TestValidatePodDNSConfigWithRelaxedSearchDomain(t *testing.T) {
 	}
 }
 
-// TODO: merge these test to TestValidatePodSpec after SupplementalGroupsPolicy feature graduates to Beta
-func TestValidatePodSpecWithSupplementalGroupsPolicy(t *testing.T) {
-	fldPath := field.NewPath("spec")
-	badSupplementalGroupsPolicyEmpty := ptr.To(core.SupplementalGroupsPolicy(""))
-	badSupplementalGroupsPolicyNotSupported := ptr.To(core.SupplementalGroupsPolicy("not-supported"))
-
-	validatePodSpecTestCases := map[string]struct {
-		securityContext *core.PodSecurityContext
-		wantFieldErrors field.ErrorList
-	}{
-		"nil SecurityContext is valid": {
-			securityContext: nil,
-		},
-		"nil SupplementalGroupsPolicy is valid": {
-			securityContext: &core.PodSecurityContext{},
-		},
-		"SupplementalGroupsPolicyMerge is valid": {
-			securityContext: &core.PodSecurityContext{
-				SupplementalGroupsPolicy: ptr.To(core.SupplementalGroupsPolicyMerge),
-			},
-		},
-		"SupplementalGroupsPolicyStrict is valid": {
-			securityContext: &core.PodSecurityContext{
-				SupplementalGroupsPolicy: ptr.To(core.SupplementalGroupsPolicyStrict),
-			},
-		},
-		"empty SupplementalGroupsPolicy is invalid": {
-			securityContext: &core.PodSecurityContext{
-				SupplementalGroupsPolicy: badSupplementalGroupsPolicyEmpty,
-			},
-			wantFieldErrors: field.ErrorList{
-				field.NotSupported(
-					fldPath.Child("securityContext").Child("supplementalGroupsPolicy"),
-					badSupplementalGroupsPolicyEmpty, sets.List(validSupplementalGroupsPolicies)),
-			},
-		},
-		"not-supported SupplementalGroupsPolicy is invalid": {
-			securityContext: &core.PodSecurityContext{
-				SupplementalGroupsPolicy: badSupplementalGroupsPolicyNotSupported,
-			},
-			wantFieldErrors: field.ErrorList{
-				field.NotSupported(
-					fldPath.Child("securityContext").Child("supplementalGroupsPolicy"),
-					badSupplementalGroupsPolicyNotSupported, sets.List(validSupplementalGroupsPolicies)),
-			},
-		},
-	}
-	for name, tt := range validatePodSpecTestCases {
-		t.Run(name, func(t *testing.T) {
-			podSpec := podtest.MakePodSpec(podtest.SetSecurityContext(tt.securityContext), podtest.SetContainers(podtest.MakeContainer("con")))
-
-			if tt.wantFieldErrors == nil {
-				tt.wantFieldErrors = field.ErrorList{}
-			}
-			errs := ValidatePodSpec(&podSpec, nil, fldPath, PodValidationOptions{})
-			if diff := cmp.Diff(tt.wantFieldErrors, errs); diff != "" {
-				t.Errorf("unexpected field errors (-want, +got):\n%s", diff)
-			}
-		})
-	}
-}
-
-// TODO: merge these testcases to TestValidateWindowsPodSecurityContext after SupplementalGroupsPolicy feature graduates to Beta
-func TestValidateWindowsPodSecurityContextSupplementalGroupsPolicy(t *testing.T) {
-	fldPath := field.NewPath("spec")
-
-	testCases := map[string]struct {
-		securityContext *core.PodSecurityContext
-		wantFieldErrors field.ErrorList
-	}{
-		"nil SecurityContext is valid": {
-			securityContext: nil,
-		},
-		"nil SupplementalGroupdPolicy is valid": {
-			securityContext: &core.PodSecurityContext{},
-		},
-		"non-empty SupplementalGroupdPolicy is invalid": {
-			securityContext: &core.PodSecurityContext{
-				SupplementalGroupsPolicy: ptr.To(core.SupplementalGroupsPolicyMerge),
-			},
-			wantFieldErrors: field.ErrorList{
-				field.Forbidden(
-					fldPath.Child("securityContext").Child("supplementalGroupsPolicy"),
-					"cannot be set for a windows pod"),
-			},
-		},
-	}
-
-	for name, tt := range testCases {
-		t.Run(name, func(t *testing.T) {
-			podSpec := podtest.MakePodSpec(podtest.SetSecurityContext(tt.securityContext), podtest.SetOS(core.Windows), podtest.SetContainers(podtest.MakeContainer("con")))
-			if tt.wantFieldErrors == nil {
-				tt.wantFieldErrors = field.ErrorList{}
-			}
-			errs := validateWindows(&podSpec, fldPath)
-			if diff := cmp.Diff(tt.wantFieldErrors, errs); diff != "" {
-				t.Errorf("unexpected field errors (-want, +got):\n%s", diff)
-			}
-		})
-	}
-}
-
-// TODO: merge these testcases to TestValidatePodStatusUpdate after SupplementalGroupsPolicy feature graduates to Beta
-func TestValidatePodStatusUpdateWithSupplementalGroupsPolicy(t *testing.T) {
-	badUID := int64(-1)
-	badGID := int64(-1)
-
-	containerTypes := map[string]func(podStatus *core.PodStatus, containerStatus []core.ContainerStatus){
-		"container": func(podStatus *core.PodStatus, containerStatus []core.ContainerStatus) {
-			podStatus.ContainerStatuses = containerStatus
-		},
-		"initContainer": func(podStatus *core.PodStatus, containerStatus []core.ContainerStatus) {
-			podStatus.InitContainerStatuses = containerStatus
-		},
-		"ephemeralContainer": func(podStatus *core.PodStatus, containerStatus []core.ContainerStatus) {
-			podStatus.EphemeralContainerStatuses = containerStatus
-		},
-	}
-
-	testCases := map[string]struct {
-		podOSes           []*core.PodOS
-		containerStatuses []core.ContainerStatus
-		wantFieldErrors   field.ErrorList
-	}{
-		"nil container user is valid": {
-			podOSes:           []*core.PodOS{nil, {Name: core.Linux}},
-			containerStatuses: []core.ContainerStatus{},
-		},
-		"empty container user is valid": {
-			podOSes: []*core.PodOS{nil, {Name: core.Linux}},
-			containerStatuses: []core.ContainerStatus{{
-				User: &core.ContainerUser{},
-			}},
-		},
-		"container user with valid ids": {
-			podOSes: []*core.PodOS{nil, {Name: core.Linux}},
-			containerStatuses: []core.ContainerStatus{{
-				User: &core.ContainerUser{
-					Linux: &core.LinuxContainerUser{},
-				},
-			}},
-		},
-		"container user with invalid ids": {
-			podOSes: []*core.PodOS{nil, {Name: core.Linux}},
-			containerStatuses: []core.ContainerStatus{{
-				User: &core.ContainerUser{
-					Linux: &core.LinuxContainerUser{
-						UID:                badUID,
-						GID:                badGID,
-						SupplementalGroups: []int64{badGID},
-					},
-				},
-			}},
-			wantFieldErrors: field.ErrorList{
-				field.Invalid(field.NewPath("[0].linux.uid"), badUID, "must be between 0 and 2147483647, inclusive"),
-				field.Invalid(field.NewPath("[0].linux.gid"), badGID, "must be between 0 and 2147483647, inclusive"),
-				field.Invalid(field.NewPath("[0].linux.supplementalGroups[0]"), badGID, "must be between 0 and 2147483647, inclusive"),
-			},
-		},
-		"user.linux must not be set in windows": {
-			podOSes: []*core.PodOS{{Name: core.Windows}},
-			containerStatuses: []core.ContainerStatus{{
-				User: &core.ContainerUser{
-					Linux: &core.LinuxContainerUser{},
-				},
-			}},
-			wantFieldErrors: field.ErrorList{
-				field.Forbidden(field.NewPath("[0].linux"), "cannot be set for a windows pod"),
-			},
-		},
-	}
-
-	for name, tt := range testCases {
-		for _, podOS := range tt.podOSes {
-			for containerType, setContainerStatuses := range containerTypes {
-				t.Run(fmt.Sprintf("[podOS=%v][containerType=%s] %s", podOS, containerType, name), func(t *testing.T) {
-					oldPod := &core.Pod{
-						ObjectMeta: metav1.ObjectMeta{
-							Name:            "foo",
-							ResourceVersion: "1",
-						},
-						Spec: core.PodSpec{
-							OS: podOS,
-						},
-						Status: core.PodStatus{},
-					}
-					newPod := &core.Pod{
-						ObjectMeta: metav1.ObjectMeta{
-							Name:            "foo",
-							ResourceVersion: "1",
-						},
-						Spec: core.PodSpec{
-							OS: podOS,
-						},
-					}
-					setContainerStatuses(&newPod.Status, tt.containerStatuses)
-					var expectedFieldErrors field.ErrorList
-					for _, err := range tt.wantFieldErrors {
-						expectedField := fmt.Sprintf("%s%s", field.NewPath("status").Child(containerType+"Statuses"), err.Field)
-						expectedFieldErrors = append(expectedFieldErrors, &field.Error{
-							Type:     err.Type,
-							Field:    expectedField,
-							BadValue: err.BadValue,
-							Detail:   err.Detail,
-						})
-					}
-					errs := ValidatePodStatusUpdate(newPod, oldPod, PodValidationOptions{})
-					if diff := cmp.Diff(expectedFieldErrors, errs); diff != "" {
-						t.Errorf("unexpected field errors (-want, +got):\n%s", diff)
-					}
-				})
-			}
-		}
-	}
-}
 func TestValidateContainerStatusNoAllocatedResourcesStatus(t *testing.T) {
 	containerStatuses := []core.ContainerStatus{
 		{
@@ -1749,6 +1749,7 @@ var defaultVersionedKubernetesFeatureGates = map[featuregate.Feature]featuregate.VersionedSpecs{
 
 	SupplementalGroupsPolicy: {
 		{Version: version.MustParse("1.31"), Default: false, PreRelease: featuregate.Alpha},
+		{Version: version.MustParse("1.33"), Default: true, PreRelease: featuregate.Beta},
 	},
 
 	SystemdWatchdog: {
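Given the Beta entry above, components built at (or emulating) 1.33 see the gate on by default. A minimal sketch of how a component consults it at runtime; this is standard feature-gate usage, not code from this PR:

package main

import (
	"fmt"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

func main() {
	// true by default from 1.33 on; still overridable via
	// --feature-gates=SupplementalGroupsPolicy=false while in Beta.
	fmt.Println(utilfeature.DefaultFeatureGate.Enabled(features.SupplementalGroupsPolicy))
}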
@@ -245,6 +245,7 @@ var (
 		lifecycle.PodOSNotSupported,
 		lifecycle.InvalidNodeInfo,
 		lifecycle.InitContainerRestartPolicyForbidden,
+		lifecycle.SupplementalGroupsPolicyNotSupported,
 		lifecycle.UnexpectedAdmissionError,
 		lifecycle.UnknownReason,
 		lifecycle.UnexpectedPredicateFailureType,
@@ -3656,6 +3656,15 @@ func TestRecordAdmissionRejection(t *testing.T) {
 				# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
 				# TYPE kubelet_admission_rejections_total counter
 				kubelet_admission_rejections_total{reason="InitContainerRestartPolicyForbidden"} 1
 			`,
 		},
 		{
+			name:   "SupplementalGroupsPolicyNotSupported",
+			reason: lifecycle.SupplementalGroupsPolicyNotSupported,
+			wants: `
+				# HELP kubelet_admission_rejections_total [ALPHA] Cumulative number pod admission rejections by the Kubelet.
+				# TYPE kubelet_admission_rejections_total counter
+				kubelet_admission_rejections_total{reason="SupplementalGroupsPolicyNotSupported"} 1
+			`,
+		},
+		{
@@ -21,13 +21,17 @@ import (
 	"runtime"
 
 	v1 "k8s.io/api/core/v1"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	"k8s.io/component-base/featuregate"
 	"k8s.io/component-helpers/scheduling/corev1"
 	"k8s.io/klog/v2"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/scheduler"
 	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
+	"k8s.io/utils/ptr"
 )
 
 const (
@@ -49,6 +53,11 @@ const (
 	// than Always for some of its init containers.
 	InitContainerRestartPolicyForbidden = "InitContainerRestartPolicyForbidden"
 
+	// SupplementalGroupsPolicyNotSupported is used to denote that the pod was
+	// rejected admission to the node because the node does not support
+	// the pod's SupplementalGroupsPolicy.
+	SupplementalGroupsPolicyNotSupported = "SupplementalGroupsPolicyNotSupported"
+
 	// UnexpectedAdmissionError is used to denote that the pod was rejected
 	// admission to the node because of an error during admission that could not
 	// be categorized.
@@ -132,6 +141,16 @@ func (w *predicateAdmitHandler) Admit(attrs *PodAdmitAttributes) PodAdmitResult {
 		}
 	}
 
+	if rejectPodAdmissionBasedOnSupplementalGroupsPolicy(admitPod, node) {
+		message := fmt.Sprintf("SupplementalGroupsPolicy=%s is not supported in this node", v1.SupplementalGroupsPolicyStrict)
+		klog.InfoS("Failed to admit pod", "pod", klog.KObj(admitPod), "message", message)
+		return PodAdmitResult{
+			Admit:   false,
+			Reason:  SupplementalGroupsPolicyNotSupported,
+			Message: message,
+		}
+	}
+
 	pods := attrs.OtherPods
 	nodeInfo := schedulerframework.NewNodeInfo(pods...)
 	nodeInfo.SetNode(node)
@@ -254,6 +273,45 @@ func rejectPodAdmissionBasedOnOSField(pod *v1.Pod) bool {
 	return string(pod.Spec.OS.Name) != runtime.GOOS
 }
 
+// rejectPodAdmissionBasedOnSupplementalGroupsPolicy rejects the pod only if
+// - the feature is beta or above, and SupplementalGroupsPolicy=Strict is set in the pod,
+// - but the node does not support the feature.
+//
+// Note: while the feature is alpha, or not yet released, in the emulated version,
+// the pod should be admitted for backward compatibility.
+func rejectPodAdmissionBasedOnSupplementalGroupsPolicy(pod *v1.Pod, node *v1.Node) bool {
+	admit, reject := false, true // just for readability
+
+	inUse := (pod.Spec.SecurityContext != nil && pod.Spec.SecurityContext.SupplementalGroupsPolicy != nil)
+	if !inUse {
+		return admit
+	}
+
+	isBetaOrAbove := false
+	if featureSpec, ok := utilfeature.DefaultMutableFeatureGate.GetAll()[features.SupplementalGroupsPolicy]; ok {
+		isBetaOrAbove = (featureSpec.PreRelease == featuregate.Beta) || (featureSpec.PreRelease == featuregate.GA)
+	}
+
+	if !isBetaOrAbove {
+		return admit
+	}
+
+	featureSupportedOnNode := ptr.Deref(
+		ptr.Deref(node.Status.Features, v1.NodeFeatures{SupplementalGroupsPolicy: ptr.To(false)}).SupplementalGroupsPolicy,
+		false,
+	)
+	effectivePolicy := ptr.Deref(
+		pod.Spec.SecurityContext.SupplementalGroupsPolicy,
+		v1.SupplementalGroupsPolicyMerge,
+	)
+
+	if effectivePolicy == v1.SupplementalGroupsPolicyStrict && !featureSupportedOnNode {
+		return reject
+	}
+
+	return admit
+}
+
 func removeMissingExtendedResources(pod *v1.Pod, nodeInfo *schedulerframework.NodeInfo) *v1.Pod {
 	filterExtendedResources := func(containers []v1.Container) {
 		for i, c := range containers {
@@ -24,12 +24,16 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	utilversion "k8s.io/apimachinery/pkg/util/version"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/kubelet/types"
 	schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
+	"k8s.io/utils/ptr"
 )
 
 var (
@@ -431,3 +435,115 @@ func TestRejectPodAdmissionBasedOnOSField(t *testing.T) {
 		})
 	}
 }
+
+func TestPodAdmissionBasedOnSupplementalGroupsPolicy(t *testing.T) {
+	nodeWithFeature := &v1.Node{
+		ObjectMeta: metav1.ObjectMeta{Name: "test"},
+		Status: v1.NodeStatus{
+			Features: &v1.NodeFeatures{
+				SupplementalGroupsPolicy: ptr.To(true),
+			},
+		},
+	}
+	nodeWithoutFeature := &v1.Node{
+		ObjectMeta: metav1.ObjectMeta{Name: "test"},
+	}
+	podNotUsingFeature := &v1.Pod{}
+	podUsingFeature := &v1.Pod{Spec: v1.PodSpec{
+		SecurityContext: &v1.PodSecurityContext{
+			SupplementalGroupsPolicy: ptr.To(v1.SupplementalGroupsPolicyStrict),
+		},
+	}}
+	tests := []struct {
+		name             string
+		emulationVersion *utilversion.Version
+		node             *v1.Node
+		pod              *v1.Pod
+		expectRejection  bool
+	}{
+		// The feature is Beta in v1.33
+		{
+			name:             "feature=Beta, node=feature not supported, pod=in use: it should REJECT",
+			emulationVersion: utilversion.MustParse("1.33"),
+			node:             nodeWithoutFeature,
+			pod:              podUsingFeature,
+			expectRejection:  true,
+		},
+		{
+			name:             "feature=Beta, node=feature supported, pod=in use: it should ADMIT",
+			emulationVersion: utilversion.MustParse("1.33"),
+			node:             nodeWithFeature,
+			pod:              podUsingFeature,
+			expectRejection:  false,
+		},
+		{
+			name:             "feature=Beta, node=feature not supported, pod=not in use: it should ADMIT",
+			emulationVersion: utilversion.MustParse("1.33"),
+			node:             nodeWithoutFeature,
+			pod:              podNotUsingFeature,
+			expectRejection:  false,
+		},
+		{
+			name:             "feature=Beta, node=feature supported, pod=not in use: it should ADMIT",
+			emulationVersion: utilversion.MustParse("1.33"),
+			node:             nodeWithFeature,
+			pod:              podNotUsingFeature,
+			expectRejection:  false,
+		},
+		// The feature is Alpha(v1.31, v1.32) in emulated version
+		// Note: When the feature is alpha in emulated version, it should always admit for backward compatibility
+		{
+			name:             "feature=Alpha, node=feature not supported, pod=feature used: it should ADMIT",
+			emulationVersion: utilversion.MustParse("1.32"),
+			node:             nodeWithoutFeature,
+			pod:              podUsingFeature,
+			expectRejection:  false,
+		},
+		{
+			name:             "feature=Alpha, node=feature not supported, pod=feature not used: it should ADMIT",
+			emulationVersion: utilversion.MustParse("1.32"),
+			node:             nodeWithoutFeature,
+			pod:              podNotUsingFeature,
+			expectRejection:  false,
+		},
+		{
+			name:             "feature=Alpha, node=feature supported, pod=feature used: it should ADMIT",
+			emulationVersion: utilversion.MustParse("1.32"),
+			node:             nodeWithFeature,
+			pod:              podUsingFeature,
+			expectRejection:  false,
+		},
+		{
+			name:             "feature=Alpha, node=feature supported, pod=feature not used: it should ADMIT",
+			emulationVersion: utilversion.MustParse("1.32"),
+			node:             nodeWithFeature,
+			pod:              podNotUsingFeature,
+			expectRejection:  false,
+		},
+		// The feature is not yet released (< v1.31) in emulated version (this can happen when only kubelet downgraded).
+		// Note: When the feature is not yet released in emulated version, it should always admit for backward compatibility
+		{
+			name:             "feature=NotReleased, node=feature not supported, pod=feature used: it should ADMIT",
+			emulationVersion: utilversion.MustParse("1.30"),
+			node:             nodeWithoutFeature,
+			pod:              podUsingFeature,
+			expectRejection:  false,
+		},
+		{
+			name:             "feature=NotReleased, node=feature not supported, pod=feature not used: it should ADMIT",
+			emulationVersion: utilversion.MustParse("1.30"),
+			node:             nodeWithoutFeature,
+			pod:              podNotUsingFeature,
+			expectRejection:  false,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			featuregatetesting.SetFeatureGateEmulationVersionDuringTest(t, utilfeature.DefaultFeatureGate, test.emulationVersion)
+			actualResult := rejectPodAdmissionBasedOnSupplementalGroupsPolicy(test.pod, test.node)
+			if test.expectRejection != actualResult {
+				t.Errorf("unexpected result, expected %v but got %v", test.expectRejection, actualResult)
+			}
+		})
+	}
+}
@@ -1529,6 +1529,10 @@
     lockToDefault: false
     preRelease: Alpha
     version: "1.31"
+  - default: true
+    lockToDefault: false
+    preRelease: Beta
+    version: "1.33"
 - name: SystemdWatchdog
   versionedSpecs:
   - default: true
@@ -20,14 +20,19 @@ import (
 	"context"
 	"fmt"
 	"os/exec"
+	"reflect"
 	"strconv"
 	"strings"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/util/uuid"
+	"k8s.io/kubernetes/pkg/apis/core"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/events"
+	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
@@ -40,6 +45,7 @@ import (
 
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
+	"github.com/onsi/gomega/gcustom"
 )
 
 var (
@@ -707,6 +713,249 @@ var _ = SIGDescribe("Security Context", func() {
 			}
 		})
 	})
+
+	f.Context("SupplementalGroupsPolicy [LinuxOnly]", feature.SupplementalGroupsPolicy, framework.WithFeatureGate(features.SupplementalGroupsPolicy), func() {
+		timeout := 1 * time.Minute
+
+		agnhostImage := imageutils.GetE2EImage(imageutils.Agnhost)
+		uidInImage := int64(1000)
+		gidDefinedInImage := int64(50000)
+		supplementalGroup := int64(60000)
+
+		mkPod := func(policy *v1.SupplementalGroupsPolicy) *v1.Pod {
+			// In specified image(agnhost E2E image),
+			// - user-defined-in-image(uid=1000) is defined
+			// - user-defined-in-image belongs to group-defined-in-image(gid=50000)
+			// thus, resultant supplementary group of the container processes should be
+			// - 1000 : self
+			// - 50000: pre-defined groups defined in the container image(/etc/group) of self(uid=1000)
+			// - 60000: specified in SupplementalGroups
+			// $ id -G
+			// 1000 50000 60000 (if SupplementalGroupsPolicy=Merge or not set)
+			// 1000 60000 (if SupplementalGroupsPolicy=Strict)
+			podName := "sppl-grp-plcy-" + string(uuid.NewUUID())
+			return &v1.Pod{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:        podName,
+					Labels:      map[string]string{"name": podName},
+					Annotations: map[string]string{},
+				},
+				Spec: v1.PodSpec{
+					SecurityContext: &v1.PodSecurityContext{
+						RunAsUser:                &uidInImage,
+						SupplementalGroups:       []int64{supplementalGroup},
+						SupplementalGroupsPolicy: policy,
+					},
+					Containers: []v1.Container{
+						{
+							Name:    "test-container",
+							Image:   agnhostImage,
+							Command: []string{"sh", "-c", "id -G; while :; do sleep 1; done"},
+						},
+					},
+					RestartPolicy: v1.RestartPolicyNever,
+				},
+			}
+		}
+
+		nodeSupportsSupplementalGroupsPolicy := func(ctx context.Context, f *framework.Framework, nodeName string) bool {
+			node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
+			framework.ExpectNoError(err)
+			gomega.Expect(node).NotTo(gomega.BeNil())
+			if node.Status.Features != nil {
+				supportsSupplementalGroupsPolicy := node.Status.Features.SupplementalGroupsPolicy
+				if supportsSupplementalGroupsPolicy != nil && *supportsSupplementalGroupsPolicy {
+					return true
+				}
+			}
+			return false
+		}
+		waitForContainerUser := func(ctx context.Context, f *framework.Framework, podName string, containerName string, expectedContainerUser *v1.ContainerUser) error {
+			return framework.Gomega().Eventually(ctx,
+				framework.RetryNotFound(framework.GetObject(f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get, podName, metav1.GetOptions{}))).
+				WithTimeout(timeout).
+				Should(gcustom.MakeMatcher(func(p *v1.Pod) (bool, error) {
+					for _, s := range p.Status.ContainerStatuses {
+						if s.Name == containerName {
+							return reflect.DeepEqual(s.User, expectedContainerUser), nil
+						}
+					}
+					return false, nil
+				}))
+		}
+		waitForPodLogs := func(ctx context.Context, f *framework.Framework, podName string, containerName string, expectedLog string) error {
+			return framework.Gomega().Eventually(ctx,
+				framework.RetryNotFound(framework.GetObject(f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get, podName, metav1.GetOptions{}))).
+				WithTimeout(timeout).
+				Should(gcustom.MakeMatcher(func(p *v1.Pod) (bool, error) {
+					podLogs, err := e2epod.GetPodLogs(ctx, f.ClientSet, p.Namespace, p.Name, containerName)
+					if err != nil {
+						return false, err
+					}
+					return podLogs == expectedLog, nil
+				}))
+		}
+		expectMergePolicyInEffect := func(ctx context.Context, f *framework.Framework, podName string, containerName string, featureSupportedOnNode bool) {
+			expectedOutput := fmt.Sprintf("%d %d %d", uidInImage, gidDefinedInImage, supplementalGroup)
+			expectedContainerUser := &v1.ContainerUser{
+				Linux: &v1.LinuxContainerUser{
+					UID:                uidInImage,
+					GID:                uidInImage,
+					SupplementalGroups: []int64{uidInImage, gidDefinedInImage, supplementalGroup},
+				},
+			}
+
+			if featureSupportedOnNode {
+				framework.ExpectNoError(waitForContainerUser(ctx, f, podName, containerName, expectedContainerUser))
+			}
+			framework.ExpectNoError(waitForPodLogs(ctx, f, podName, containerName, expectedOutput+"\n"))
+
+			stdout := e2epod.ExecCommandInContainer(f, podName, containerName, "id", "-G")
+			gomega.Expect(stdout).To(gomega.Equal(expectedOutput))
+		}
+		expectStrictPolicyInEffect := func(ctx context.Context, f *framework.Framework, podName string, containerName string, featureSupportedOnNode bool) {
+			expectedOutput := fmt.Sprintf("%d %d", uidInImage, supplementalGroup)
+			expectedContainerUser := &v1.ContainerUser{
+				Linux: &v1.LinuxContainerUser{
+					UID:                uidInImage,
+					GID:                uidInImage,
+					SupplementalGroups: []int64{uidInImage, supplementalGroup},
+				},
+			}
+
+			if featureSupportedOnNode {
+				framework.ExpectNoError(waitForContainerUser(ctx, f, podName, containerName, expectedContainerUser))
+			}
+			framework.ExpectNoError(waitForPodLogs(ctx, f, podName, containerName, expectedOutput+"\n"))
+
+			stdout := e2epod.ExecCommandInContainer(f, podName, containerName, "id", "-G")
+			gomega.Expect(stdout).To(gomega.Equal(expectedOutput))
+		}
+		expectRejectionEventIssued := func(ctx context.Context, f *framework.Framework, pod *v1.Pod) {
+			framework.ExpectNoError(
+				framework.Gomega().Eventually(ctx,
+					framework.HandleRetry(framework.ListObjects(
+						f.ClientSet.CoreV1().Events(pod.Namespace).List,
+						metav1.ListOptions{
+							FieldSelector: fields.Set{
+								"type":                      core.EventTypeWarning,
+								"reason":                    lifecycle.SupplementalGroupsPolicyNotSupported,
+								"involvedObject.kind":       "Pod",
+								"involvedObject.apiVersion": v1.SchemeGroupVersion.String(),
+								"involvedObject.name":       pod.Name,
+								"involvedObject.uid":        string(pod.UID),
+							}.AsSelector().String(),
+						},
+					))).
+					WithTimeout(timeout).
+					Should(gcustom.MakeMatcher(func(eventList *v1.EventList) (bool, error) {
+						return len(eventList.Items) == 1, nil
+					})),
+			)
+		}
+
+		ginkgo.When("SupplementalGroupsPolicy nil in SecurityContext", func() {
+			ginkgo.When("if the container's primary UID belongs to some groups in the image", func() {
+				var pod *v1.Pod
+				ginkgo.BeforeEach(func(ctx context.Context) {
+					ginkgo.By("creating a pod", func() {
+						pod = e2epod.NewPodClient(f).CreateSync(ctx, mkPod(ptr.To(v1.SupplementalGroupsPolicyMerge)))
+					})
+				})
+				ginkgo.When("scheduled node does not support SupplementalGroupsPolicy", func() {
+					ginkgo.BeforeEach(func(ctx context.Context) {
+						// ensure the scheduled node does not support SupplementalGroupsPolicy
+						if nodeSupportsSupplementalGroupsPolicy(ctx, f, pod.Spec.NodeName) {
+							e2eskipper.Skipf("scheduled node does support SupplementalGroupsPolicy")
+						}
+					})
+					ginkgo.It("it should add SupplementalGroups to them [LinuxOnly]", func(ctx context.Context) {
+						expectMergePolicyInEffect(ctx, f, pod.Name, pod.Spec.Containers[0].Name, false)
+					})
+				})
+				ginkgo.When("scheduled node supports SupplementalGroupsPolicy", func() {
+					ginkgo.BeforeEach(func(ctx context.Context) {
+						// ensure the scheduled node does support SupplementalGroupsPolicy
+						if !nodeSupportsSupplementalGroupsPolicy(ctx, f, pod.Spec.NodeName) {
+							e2eskipper.Skipf("scheduled node does not support SupplementalGroupsPolicy")
+						}
+					})
+					ginkgo.It("it should add SupplementalGroups to them [LinuxOnly]", func(ctx context.Context) {
+						expectMergePolicyInEffect(ctx, f, pod.Name, pod.Spec.Containers[0].Name, true)
+					})
+				})
+			})
+		})
+		ginkgo.When("SupplementalGroupsPolicy was set to Merge in PodSpec", func() {
+			ginkgo.When("the container's primary UID belongs to some groups in the image", func() {
+				var pod *v1.Pod
+				ginkgo.BeforeEach(func(ctx context.Context) {
+					ginkgo.By("creating a pod", func() {
+						pod = e2epod.NewPodClient(f).CreateSync(ctx, mkPod(nil))
+					})
+				})
+				ginkgo.When("scheduled node does not support SupplementalGroupsPolicy", func() {
+					ginkgo.BeforeEach(func(ctx context.Context) {
+						// ensure the scheduled node does not support SupplementalGroupsPolicy
+						if nodeSupportsSupplementalGroupsPolicy(ctx, f, pod.Spec.NodeName) {
+							e2eskipper.Skipf("scheduled node does support SupplementalGroupsPolicy")
+						}
+					})
+					ginkgo.It("it should add SupplementalGroups to them [LinuxOnly]", func(ctx context.Context) {
+						expectMergePolicyInEffect(ctx, f, pod.Name, pod.Spec.Containers[0].Name, false)
+					})
+				})
+				ginkgo.When("scheduled node supports SupplementalGroupsPolicy", func() {
+					ginkgo.BeforeEach(func(ctx context.Context) {
+						// ensure the scheduled node does support SupplementalGroupsPolicy
+						if !nodeSupportsSupplementalGroupsPolicy(ctx, f, pod.Spec.NodeName) {
+							e2eskipper.Skipf("scheduled node does not support SupplementalGroupsPolicy")
+						}
+					})
+					ginkgo.It("it should add SupplementalGroups to them [LinuxOnly]", func(ctx context.Context) {
+						expectMergePolicyInEffect(ctx, f, pod.Name, pod.Spec.Containers[0].Name, true)
+					})
+				})
+			})
+		})
+		ginkgo.When("SupplementalGroupsPolicy was set to Strict in PodSpec", func() {
+			ginkgo.When("the container's primary UID belongs to some groups in the image", func() {
+				var pod *v1.Pod
+				ginkgo.BeforeEach(func(ctx context.Context) {
+					ginkgo.By("creating a pod", func() {
+						pod = e2epod.NewPodClient(f).Create(ctx, mkPod(ptr.To(v1.SupplementalGroupsPolicyStrict)))
+						framework.ExpectNoError(e2epod.WaitForPodScheduled(ctx, f.ClientSet, pod.Namespace, pod.Name))
+						var err error
+						pod, err = e2epod.NewPodClient(f).Get(ctx, pod.Name, metav1.GetOptions{})
+						framework.ExpectNoError(err)
+					})
+				})
+				ginkgo.When("scheduled node does not support SupplementalGroupsPolicy", func() {
+					ginkgo.BeforeEach(func(ctx context.Context) {
+						// ensure the scheduled node does not support SupplementalGroupsPolicy
+						if nodeSupportsSupplementalGroupsPolicy(ctx, f, pod.Spec.NodeName) {
+							e2eskipper.Skipf("scheduled node does support SupplementalGroupsPolicy")
+						}
+					})
+					ginkgo.It("it should reject the pod [LinuxOnly]", func(ctx context.Context) {
+						expectRejectionEventIssued(ctx, f, pod)
+					})
+				})
+				ginkgo.When("scheduled node supports SupplementalGroupsPolicy", func() {
+					ginkgo.BeforeEach(func(ctx context.Context) {
+						// ensure the scheduled node does support SupplementalGroupsPolicy
+						if !nodeSupportsSupplementalGroupsPolicy(ctx, f, pod.Spec.NodeName) {
+							e2eskipper.Skipf("scheduled node does not support SupplementalGroupsPolicy")
+						}
+						framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod))
+					})
+					ginkgo.It("it should NOT add SupplementalGroups to them [LinuxOnly]", func(ctx context.Context) {
+						expectStrictPolicyInEffect(ctx, f, pod.Name, pod.Spec.Containers[0].Name, true)
+					})
+				})
+			})
+		})
+	})
 })
 
 var _ = SIGDescribe("User Namespaces for Pod Security Standards [LinuxOnly]", func() {
@@ -25,25 +25,19 @@ package node
 import (
 	"context"
-	"fmt"
-	"reflect"
-	"time"
 
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
 	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
 	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
-	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	admissionapi "k8s.io/pod-security-admission/api"
-	ptr "k8s.io/utils/ptr"
 
 	"github.com/onsi/ginkgo/v2"
 	"github.com/onsi/gomega"
-	"github.com/onsi/gomega/gcustom"
 )
 
 // SeccompProcStatusField is the field of /proc/$PID/status referencing the seccomp filter type.
@@ -120,167 +114,6 @@ var _ = SIGDescribe("Security Context", func() {
 		})
 	})
 
-	SIGDescribe("SupplementalGroupsPolicy", feature.SupplementalGroupsPolicy, func() {
-		timeout := 3 * time.Minute
-
-		agnhostImage := imageutils.GetE2EImage(imageutils.Agnhost)
-		uidInImage := int64(1000)
-		gidDefinedInImage := int64(50000)
-		supplementalGroup := int64(60000)
-
-		supportsSupplementalGroupsPolicy := func(ctx context.Context, f *framework.Framework, nodeName string) bool {
-			node, err := f.ClientSet.CoreV1().Nodes().Get(ctx, nodeName, metav1.GetOptions{})
-			framework.ExpectNoError(err)
-			gomega.Expect(node).NotTo(gomega.BeNil())
-			if node.Status.Features != nil {
-				supportsSupplementalGroupsPolicy := node.Status.Features.SupplementalGroupsPolicy
-				if supportsSupplementalGroupsPolicy != nil && *supportsSupplementalGroupsPolicy {
-					return true
-				}
-			}
-			return false
-		}
-		mkPod := func(policy *v1.SupplementalGroupsPolicy) *v1.Pod {
-			pod := scTestPod(false, false)
-
-			// In specified image(agnhost E2E image),
-			// - user-defined-in-image(uid=1000) is defined
-			// - user-defined-in-image belongs to group-defined-in-image(gid=50000)
-			// thus, resultant supplementary group of the container processes should be
-			// - 1000 : self
-			// - 50000: pre-defined groups defined in the container image(/etc/group) of self(uid=1000)
-			// - 60000: specified in SupplementalGroups
-			// $ id -G
-			// 1000 50000 60000 (if SupplementalGroupsPolicy=Merge or not set)
-			// 1000 60000 (if SupplementalGroupsPolicy=Strict)
-			pod.Spec.SecurityContext.RunAsUser = &uidInImage
-			pod.Spec.SecurityContext.SupplementalGroupsPolicy = policy
-			pod.Spec.SecurityContext.SupplementalGroups = []int64{supplementalGroup}
-			pod.Spec.Containers[0].Image = agnhostImage
-			pod.Spec.Containers[0].Command = []string{"sh", "-c", "id -G; while :; do sleep 1; done"}
-
-			return pod
-		}
-		waitForContainerUser := func(ctx context.Context, f *framework.Framework, podName string, containerName string, expectedContainerUser *v1.ContainerUser) error {
-			return framework.Gomega().Eventually(ctx,
-				framework.RetryNotFound(framework.GetObject(f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get, podName, metav1.GetOptions{}))).
-				WithTimeout(timeout).
-				Should(gcustom.MakeMatcher(func(p *v1.Pod) (bool, error) {
-					for _, s := range p.Status.ContainerStatuses {
-						if s.Name == containerName {
-							return reflect.DeepEqual(s.User, expectedContainerUser), nil
-						}
-					}
-					return false, nil
-				}))
-		}
-		waitForPodLogs := func(ctx context.Context, f *framework.Framework, podName string, containerName string, expectedLog string) error {
-			return framework.Gomega().Eventually(ctx,
-				framework.RetryNotFound(framework.GetObject(f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get, podName, metav1.GetOptions{}))).
-				WithTimeout(timeout).
-				Should(gcustom.MakeMatcher(func(p *v1.Pod) (bool, error) {
-					podLogs, err := e2epod.GetPodLogs(ctx, f.ClientSet, p.Namespace, p.Name, containerName)
-					if err != nil {
-						return false, err
-					}
-					return podLogs == expectedLog, nil
-				}))
-		}
-
-		ginkgo.When("SupplementalGroupsPolicy was not set", func() {
-			ginkgo.It("if the container's primary UID belongs to some groups in the image, it should add SupplementalGroups to them [LinuxOnly]", func(ctx context.Context) {
-				var pod *v1.Pod
-				ginkgo.By("creating a pod", func() {
-					pod = e2epod.NewPodClient(f).Create(ctx, mkPod(nil))
-					framework.ExpectNoError(e2epod.WaitForPodScheduled(ctx, f.ClientSet, pod.Namespace, pod.Name))
-					var err error
-					pod, err = e2epod.NewPodClient(f).Get(ctx, pod.Name, metav1.GetOptions{})
-					framework.ExpectNoError(err)
-					if !supportsSupplementalGroupsPolicy(ctx, f, pod.Spec.NodeName) {
-						e2eskipper.Skipf("node does not support SupplementalGroupsPolicy")
-					}
-					framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod))
-				})
-				expectedOutput := fmt.Sprintf("%d %d %d", uidInImage, gidDefinedInImage, supplementalGroup)
-				expectedContainerUser := &v1.ContainerUser{
-					Linux: &v1.LinuxContainerUser{
-						UID:                uidInImage,
-						GID:                uidInImage,
-						SupplementalGroups: []int64{uidInImage, gidDefinedInImage, supplementalGroup},
-					},
-				}
-
-				framework.ExpectNoError(waitForContainerUser(ctx, f, pod.Name, pod.Spec.Containers[0].Name, expectedContainerUser))
-				framework.ExpectNoError(waitForPodLogs(ctx, f, pod.Name, pod.Spec.Containers[0].Name, expectedOutput+"\n"))
-
-				stdout := e2epod.ExecCommandInContainer(f, pod.Name, pod.Spec.Containers[0].Name, "id", "-G")
-				gomega.Expect(stdout).To(gomega.Equal(expectedOutput))
-			})
-		})
-		ginkgo.When("SupplementalGroupsPolicy was set to Merge", func() {
-			ginkgo.It("if the container's primary UID belongs to some groups in the image, it should add SupplementalGroups to them [LinuxOnly]", func(ctx context.Context) {
-				var pod *v1.Pod
-				ginkgo.By("creating a pod", func() {
-					pod = e2epod.NewPodClient(f).Create(ctx, mkPod(ptr.To(v1.SupplementalGroupsPolicyMerge)))
-					framework.ExpectNoError(e2epod.WaitForPodScheduled(ctx, f.ClientSet, pod.Namespace, pod.Name))
-					var err error
-					pod, err = e2epod.NewPodClient(f).Get(ctx, pod.Name, metav1.GetOptions{})
-					framework.ExpectNoError(err)
-					if !supportsSupplementalGroupsPolicy(ctx, f, pod.Spec.NodeName) {
-						e2eskipper.Skipf("node does not support SupplementalGroupsPolicy")
-					}
-					framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod))
-				})
-
-				expectedOutput := fmt.Sprintf("%d %d %d", uidInImage, gidDefinedInImage, supplementalGroup)
-				expectedContainerUser := &v1.ContainerUser{
-					Linux: &v1.LinuxContainerUser{
-						UID:                uidInImage,
-						GID:                uidInImage,
-						SupplementalGroups: []int64{uidInImage, gidDefinedInImage, supplementalGroup},
-					},
-				}
-
-				framework.ExpectNoError(waitForContainerUser(ctx, f, pod.Name, pod.Spec.Containers[0].Name, expectedContainerUser))
-				framework.ExpectNoError(waitForPodLogs(ctx, f, pod.Name, pod.Spec.Containers[0].Name, expectedOutput+"\n"))
-
-				stdout := e2epod.ExecCommandInContainer(f, pod.Name, pod.Spec.Containers[0].Name, "id", "-G")
-				gomega.Expect(stdout).To(gomega.Equal(expectedOutput))
-			})
-		})
-		ginkgo.When("SupplementalGroupsPolicy was set to Strict", func() {
-			ginkgo.It("even if the container's primary UID belongs to some groups in the image, it should not add SupplementalGroups to them [LinuxOnly]", func(ctx context.Context) {
-				var pod *v1.Pod
-				ginkgo.By("creating a pod", func() {
-					pod = e2epod.NewPodClient(f).Create(ctx, mkPod(ptr.To(v1.SupplementalGroupsPolicyStrict)))
-					framework.ExpectNoError(e2epod.WaitForPodScheduled(ctx, f.ClientSet, pod.Namespace, pod.Name))
-					var err error
-					pod, err = e2epod.NewPodClient(f).Get(ctx, pod.Name, metav1.GetOptions{})
-					framework.ExpectNoError(err)
-					if !supportsSupplementalGroupsPolicy(ctx, f, pod.Spec.NodeName) {
-						e2eskipper.Skipf("node does not support SupplementalGroupsPolicy")
-					}
-					framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod))
-				})
-
-				expectedOutput := fmt.Sprintf("%d %d", uidInImage, supplementalGroup)
-				expectedContainerUser := &v1.ContainerUser{
-					Linux: &v1.LinuxContainerUser{
-						UID:                uidInImage,
-						GID:                uidInImage,
-						SupplementalGroups: []int64{uidInImage, supplementalGroup},
-					},
-				}
-
-				framework.ExpectNoError(waitForContainerUser(ctx, f, pod.Name, pod.Spec.Containers[0].Name, expectedContainerUser))
-				framework.ExpectNoError(waitForPodLogs(ctx, f, pod.Name, pod.Spec.Containers[0].Name, expectedOutput+"\n"))
-
-				stdout := e2epod.ExecCommandInContainer(f, pod.Name, pod.Spec.Containers[0].Name, "id", "-G")
-				gomega.Expect(stdout).To(gomega.Equal(expectedOutput))
-			})
-		})
-	})
-
 	ginkgo.It("should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly]", func(ctx context.Context) {
 		pod := scTestPod(false, false)
 		userID := int64(1001)