Mirror of https://github.com/k3s-io/kubernetes.git

Commit 3c7896719b (parent fa6bd4b832)

Implement AppArmor Kubelet support
--- a/pkg/kubelet/dockertools/docker_manager.go
+++ b/pkg/kubelet/dockertools/docker_manager.go
@@ -57,6 +57,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/util/cache"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/security/apparmor"
 	"k8s.io/kubernetes/pkg/securitycontext"
 	kubetypes "k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/flowcontrol"
@@ -108,8 +109,8 @@ var (
 	// TODO: make this a TTL based pull (if image older than X policy, pull)
 	podInfraContainerImagePullPolicy = api.PullIfNotPresent
 
-	// Default security option, only seccomp for now
-	defaultSeccompProfile = "unconfined"
+	// Default set of seccomp security options.
+	defaultSeccompOpt = []dockerOpt{{"seccomp", "unconfined"}}
 )
 
 type DockerManager struct {
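Note (not part of the diff): replacing the preformatted `defaultSeccompProfile` string with a `dockerOpt` pair defers the separator choice to serialization time, since Docker changed the `--security-opt` syntax between 1.10 and 1.11. A standalone sketch of that formatting, using only the shapes shown in this diff:

```go
package main

import "fmt"

// dockerOpt mirrors the key/value pair this commit introduces in docker_manager.go.
type dockerOpt struct {
	key, value string
}

func main() {
	opt := dockerOpt{key: "seccomp", value: "unconfined"} // the new defaultSeccompOpt entry

	// Docker 1.10 (API 1.22) expects "key:value"; Docker 1.11+ (API 1.23) expects "key=value".
	for _, sep := range []rune{':', '='} {
		fmt.Printf("--security-opt %s%c%s\n", opt.key, sep, opt.value)
	}
}
```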
@@ -1040,22 +1041,52 @@ func (dm *DockerManager) getSecurityOpts(pod *api.Pod, ctrName string) ([]string, error) {
 		return nil, err
 	}
 
-	// seccomp is only on docker versions >= v1.10
-	result, err := version.Compare(dockerV110APIVersion)
-	if err != nil {
+	var securityOpts []dockerOpt
+	if seccompOpts, err := dm.getSeccompOpts(pod, ctrName, version); err != nil {
 		return nil, err
-	}
-	var optFmt string
-	switch {
-	case result < 0:
-		return nil, nil // return early for Docker < 1.10
-	case result == 0:
-		optFmt = "%s:%s" // use colon notation for Docker 1.10
-	case result > 0:
-		optFmt = "%s=%s" // use = notation for Docker >= 1.11
+	} else {
+		securityOpts = append(securityOpts, seccompOpts...)
 	}
 
-	defaultSecurityOpts := []string{fmt.Sprintf(optFmt, "seccomp", defaultSeccompProfile)}
+	if appArmorOpts, err := dm.getAppArmorOpts(pod, ctrName); err != nil {
+		return nil, err
+	} else {
+		securityOpts = append(securityOpts, appArmorOpts...)
+	}
+
+	const (
+		// Docker changed the API for specifying options in v1.11
+		optSeparatorChangeVersion = "1.23" // Corresponds to docker 1.11.x
+		optSeparatorOld           = ':'
+		optSeparatorNew           = '='
+	)
+
+	sep := optSeparatorNew
+	if result, err := version.Compare(optSeparatorChangeVersion); err != nil {
+		return nil, fmt.Errorf("error parsing docker API version: %v", err)
+	} else if result < 0 {
+		sep = optSeparatorOld
+	}
+
+	opts := make([]string, len(securityOpts))
+	for i, opt := range securityOpts {
+		opts[i] = fmt.Sprintf("%s%c%s", opt.key, sep, opt.value)
+	}
+	return opts, nil
+}
+
+type dockerOpt struct {
+	key, value string
+}
+
+// Get the docker security options for seccomp.
+func (dm *DockerManager) getSeccompOpts(pod *api.Pod, ctrName string, version kubecontainer.Version) ([]dockerOpt, error) {
+	// seccomp is only on docker versions >= v1.10
+	if result, err := version.Compare(dockerV110APIVersion); err != nil {
+		return nil, err
+	} else if result < 0 {
+		return nil, nil // return early for Docker < 1.10
+	}
 
 	profile, profileOK := pod.ObjectMeta.Annotations[api.SeccompContainerAnnotationKeyPrefix+ctrName]
 	if !profileOK {
@@ -1063,13 +1094,13 @@ func (dm *DockerManager) getSecurityOpts(pod *api.Pod, ctrName string) ([]string, error) {
 		profile, profileOK = pod.ObjectMeta.Annotations[api.SeccompPodAnnotationKey]
 		if !profileOK {
 			// return early the default
-			return defaultSecurityOpts, nil
+			return defaultSeccompOpt, nil
 		}
 	}
 
 	if profile == "unconfined" {
 		// return early the default
-		return defaultSecurityOpts, nil
+		return defaultSeccompOpt, nil
 	}
 
 	if profile == "docker/default" {
@@ -1093,7 +1124,20 @@ func (dm *DockerManager) getSecurityOpts(pod *api.Pod, ctrName string) ([]string, error) {
 		return nil, err
 	}
 
-	return []string{fmt.Sprintf(optFmt, "seccomp", b.Bytes())}, nil
+	return []dockerOpt{{"seccomp", b.String()}}, nil
+}
+
+// Get the docker security options for AppArmor.
+func (dm *DockerManager) getAppArmorOpts(pod *api.Pod, ctrName string) ([]dockerOpt, error) {
+	profile := apparmor.GetProfileName(pod, ctrName)
+	if profile == "" || profile == apparmor.ProfileRuntimeDefault {
+		// The docker applies the default profile by default.
+		return nil, nil
+	}
+
+	// Assume validation has already happened.
+	profileName := strings.TrimPrefix(profile, apparmor.ProfileNamePrefix)
+	return []dockerOpt{{"apparmor", profileName}}, nil
 }
 
 type dockerExitError struct {
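Note (not part of the diff): `getAppArmorOpts` emits nothing for the runtime default and otherwise strips the profile-name prefix before handing the bare name to Docker. A minimal sketch with the constants inlined; the literal `runtime/default` and `localhost/` values are assumptions about `apparmor.ProfileRuntimeDefault` and `apparmor.ProfileNamePrefix`:

```go
package main

import (
	"fmt"
	"strings"
)

// Inlined stand-ins for the constants used by getAppArmorOpts (assumed values).
const (
	profileRuntimeDefault = "runtime/default"
	profileNamePrefix     = "localhost/"
)

// appArmorOpt mirrors getAppArmorOpts: no option for the runtime default,
// otherwise strip the "localhost/" prefix and pass the bare profile name.
func appArmorOpt(profile string) (string, bool) {
	if profile == "" || profile == profileRuntimeDefault {
		return "", false // Docker applies its default profile on its own
	}
	return "apparmor=" + strings.TrimPrefix(profile, profileNamePrefix), true
}

func main() {
	for _, p := range []string{"", "runtime/default", "localhost/foo"} {
		if opt, ok := appArmorOpt(p); ok {
			fmt.Printf("%-20q -> --security-opt %s\n", p, opt)
		} else {
			fmt.Printf("%-20q -> (no security opt)\n", p)
		}
	}
}
```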
--- a/pkg/kubelet/dockertools/docker_manager_test.go
+++ b/pkg/kubelet/dockertools/docker_manager_test.go
@@ -51,6 +51,7 @@ import (
 	proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
 	"k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/security/apparmor"
 	kubetypes "k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/clock"
 	uexec "k8s.io/kubernetes/pkg/util/exec"
@@ -1781,6 +1782,70 @@ func TestSecurityOptsOperator(t *testing.T) {
 	}
 }
 
+func TestGetSecurityOpts(t *testing.T) {
+	const containerName = "bar"
+	makePod := func(annotations map[string]string) *api.Pod {
+		return &api.Pod{
+			ObjectMeta: api.ObjectMeta{
+				UID:         "12345678",
+				Name:        "foo",
+				Namespace:   "new",
+				Annotations: annotations,
+			},
+			Spec: api.PodSpec{
+				Containers: []api.Container{
+					{Name: containerName},
+				},
+			},
+		}
+	}
+
+	tests := []struct {
+		msg          string
+		pod          *api.Pod
+		expectedOpts []string
+	}{{
+		msg:          "No security annotations",
+		pod:          makePod(nil),
+		expectedOpts: []string{"seccomp=unconfined"},
+	}, {
+		msg: "Seccomp default",
+		pod: makePod(map[string]string{
+			api.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default",
+		}),
+		expectedOpts: nil,
+	}, {
+		msg: "AppArmor runtime/default",
+		pod: makePod(map[string]string{
+			apparmor.ContainerAnnotationKeyPrefix + containerName: apparmor.ProfileRuntimeDefault,
+		}),
+		expectedOpts: []string{"seccomp=unconfined"},
+	}, {
+		msg: "AppArmor local profile",
+		pod: makePod(map[string]string{
+			apparmor.ContainerAnnotationKeyPrefix + containerName: apparmor.ProfileNamePrefix + "foo",
+		}),
+		expectedOpts: []string{"seccomp=unconfined", "apparmor=foo"},
+	}, {
+		msg: "AppArmor and seccomp profile",
+		pod: makePod(map[string]string{
+			api.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default",
+			apparmor.ContainerAnnotationKeyPrefix + containerName:   apparmor.ProfileNamePrefix + "foo",
+		}),
+		expectedOpts: []string{"apparmor=foo"},
+	}}
+
+	dm, _ := newTestDockerManagerWithVersion("1.11.1", "1.23")
+	for i, test := range tests {
+		opts, err := dm.getSecurityOpts(test.pod, containerName)
+		assert.NoError(t, err, "TestCase[%d]: %s", i, test.msg)
+		assert.Len(t, opts, len(test.expectedOpts), "TestCase[%d]: %s", i, test.msg)
+		for _, opt := range test.expectedOpts {
+			assert.Contains(t, opts, opt, "TestCase[%d]: %s", i, test.msg)
+		}
+	}
+}
+
 func TestSeccompIsUnconfinedByDefaultWithDockerV110(t *testing.T) {
 	dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22")
 	pod := &api.Pod{
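Note on the fixture (not part of the diff): `newTestDockerManagerWithVersion("1.11.1", "1.23")` pins the Docker API at 1.23, so every expected option in the table uses the `=` separator; against API 1.22 the same default would serialize as `seccomp:unconfined`. To run just this table test (package path assumed from the symbols above): `go test ./pkg/kubelet/dockertools/ -run TestGetSecurityOpts`.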
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -71,6 +71,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/util/queue"
 	"k8s.io/kubernetes/pkg/kubelet/volumemanager"
 	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/security/apparmor"
 	"k8s.io/kubernetes/pkg/securitycontext"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/bandwidth"
@@ -552,6 +553,8 @@ func NewMainKubelet(
 	klet.AddPodSyncLoopHandler(activeDeadlineHandler)
 	klet.AddPodSyncHandler(activeDeadlineHandler)
 
+	klet.AddPodAdmitHandler(apparmor.NewValidator(containerRuntime))
+
 	// apply functional Option's
 	for _, opt := range kubeOptions {
 		opt(klet)
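Note (not part of the diff): registering the validator here means every pod the Kubelet is asked to run is gated by `Admit` before it starts. A self-contained sketch of that contract; the type names copy this diff, but the first-rejection loop is illustrative rather than the Kubelet's actual admission code:

```go
package main

import "fmt"

// Shapes modeled on the diff; the runner below is illustrative only.
type Pod struct{ Name string }

type PodAdmitAttributes struct{ Pod *Pod }

type PodAdmitResult struct {
	Admit   bool
	Reason  string
	Message string
}

type PodAdmitHandler interface {
	Admit(attrs *PodAdmitAttributes) PodAdmitResult
}

// admitPod shows the gating contract: the first rejecting handler wins.
func admitPod(pod *Pod, handlers []PodAdmitHandler) error {
	for _, h := range handlers {
		if r := h.Admit(&PodAdmitAttributes{Pod: pod}); !r.Admit {
			return fmt.Errorf("pod %s rejected (%s): %s", pod.Name, r.Reason, r.Message)
		}
	}
	return nil
}

func main() {
	fmt.Println(admitPod(&Pod{Name: "foo"}, nil)) // no handlers -> admitted (nil error)
}
```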
--- a/pkg/security/apparmor/validate.go
+++ b/pkg/security/apparmor/validate.go
@@ -26,6 +26,7 @@ import (
 	"strings"
 
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/util"
 )
 
@@ -33,12 +34,11 @@ import (
 // Set to true if the wrong build tags are set (see validate_disabled.go).
 var isDisabledBuild bool
 
-// Interface for validating that a pod with with an AppArmor profile can be run by a Node.
-type Validator interface {
-	Validate(pod *api.Pod) error
-}
+const (
+	rejectReason = "AppArmor"
+)
 
-func NewValidator(runtime string) Validator {
+func NewValidator(runtime string) lifecycle.PodAdmitHandler {
 	if err := validateHost(runtime); err != nil {
 		return &validator{validateHostErr: err}
 	}
@@ -58,7 +58,21 @@ type validator struct {
 	appArmorFS string
 }
 
-func (v *validator) Validate(pod *api.Pod) error {
+// TODO(timstclair): Refactor the PodAdmitInterface to return a (Admit, Reason Message) rather than
+// the PodAdmitResult struct so that the interface can be implemented without importing lifecycle.
+func (v *validator) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
+	err := v.validate(attrs.Pod)
+	if err == nil {
+		return lifecycle.PodAdmitResult{Admit: true}
+	}
+	return lifecycle.PodAdmitResult{
+		Admit:   false,
+		Reason:  rejectReason,
+		Message: fmt.Sprintf("Cannot enforce AppArmor: %v", err),
+	}
+}
+
+func (v *validator) validate(pod *api.Pod) error {
 	if !isRequired(pod) {
 		return nil
 	}
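Note (not part of the diff): `NewValidator` stores any host-validation error instead of failing construction, and `Admit` surfaces it only for pods that actually request an AppArmor profile (via `isRequired`). A hypothetical caller, assuming only the signatures shown above:

```go
// Illustrative helper, assuming the types introduced in this diff
// (imports: fmt, pkg/api, pkg/kubelet/lifecycle, pkg/security/apparmor).
func admitOrReject(pod *api.Pod) error {
	handler := apparmor.NewValidator("docker") // the runtime string NewMainKubelet passes
	result := handler.Admit(&lifecycle.PodAdmitAttributes{Pod: pod})
	if !result.Admit {
		// Reason is the constant "AppArmor"; Message wraps the underlying error.
		return fmt.Errorf("%s: %s", result.Reason, result.Message)
	}
	return nil
}
```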
--- a/pkg/security/apparmor/validate_test.go
+++ b/pkg/security/apparmor/validate_test.go
@@ -21,6 +21,7 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 
 	"github.com/stretchr/testify/assert"
 )
@@ -36,7 +37,7 @@ func TestGetAppArmorFS(t *testing.T) {
 	assert.Equal(t, expectedPath, actualPath)
 }
 
-func TestValidateHost(t *testing.T) {
+func TestAdmitHost(t *testing.T) {
 	// This test only passes on systems running AppArmor with the default configuration.
 	// The test should be manually run if modifying the getAppArmorFS function.
 	t.Skip()
@@ -45,7 +46,7 @@ func TestValidateHost(t *testing.T) {
 	assert.Error(t, validateHost("rkt"))
 }
 
-func TestValidateProfile(t *testing.T) {
+func TestAdmitProfile(t *testing.T) {
 	loadedProfiles := map[string]bool{
 		"docker-default": true,
 		"foo-bar":        true,
@@ -78,7 +79,7 @@ func TestValidateProfile(t *testing.T) {
 	}
 }
 
-func TestValidateBadHost(t *testing.T) {
+func TestAdmitBadHost(t *testing.T) {
 	hostErr := errors.New("expected host error")
 	v := &validator{
 		validateHostErr: hostErr,
@@ -94,16 +95,20 @@ func TestValidateBadHost(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		err := v.Validate(getPodWithProfile(test.profile))
+		result := v.Admit(&lifecycle.PodAdmitAttributes{
+			Pod: getPodWithProfile(test.profile),
+		})
 		if test.expectValid {
-			assert.NoError(t, err, "Pod with profile %q should be valid", test.profile)
+			assert.True(t, result.Admit, "Pod with profile %q should be admitted", test.profile)
 		} else {
-			assert.Equal(t, hostErr, err, "Pod with profile %q should trigger a host validation error", test.profile)
+			assert.False(t, result.Admit, "Pod with profile %q should be rejected", test.profile)
+			assert.Equal(t, rejectReason, result.Reason, "Pod with profile %q", test.profile)
+			assert.Contains(t, result.Message, hostErr.Error(), "Pod with profile %q", test.profile)
 		}
 	}
 }
 
-func TestValidateValidHost(t *testing.T) {
+func TestAdmitValidHost(t *testing.T) {
 	v := &validator{
 		appArmorFS: "./testdata/",
 	}
@@ -123,11 +128,15 @@ func TestValidateValidHost(t *testing.T) {
 	}
 
 	for _, test := range tests {
-		err := v.Validate(getPodWithProfile(test.profile))
+		result := v.Admit(&lifecycle.PodAdmitAttributes{
+			Pod: getPodWithProfile(test.profile),
+		})
 		if test.expectValid {
-			assert.NoError(t, err, "Pod with profile %q should be valid", test.profile)
+			assert.True(t, result.Admit, "Pod with profile %q should be admitted", test.profile)
 		} else {
-			assert.Error(t, err, "Pod with profile %q should trigger a validation error", test.profile)
+			assert.False(t, result.Admit, "Pod with profile %q should be rejected", test.profile)
+			assert.Equal(t, rejectReason, result.Reason, "Pod with profile %q", test.profile)
+			assert.NotEmpty(t, result.Message, "Pod with profile %q", test.profile)
 		}
 	}
 
@@ -151,10 +160,16 @@ func TestValidateValidHost(t *testing.T) {
 			},
 		},
 	}
-	assert.NoError(t, v.Validate(pod), "Multi-container pod should validate")
+	assert.True(t, v.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}).Admit,
+		"Multi-container pod should be admitted")
 	for k, val := range pod.Annotations {
 		pod.Annotations[k] = val + "-bad"
-		assert.Error(t, v.Validate(pod), "Multi-container pod with invalid profile %s:%s", k, pod.Annotations[k])
+
+		result := v.Admit(&lifecycle.PodAdmitAttributes{Pod: pod})
+		assert.False(t, result.Admit, "Multi-container pod with invalid profile should be rejected")
+		assert.Equal(t, rejectReason, result.Reason, "Multi-container pod with invalid profile")
+		assert.NotEmpty(t, result.Message, "Multi-container pod with invalid profile")
+
 		pod.Annotations[k] = val // Restore.
 	}
 }