diff --git a/pkg/kubelet/dockertools/docker_manager.go b/pkg/kubelet/dockertools/docker_manager.go
index 74a07f8909d..885a4776755 100644
--- a/pkg/kubelet/dockertools/docker_manager.go
+++ b/pkg/kubelet/dockertools/docker_manager.go
@@ -57,6 +57,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/util/cache"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
 	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/security/apparmor"
 	"k8s.io/kubernetes/pkg/securitycontext"
 	kubetypes "k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/flowcontrol"
@@ -108,8 +109,8 @@ var (
 	// TODO: make this a TTL based pull (if image older than X policy, pull)
 	podInfraContainerImagePullPolicy = api.PullIfNotPresent
 
-	// Default security option, only seccomp for now
-	defaultSeccompProfile = "unconfined"
+	// Default set of seccomp security options.
+	defaultSeccompOpt = []dockerOpt{{"seccomp", "unconfined"}}
 )
 
 type DockerManager struct {
@@ -1040,22 +1041,52 @@ func (dm *DockerManager) getSecurityOpts(pod *api.Pod, ctrName string) ([]string
 		return nil, err
 	}
 
-	// seccomp is only on docker versions >= v1.10
-	result, err := version.Compare(dockerV110APIVersion)
-	if err != nil {
+	var securityOpts []dockerOpt
+	if seccompOpts, err := dm.getSeccompOpts(pod, ctrName, version); err != nil {
 		return nil, err
-	}
-	var optFmt string
-	switch {
-	case result < 0:
-		return nil, nil // return early for Docker < 1.10
-	case result == 0:
-		optFmt = "%s:%s" // use colon notation for Docker 1.10
-	case result > 0:
-		optFmt = "%s=%s" // use = notation for Docker >= 1.11
+	} else {
+		securityOpts = append(securityOpts, seccompOpts...)
 	}
 
-	defaultSecurityOpts := []string{fmt.Sprintf(optFmt, "seccomp", defaultSeccompProfile)}
+	if appArmorOpts, err := dm.getAppArmorOpts(pod, ctrName); err != nil {
+		return nil, err
+	} else {
+		securityOpts = append(securityOpts, appArmorOpts...)
+	}
+
+	const (
+		// Docker changed the API for specifying options in v1.11
+		optSeparatorChangeVersion = "1.23" // Corresponds to docker 1.11.x
+		optSeparatorOld           = ':'
+		optSeparatorNew           = '='
+	)
+
+	sep := optSeparatorNew
+	if result, err := version.Compare(optSeparatorChangeVersion); err != nil {
+		return nil, fmt.Errorf("error parsing docker API version: %v", err)
+	} else if result < 0 {
+		sep = optSeparatorOld
+	}
+
+	opts := make([]string, len(securityOpts))
+	for i, opt := range securityOpts {
+		opts[i] = fmt.Sprintf("%s%c%s", opt.key, sep, opt.value)
+	}
+	return opts, nil
+}
+
+type dockerOpt struct {
+	key, value string
+}
+
+// Get the docker security options for seccomp.
+func (dm *DockerManager) getSeccompOpts(pod *api.Pod, ctrName string, version kubecontainer.Version) ([]dockerOpt, error) {
+	// seccomp is only on docker versions >= v1.10
+	if result, err := version.Compare(dockerV110APIVersion); err != nil {
+		return nil, err
+	} else if result < 0 {
+		return nil, nil // return early for Docker < 1.10
+	}
 
 	profile, profileOK := pod.ObjectMeta.Annotations[api.SeccompContainerAnnotationKeyPrefix+ctrName]
 	if !profileOK {
@@ -1063,13 +1094,13 @@ func (dm *DockerManager) getSecurityOpts(pod *api.Pod, ctrName string) ([]string
 		profile, profileOK = pod.ObjectMeta.Annotations[api.SeccompPodAnnotationKey]
 		if !profileOK {
 			// return early the default
-			return defaultSecurityOpts, nil
+			return defaultSeccompOpt, nil
 		}
 	}
 
 	if profile == "unconfined" {
 		// return early the default
-		return defaultSecurityOpts, nil
+		return defaultSeccompOpt, nil
 	}
 
 	if profile == "docker/default" {
@@ -1093,7 +1124,20 @@ func (dm *DockerManager) getSecurityOpts(pod *api.Pod, ctrName string) ([]string
 		return nil, err
 	}
 
-	return []string{fmt.Sprintf(optFmt, "seccomp", b.Bytes())}, nil
+	return []dockerOpt{{"seccomp", b.String()}}, nil
+}
+
+// Get the docker security options for AppArmor.
+func (dm *DockerManager) getAppArmorOpts(pod *api.Pod, ctrName string) ([]dockerOpt, error) {
+	profile := apparmor.GetProfileName(pod, ctrName)
+	if profile == "" || profile == apparmor.ProfileRuntimeDefault {
+		// Docker applies the default profile by default.
+		return nil, nil
+	}
+
+	// Assume validation has already happened.
+	profileName := strings.TrimPrefix(profile, apparmor.ProfileNamePrefix)
+	return []dockerOpt{{"apparmor", profileName}}, nil
 }
 
 type dockerExitError struct {
diff --git a/pkg/kubelet/dockertools/docker_manager_test.go b/pkg/kubelet/dockertools/docker_manager_test.go
index 86f9d97029f..67e8c422e45 100644
--- a/pkg/kubelet/dockertools/docker_manager_test.go
+++ b/pkg/kubelet/dockertools/docker_manager_test.go
@@ -51,6 +51,7 @@ import (
 	proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
 	"k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/security/apparmor"
 	kubetypes "k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/clock"
 	uexec "k8s.io/kubernetes/pkg/util/exec"
@@ -1781,6 +1782,70 @@ func TestSecurityOptsOperator(t *testing.T) {
 	}
 }
 
+func TestGetSecurityOpts(t *testing.T) {
+	const containerName = "bar"
+	makePod := func(annotations map[string]string) *api.Pod {
+		return &api.Pod{
+			ObjectMeta: api.ObjectMeta{
+				UID:         "12345678",
+				Name:        "foo",
+				Namespace:   "new",
+				Annotations: annotations,
+			},
+			Spec: api.PodSpec{
+				Containers: []api.Container{
+					{Name: containerName},
+				},
+			},
+		}
+	}
+
+	tests := []struct {
+		msg          string
+		pod          *api.Pod
+		expectedOpts []string
+	}{{
+		msg:          "No security annotations",
+		pod:          makePod(nil),
+		expectedOpts: []string{"seccomp=unconfined"},
+	}, {
+		msg: "Seccomp default",
+		pod: makePod(map[string]string{
+			api.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default",
+		}),
+		expectedOpts: nil,
+	}, {
+		msg: "AppArmor runtime/default",
+		pod: makePod(map[string]string{
+			apparmor.ContainerAnnotationKeyPrefix + containerName: apparmor.ProfileRuntimeDefault,
+		}),
+		expectedOpts: []string{"seccomp=unconfined"},
+	}, {
+		msg: "AppArmor local profile",
+		pod: makePod(map[string]string{
+			apparmor.ContainerAnnotationKeyPrefix + containerName: apparmor.ProfileNamePrefix + "foo",
+		}),
+		expectedOpts: []string{"seccomp=unconfined", "apparmor=foo"},
+	}, {
+		msg: "AppArmor and seccomp profile",
+		pod: makePod(map[string]string{
+			api.SeccompContainerAnnotationKeyPrefix + containerName: "docker/default",
+			apparmor.ContainerAnnotationKeyPrefix + containerName:   apparmor.ProfileNamePrefix + "foo",
+		}),
+		expectedOpts: []string{"apparmor=foo"},
+	}}
+
+	dm, _ := newTestDockerManagerWithVersion("1.11.1", "1.23")
+	for i, test := range tests {
+		opts, err := dm.getSecurityOpts(test.pod, containerName)
+		assert.NoError(t, err, "TestCase[%d]: %s", i, test.msg)
+		assert.Len(t, opts, len(test.expectedOpts), "TestCase[%d]: %s", i, test.msg)
+		for _, opt := range test.expectedOpts {
+			assert.Contains(t, opts, opt, "TestCase[%d]: %s", i, test.msg)
+		}
+	}
+}
+
 func TestSeccompIsUnconfinedByDefaultWithDockerV110(t *testing.T) {
 	dm, fakeDocker := newTestDockerManagerWithVersion("1.10.1", "1.22")
 	pod := &api.Pod{
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index 38adc92cf4e..3b911bf5bd5 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -71,6 +71,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/util/queue"
 	"k8s.io/kubernetes/pkg/kubelet/volumemanager"
 	"k8s.io/kubernetes/pkg/runtime"
+	"k8s.io/kubernetes/pkg/security/apparmor"
 	"k8s.io/kubernetes/pkg/securitycontext"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/bandwidth"
@@ -554,6 +555,8 @@ func NewMainKubelet(
 	klet.AddPodSyncLoopHandler(activeDeadlineHandler)
 	klet.AddPodSyncHandler(activeDeadlineHandler)
 
+	klet.AddPodAdmitHandler(apparmor.NewValidator(containerRuntime))
+
 	// apply functional Option's
 	for _, opt := range kubeOptions {
 		opt(klet)
diff --git a/pkg/security/apparmor/validate.go b/pkg/security/apparmor/validate.go
index 28952cbc833..4466fd76b30 100644
--- a/pkg/security/apparmor/validate.go
+++ b/pkg/security/apparmor/validate.go
@@ -26,6 +26,7 @@ import (
 	"strings"
 
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/util"
 )
 
@@ -33,12 +34,11 @@ import (
 // Set to true if the wrong build tags are set (see validate_disabled.go).
 var isDisabledBuild bool
 
-// Interface for validating that a pod with with an AppArmor profile can be run by a Node.
-type Validator interface {
-	Validate(pod *api.Pod) error
-}
+const (
+	rejectReason = "AppArmor"
+)
 
-func NewValidator(runtime string) Validator {
+func NewValidator(runtime string) lifecycle.PodAdmitHandler {
 	if err := validateHost(runtime); err != nil {
 		return &validator{validateHostErr: err}
 	}
@@ -58,7 +58,21 @@ type validator struct {
 	appArmorFS      string
 }
 
-func (v *validator) Validate(pod *api.Pod) error {
+// TODO(timstclair): Refactor the PodAdmitInterface to return a (Admit, Reason, Message) rather than
+// the PodAdmitResult struct so that the interface can be implemented without importing lifecycle.
+func (v *validator) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
+	err := v.validate(attrs.Pod)
+	if err == nil {
+		return lifecycle.PodAdmitResult{Admit: true}
+	}
+	return lifecycle.PodAdmitResult{
+		Admit:   false,
+		Reason:  rejectReason,
+		Message: fmt.Sprintf("Cannot enforce AppArmor: %v", err),
+	}
+}
+
+func (v *validator) validate(pod *api.Pod) error {
 	if !isRequired(pod) {
 		return nil
 	}
@@ -94,7 +108,7 @@ func validateHost(runtime string) error {
 	}
 
 	// Check kernel support.
-	if !isAppArmorEnabled() {
+	if !IsAppArmorEnabled() {
 		return errors.New("AppArmor is not enabled on the host")
 	}
 
@@ -192,11 +206,11 @@ func getAppArmorFS() (string, error) {
 	return "", errors.New("securityfs not found")
 }
 
-// isAppArmorEnabled returns true if apparmor is enabled for the host.
+// IsAppArmorEnabled returns true if apparmor is enabled for the host.
 // This function is forked from
 // https://github.com/opencontainers/runc/blob/1a81e9ab1f138c091fe5c86d0883f87716088527/libcontainer/apparmor/apparmor.go
 // to avoid the libapparmor dependency.
-func isAppArmorEnabled() bool {
+func IsAppArmorEnabled() bool {
 	if _, err := os.Stat("/sys/kernel/security/apparmor"); err == nil && os.Getenv("container") == "" {
 		if _, err = os.Stat("/sbin/apparmor_parser"); err == nil {
 			buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled")
diff --git a/pkg/security/apparmor/validate_test.go b/pkg/security/apparmor/validate_test.go
index b200f5b02d0..d71212d8c75 100644
--- a/pkg/security/apparmor/validate_test.go
+++ b/pkg/security/apparmor/validate_test.go
@@ -21,6 +21,7 @@ import (
 	"testing"
 
 	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 
 	"github.com/stretchr/testify/assert"
 )
@@ -36,7 +37,7 @@ func TestGetAppArmorFS(t *testing.T) {
 	assert.Equal(t, expectedPath, actualPath)
 }
 
-func TestValidateHost(t *testing.T) {
+func TestAdmitHost(t *testing.T) {
 	// This test only passes on systems running AppArmor with the default configuration.
 	// The test should be manually run if modifying the getAppArmorFS function.
 	t.Skip()
@@ -45,7 +46,7 @@
 	assert.Error(t, validateHost("rkt"))
 }
 
-func TestValidateProfile(t *testing.T) {
+func TestAdmitProfile(t *testing.T) {
 	loadedProfiles := map[string]bool{
 		"docker-default": true,
 		"foo-bar":        true,
@@ -78,7 +79,7 @@
 	}
 }
 
-func TestValidateBadHost(t *testing.T) {
+func TestAdmitBadHost(t *testing.T) {
 	hostErr := errors.New("expected host error")
 	v := &validator{
 		validateHostErr: hostErr,
@@ -94,16 +95,20 @@
 	}
 
 	for _, test := range tests {
-		err := v.Validate(getPodWithProfile(test.profile))
+		result := v.Admit(&lifecycle.PodAdmitAttributes{
+			Pod: getPodWithProfile(test.profile),
+		})
 		if test.expectValid {
-			assert.NoError(t, err, "Pod with profile %q should be valid", test.profile)
+			assert.True(t, result.Admit, "Pod with profile %q should be admitted", test.profile)
 		} else {
-			assert.Equal(t, hostErr, err, "Pod with profile %q should trigger a host validation error", test.profile)
+			assert.False(t, result.Admit, "Pod with profile %q should be rejected", test.profile)
+			assert.Equal(t, rejectReason, result.Reason, "Pod with profile %q", test.profile)
+			assert.Contains(t, result.Message, hostErr.Error(), "Pod with profile %q", test.profile)
 		}
 	}
 }
 
-func TestValidateValidHost(t *testing.T) {
+func TestAdmitValidHost(t *testing.T) {
 	v := &validator{
 		appArmorFS: "./testdata/",
 	}
@@ -123,11 +128,15 @@
 	}
 
 	for _, test := range tests {
-		err := v.Validate(getPodWithProfile(test.profile))
+		result := v.Admit(&lifecycle.PodAdmitAttributes{
+			Pod: getPodWithProfile(test.profile),
+		})
 		if test.expectValid {
-			assert.NoError(t, err, "Pod with profile %q should be valid", test.profile)
+			assert.True(t, result.Admit, "Pod with profile %q should be admitted", test.profile)
 		} else {
-			assert.Error(t, err, "Pod with profile %q should trigger a validation error", test.profile)
+			assert.False(t, result.Admit, "Pod with profile %q should be rejected", test.profile)
+			assert.Equal(t, rejectReason, result.Reason, "Pod with profile %q", test.profile)
+			assert.NotEmpty(t, result.Message, "Pod with profile %q", test.profile)
 		}
 	}
 
@@ -151,10 +160,16 @@ func TestValidateValidHost(t *testing.T) {
 			},
 		},
 	}
-	assert.NoError(t, v.Validate(pod), "Multi-container pod should validate")
+	assert.True(t, v.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}).Admit,
+		"Multi-container pod should be admitted")
 	for k, val := range pod.Annotations {
 		pod.Annotations[k] = val + "-bad"
-		assert.Error(t, v.Validate(pod), "Multi-container pod with invalid profile %s:%s", k, pod.Annotations[k])
+
+		result := v.Admit(&lifecycle.PodAdmitAttributes{Pod: pod})
+		assert.False(t, result.Admit, "Multi-container pod with invalid profile should be rejected")
+		assert.Equal(t, rejectReason, result.Reason, "Multi-container pod with invalid profile")
+		assert.NotEmpty(t, result.Message, "Multi-container pod with invalid profile")
+
 		pod.Annotations[k] = val // Restore.
 	}
 }
diff --git a/test/e2e_node/apparmor_test.go b/test/e2e_node/apparmor_test.go
new file mode 100644
index 00000000000..e23dcbf5eed
--- /dev/null
+++ b/test/e2e_node/apparmor_test.go
@@ -0,0 +1,194 @@
+/*
+Copyright 2016 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package e2e_node
+
+import (
+	"bytes"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"regexp"
+	"strconv"
+	"strings"
+
+	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/security/apparmor"
+	"k8s.io/kubernetes/test/e2e/framework"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/golang/glog"
+	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
+)
+
+var _ = framework.KubeDescribe("AppArmor [Feature:AppArmor]", func() {
+	if isAppArmorEnabled() {
+		testAppArmorNode()
+	} else {
+		testNonAppArmorNode()
+	}
+})
+
+func testAppArmorNode() {
+	BeforeEach(func() {
+		By("Loading AppArmor profiles for testing")
+		framework.ExpectNoError(loadTestProfiles(), "Could not load AppArmor test profiles")
+	})
+	Context("when running with AppArmor", func() {
+		f := framework.NewDefaultFramework("apparmor-test")
+
+		It("should reject an unloaded profile", func() {
+			status := runAppArmorTest(f, "localhost/"+"non-existent-profile")
+			Expect(status.Phase).To(Equal(api.PodFailed), "PodStatus: %+v", status)
+			Expect(status.Reason).To(Equal("AppArmor"), "PodStatus: %+v", status)
+		})
+		It("should enforce a profile blocking writes", func() {
+			status := runAppArmorTest(f, "localhost/"+apparmorProfilePrefix+"deny-write")
+			if len(status.ContainerStatuses) == 0 {
+				framework.Failf("Unexpected pod status: %s", spew.Sdump(status))
+				return
+			}
+			state := status.ContainerStatuses[0].State.Terminated
+			Expect(state.ExitCode).To(Not(BeZero()), "ContainerStateTerminated: %+v", state)
+
+		})
+		It("should enforce a permissive profile", func() {
+			status := runAppArmorTest(f, "localhost/"+apparmorProfilePrefix+"audit-write")
+			if len(status.ContainerStatuses) == 0 {
+				framework.Failf("Unexpected pod status: %s", spew.Sdump(status))
+				return
+			}
+			state := status.ContainerStatuses[0].State.Terminated
+			Expect(state.ExitCode).To(BeZero(), "ContainerStateTerminated: %+v", state)
+		})
+	})
+}
+
+func testNonAppArmorNode() {
+	Context("when running without AppArmor", func() {
+		f := framework.NewDefaultFramework("apparmor-test")
+
+		It("should reject a pod with an AppArmor profile", func() {
+			status := runAppArmorTest(f, "runtime/default")
+			Expect(status.Phase).To(Equal(api.PodFailed), "PodStatus: %+v", status)
+			Expect(status.Reason).To(Equal("AppArmor"), "PodStatus: %+v", status)
+		})
+	})
+}
+
+const apparmorProfilePrefix = "e2e-node-apparmor-test-"
+const testProfiles = `
+#include <tunables/global>
+
+profile e2e-node-apparmor-test-deny-write flags=(attach_disconnected) {
+  #include <abstractions/base>
+
+  file,
+
+  # Deny all file writes.
+  deny /** w,
+}
+
+profile e2e-node-apparmor-test-audit-write flags=(attach_disconnected) {
+  #include <abstractions/base>
+
+  file,
+
+  # Only audit file writes.
+  audit /** w,
+}
+`
+
+func loadTestProfiles() error {
+	f, err := ioutil.TempFile("/tmp", "apparmor")
+	if err != nil {
+		return fmt.Errorf("failed to open temp file: %v", err)
+	}
+	defer os.Remove(f.Name())
+	defer f.Close()
+
+	if _, err := f.WriteString(testProfiles); err != nil {
+		return fmt.Errorf("failed to write profiles to file: %v", err)
+	}
+
+	cmd := exec.Command("sudo", "apparmor_parser", "-r", "-W", f.Name())
+	stderr := &bytes.Buffer{}
+	cmd.Stderr = stderr
+	out, err := cmd.Output()
+	// apparmor_parser does not always return an error code, so consider any stderr output an error.
+	if err != nil || stderr.Len() > 0 {
+		if stderr.Len() > 0 {
+			glog.Warning(stderr.String())
+		}
+		if len(out) > 0 {
+			glog.Infof("apparmor_parser: %s", out)
+		}
+		return fmt.Errorf("failed to load profiles: %v", err)
+	}
+	glog.V(2).Infof("Loaded profiles: %v", out)
+	return nil
+}
+
+func runAppArmorTest(f *framework.Framework, profile string) api.PodStatus {
+	pod := createPodWithAppArmor(f, profile)
+	framework.ExpectNoError(f.WaitForPodNoLongerRunning(pod.Name))
+	p, err := f.PodClient().Get(pod.Name)
+	framework.ExpectNoError(err)
+	return p.Status
+}
+
+func createPodWithAppArmor(f *framework.Framework, profile string) *api.Pod {
+	pod := &api.Pod{
+		ObjectMeta: api.ObjectMeta{
+			Name: fmt.Sprintf("test-apparmor-%s", strings.Replace(profile, "/", "-", -1)),
+			Annotations: map[string]string{
+				"container.apparmor.security.alpha.kubernetes.io/test": profile,
+			},
+		},
+		Spec: api.PodSpec{
+			Containers: []api.Container{{
+				Name:    "test",
+				Image:   ImageRegistry[busyBoxImage],
+				Command: []string{"touch", "foo"},
+			}},
+			RestartPolicy: api.RestartPolicyNever,
+		},
+	}
+	return f.PodClient().Create(pod)
+}
+
+func isAppArmorEnabled() bool {
+	// TODO(timstclair): Pass this through the image setup rather than hardcoding.
+	if strings.Contains(framework.TestContext.NodeName, "-gci-dev-") {
+		gciVersionRe := regexp.MustCompile("-gci-dev-([0-9]+)-")
+		matches := gciVersionRe.FindStringSubmatch(framework.TestContext.NodeName)
+		if len(matches) == 2 {
+			version, err := strconv.Atoi(matches[1])
+			if err != nil {
+				glog.Errorf("Error parsing GCI version from NodeName %q: %v", framework.TestContext.NodeName, err)
+				return false
+			}
+			return version >= 54
+		}
+		return false
+	}
+	if strings.Contains(framework.TestContext.NodeName, "-ubuntu-") {
+		return true
+	}
+	return apparmor.IsAppArmorEnabled()
+}
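
For reviewers, a minimal sketch (not part of the patch) of how a pod opts into one of these profiles through the annotation this change reads; the container name "nginx" and profile "my-deny-write" are illustrative, and the profile must already be loaded on the node or the new admit handler rejects the pod with Reason "AppArmor":

	// Illustrative only: request the locally loaded AppArmor profile "my-deny-write"
	// for the container named "nginx" (both names are hypothetical).
	pod := &api.Pod{
		ObjectMeta: api.ObjectMeta{
			Name: "apparmor-example",
			Annotations: map[string]string{
				// Key: container.apparmor.security.alpha.kubernetes.io/<container-name>
				// Value: "runtime/default" or "localhost/<profile>"
				apparmor.ContainerAnnotationKeyPrefix + "nginx": apparmor.ProfileNamePrefix + "my-deny-write",
			},
		},
		Spec: api.PodSpec{
			Containers:    []api.Container{{Name: "nginx", Image: "nginx"}},
			RestartPolicy: api.RestartPolicyNever,
		},
	}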