Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 03:41:45 +00:00)
Merge pull request #52367 from tallclair/psp-config
Automatic merge from submit-queue (batch tested with PRs 52367, 53363, 54989, 54872, 54643). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Basic GCE PodSecurityPolicy Config

**What this PR does / why we need it**: This PR lays the foundation for enabling PodSecurityPolicy in GCE and other default deployments. The 3 commits are:

1. Add policies, roles & bindings for the default addons on GCE.
2. Enable the PSP admission controller & load the addon policies when the `ENABLE_POD_SECURITY_POLICY=true` environment variable is set.
3. Support the PodSecurityPolicy in the E2E environment & add PSP tests.

NOTES:
- ~~Depends on https://github.com/kubernetes/kubernetes/pull/52301 for privileged capabilities~~
- ~~Depends on https://github.com/kubernetes/kubernetes/pull/52849 for sane mutations~~
- ~~Depends on https://github.com/kubernetes/kubernetes/pull/53479 for aggregator tests to pass~~
- ~~Depends on https://github.com/kubernetes/kubernetes/pull/54175 for dedicated fluentd service account~~
- This PR is a fork of https://github.com/kubernetes/kubernetes/pull/46064, credit to @Q-Lee

**Which issue this PR fixes**: #43538

**Release note**:
```release-note
Add support for PodSecurityPolicy on GCE: `ENABLE_POD_SECURITY_POLICY=true` enables the admission controller, and installs policies for default addons.
```
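For context, enabling the feature on a fresh GCE cluster would look roughly like the following. This is an illustrative sketch: `ENABLE_POD_SECURITY_POLICY` is the variable added by this PR, while the surrounding kube-up workflow is the standard one and not part of this change.

```sh
# Bring up a GCE cluster with the PodSecurityPolicy admission controller
# enabled and the addon policies installed (hypothetical invocation).
export KUBERNETES_PROVIDER=gce
export ENABLE_POD_SECURITY_POLICY=true
cluster/kube-up.sh
```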
This commit is contained in: commit 96d81fe688
@ -1,3 +1,13 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: metadata-proxy
+  namespace: kube-system
+  labels:
+    k8s-app: metadata-proxy
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+---
 apiVersion: extensions/v1beta1
 kind: DaemonSet
 metadata:
@ -23,6 +33,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      serviceAccountName: metadata-proxy
       hostNetwork: true
       dnsPolicy: Default
       containers:
@ -1,32 +0,0 @@ (file deleted)
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  annotations:
    kubernetes.io/description: 'privileged allows access to all privileged and host
      features and the ability to run as any user, any group, any fsGroup, and with
      any SELinux context.'
  creationTimestamp: 2016-05-06T19:28:58Z
  name: privileged
spec:
  privileged: true
  defaultAddCapabilities: null
  requiredDropCapabilities: null
  allowedCapabilities: null
  volumes:
  - '*'
  hostNetwork: true
  hostPorts:
  -
    min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false
@ -633,6 +633,7 @@ KUBE_PROXY_DAEMONSET: $(yaml-quote ${KUBE_PROXY_DAEMONSET:-false})
 KUBE_PROXY_TOKEN: $(yaml-quote ${KUBE_PROXY_TOKEN:-})
 NODE_PROBLEM_DETECTOR_TOKEN: $(yaml-quote ${NODE_PROBLEM_DETECTOR_TOKEN:-})
 ADMISSION_CONTROL: $(yaml-quote ${ADMISSION_CONTROL:-})
+ENABLE_POD_SECURITY_POLICY: $(yaml-quote ${ENABLE_POD_SECURITY_POLICY:-})
 MASTER_IP_RANGE: $(yaml-quote ${MASTER_IP_RANGE})
 RUNTIME_CONFIG: $(yaml-quote ${RUNTIME_CONFIG})
 CA_CERT: $(yaml-quote ${CA_CERT_BASE64:-})
@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:event-exporter
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: gce:podsecuritypolicy:event-exporter
subjects:
- kind: ServiceAccount
  name: event-exporter-sa
  namespace: kube-system
@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: gce:podsecuritypolicy:event-exporter
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - extensions
  resourceNames:
  - gce.event-exporter
  resources:
  - podsecuritypolicies
  verbs:
  - use
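The Role/RoleBinding pair above grants only the `use` verb on the single named policy. As a rough sanity check (illustrative command, assuming a kubeconfig with impersonation rights), an access review for the addon's service account should come back affirmative:

```sh
# Should print "yes": the event-exporter service account may "use" its PSP.
kubectl auth can-i use podsecuritypolicies/gce.event-exporter \
    --as=system:serviceaccount:kube-system:event-exporter-sa \
    -n kube-system
```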
cluster/gce/addons/podsecuritypolicies/event-exporter.yaml (new file, 38 lines)
@ -0,0 +1,38 @@
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.event-exporter
  annotations:
    kubernetes.io/description: 'Policy used by the event-exporter addon.'
    # TODO: event-exporter should run with the default seccomp profile
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
    # 'runtime/default' is already the default, but must be filled in on the
    # pod to pass admission.
    apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
    apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
  labels:
    kubernetes.io/cluster-service: 'true'
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: false
  allowPrivilegeEscalation: false
  volumes:
  - 'hostPath'
  - 'secret'
  # TODO: This only needs a hostPath to read /etc/ssl/certs,
  # but it should be able to just include these in the image.
  allowedHostPaths:
  - pathPrefix: /etc/ssl/certs
  hostNetwork: false
  hostIPC: false
  hostPID: false
  # TODO: This doesn't need to run as root.
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false
@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:fluentd-gcp
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: gce:podsecuritypolicy:fluentd-gcp
subjects:
- kind: ServiceAccount
  name: fluentd-gcp
  namespace: kube-system
cluster/gce/addons/podsecuritypolicies/fluentd-gcp-role.yaml (new file, 17 lines)
@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: gce:podsecuritypolicy:fluentd-gcp
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - extensions
  resourceNames:
  - gce.fluentd-gcp
  resources:
  - podsecuritypolicies
  verbs:
  - use
cluster/gce/addons/podsecuritypolicies/fluentd-gcp.yaml (new file, 38 lines)
@ -0,0 +1,38 @@
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.fluentd-gcp
  annotations:
    kubernetes.io/description: 'Policy used by the fluentd-gcp addon.'
    # TODO: fluentd-gcp should run with the default seccomp profile
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
    # 'runtime/default' is already the default, but must be filled in on the
    # pod to pass admission.
    apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
    apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
  labels:
    kubernetes.io/cluster-service: 'true'
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: false
  allowPrivilegeEscalation: false
  volumes:
  - 'configMap'
  - 'hostPath'
  - 'secret'
  allowedHostPaths:
  - pathPrefix: /var/log
  - pathPrefix: /var/lib/docker/containers
  - pathPrefix: /usr/lib64
  hostNetwork: true
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false
@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:kube-proxy
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gce:podsecuritypolicy:privileged
subjects:
- kind: ServiceAccount
  name: kube-proxy
  namespace: kube-system
@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:unprivileged-addon
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: gce:podsecuritypolicy:unprivileged-addon
subjects:
- kind: Group
  # All service accounts in the kube-system namespace are allowed to use this.
  name: system:serviceaccounts:kube-system
  apiGroup: rbac.authorization.k8s.io
@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:metadata-proxy
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gce:podsecuritypolicy:privileged
subjects:
- kind: ServiceAccount
  name: metadata-proxy
  namespace: kube-system
cluster/gce/addons/podsecuritypolicies/node-binding.yaml (new file, 24 lines)
@ -0,0 +1,24 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:nodes
  namespace: kube-system
  annotations:
    kubernetes.io/description: 'Allow nodes to create privileged pods. Should
      be used in combination with the NodeRestriction admission plugin to limit
      nodes to mirror pods bound to themselves.'
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: 'true'
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gce:podsecuritypolicy:privileged
subjects:
- kind: Group
  apiGroup: rbac.authorization.k8s.io
  name: system:nodes
- kind: User
  apiGroup: rbac.authorization.k8s.io
  # Legacy node ID
  name: kubelet
cluster/gce/addons/podsecuritypolicies/npd-binding.yaml (new file, 16 lines)
@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:npd
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gce:podsecuritypolicy:privileged
subjects:
- kind: ServiceAccount
  name: node-problem-detector
  namespace: kube-system
@ -0,0 +1,18 @@
apiVersion: rbac.authorization.k8s.io/v1
# The persistent volume binder creates recycler pods in the default namespace,
# but the addon manager only creates namespaced objects in the kube-system
# namespace, so this is a ClusterRoleBinding.
kind: ClusterRoleBinding
metadata:
  name: gce:podsecuritypolicy:persistent-volume-binder
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gce:podsecuritypolicy:persistent-volume-binder
subjects:
- kind: ServiceAccount
  name: persistent-volume-binder
  namespace: kube-system
@ -0,0 +1,20 @@
apiVersion: rbac.authorization.k8s.io/v1
# The persistent volume binder creates recycler pods in the default namespace,
# but the addon manager only creates namespaced objects in the kube-system
# namespace, so this is a ClusterRole.
kind: ClusterRole
metadata:
  name: gce:podsecuritypolicy:persistent-volume-binder
  namespace: default
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - extensions
  resourceNames:
  - gce.persistent-volume-binder
  resources:
  - podsecuritypolicies
  verbs:
  - use
@ -0,0 +1,29 @@
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.persistent-volume-binder
  annotations:
    kubernetes.io/description: 'Policy used by the persistent-volume-binder
      (a.k.a. persistentvolume-controller) to run recycler pods.'
    # TODO: This should use the default seccomp profile.
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
  labels:
    kubernetes.io/cluster-service: 'true'
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: false
  volumes:
  - 'nfs'
  - 'secret' # Required for service account credentials.
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false
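Because the grant above is cluster-scoped, the binder's kube-system service account can use the policy even for recycler pods it creates in the default namespace. A rough check (illustrative, same assumptions as the earlier kubectl example):

```sh
# Should print "yes" in the *default* namespace, which a namespaced
# Role/RoleBinding confined to kube-system could not achieve.
kubectl auth can-i use podsecuritypolicies/gce.persistent-volume-binder \
    --as=system:serviceaccount:kube-system:persistent-volume-binder \
    -n default
```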
cluster/gce/addons/podsecuritypolicies/privileged-role.yaml (new file, 16 lines)
@ -0,0 +1,16 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: gce:podsecuritypolicy:privileged
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - extensions
  resourceNames:
  - gce.privileged
  resources:
  - podsecuritypolicies
  verbs:
  - use
cluster/gce/addons/podsecuritypolicies/privileged.yaml (new file, 33 lines)
@ -0,0 +1,33 @@
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.privileged
  annotations:
    kubernetes.io/description: 'privileged allows full unrestricted access to
      pod features, as if the PodSecurityPolicy controller was not enabled.'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities:
  - '*'
  volumes:
  - '*'
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false
@ -0,0 +1,17 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: gce:podsecuritypolicy:unprivileged-addon
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - extensions
  resourceNames:
  - gce.unprivileged-addon
  resources:
  - podsecuritypolicies
  verbs:
  - use
@ -0,0 +1,38 @@
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.unprivileged-addon
  annotations:
    kubernetes.io/description: 'This policy grants the minimum amount of
      privilege necessary to run non-privileged kube-system pods. This policy is
      not intended for use outside of kube-system, and may include further
      restrictions in the future.'
    # TODO: Addons should use the default seccomp profile.
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
    # 'runtime/default' is already the default, but must be filled in on the
    # pod to pass admission.
    apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
    apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
  labels:
    kubernetes.io/cluster-service: 'true'
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: false
  allowPrivilegeEscalation: false
  volumes:
  - 'emptyDir'
  - 'configMap'
  - 'secret'
  hostNetwork: false
  hostIPC: false
  hostPID: false
  # TODO: The addons using this profile should not run as root.
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false
@ -257,8 +257,14 @@ if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
 fi

 # Admission Controllers to invoke prior to persisting objects in cluster
-# If we included ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely.
-ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,ResourceQuota
+ADMISSION_CONTROL=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority
+
+if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
+  ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
+fi
+
+# ResourceQuota must come last, or a creation is recorded, but the pod was forbidden.
+ADMISSION_CONTROL="${ADMISSION_CONTROL},ResourceQuota"

 # Optional: if set to true kube-up will automatically check for existing resources and clean them up.
 KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
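With the flag set, the resulting plugin list keeps ResourceQuota in the final position while PodSecurityPolicy is evaluated before it. A quick sketch of the expansion (illustrative; mirrors the logic above with a shortened base list):

```sh
#!/usr/bin/env bash
ENABLE_POD_SECURITY_POLICY=true
ADMISSION_CONTROL="Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,Priority"
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
  ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
fi
ADMISSION_CONTROL="${ADMISSION_CONTROL},ResourceQuota"
# Prints ...,Priority,PodSecurityPolicy,ResourceQuota
echo "${ADMISSION_CONTROL}"
```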
@ -298,8 +298,16 @@ if [[ -n "${GCE_GLBC_IMAGE:-}" ]]; then
   PROVIDER_VARS="${PROVIDER_VARS:-} GCE_GLBC_IMAGE"
 fi

-# If we included ResourceQuota, we should keep it at the end of the list to prevent incrementing quota usage prematurely.
-ADMISSION_CONTROL="${KUBE_ADMISSION_CONTROL:-Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,PodPreset,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,ResourceQuota,GenericAdmissionWebhook}"
+if [[ -z "${KUBE_ADMISSION_CONTROL:-}" ]]; then
+  ADMISSION_CONTROL="Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,PodPreset,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority"
+  if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
+    ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
+  fi
+  # ResourceQuota must come last, or a creation is recorded, but the pod may be forbidden.
+  ADMISSION_CONTROL="${ADMISSION_CONTROL},ResourceQuota,GenericAdmissionWebhook"
+else
+  ADMISSION_CONTROL=${KUBE_ADMISSION_CONTROL}
+fi

 # Optional: if set to true kube-up will automatically check for existing resources and clean them up.
 KUBE_UP_AUTOMATIC_CLEANUP=${KUBE_UP_AUTOMATIC_CLEANUP:-false}
@ -421,6 +421,7 @@ enable_l7_loadbalancing: '$(echo "$ENABLE_L7_LOADBALANCING" | sed -e "s/'/''/g")
 enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
 enable_metadata_proxy: '$(echo "$ENABLE_METADATA_CONCEALMENT" | sed -e "s/'/''/g")'
 enable_metrics_server: '$(echo "$ENABLE_METRICS_SERVER" | sed -e "s/'/''/g")'
+enable_pod_security_policy: '$(echo "$ENABLE_POD_SECURITY_POLICY" | sed -e "s/'/''/g")'
 enable_rescheduler: '$(echo "$ENABLE_RESCHEDULER" | sed -e "s/'/''/g")'
 logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
 elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
@ -1750,6 +1750,10 @@ function start-kube-addons {
   # prep addition kube-up specific rbac objects
   setup-addon-manifests "addons" "rbac"

+  if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
+    setup-addon-manifests "addons" "podsecuritypolicies"
+  fi
+
   # Set up manifests of other addons.
   if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" ]]; then
     prepare-kube-proxy-manifest-variables "$src_dir/kube-proxy/kube-proxy-ds.yaml"
@ -165,6 +165,17 @@ addon-dir-create:
     - file_mode: 644
 {% endif %}

+{% if pillar.get('enable_pod_security_policy', '').lower() == 'true' %}
+/etc/kubernetes/addons/podsecuritypolicies:
+  file.recurse:
+    - source: salt://kube-addons/podsecuritypolicies
+    - include_pat: E@^.+\.yaml$
+    - user: root
+    - group: root
+    - dir_mode: 755
+    - file_mode: 644
+{% endif %}
+
 {% if pillar.get('enable_cluster_ui', '').lower() == 'true' %}
 /etc/kubernetes/addons/dashboard:
   file.recurse:
@ -13,12 +13,17 @@ go_library(
         "framework.go",
         "metadata_concealment.go",
         "node_authz.go",
+        "pod_security_policy.go",
         "service_accounts.go",
     ],
     importpath = "k8s.io/kubernetes/test/e2e/auth",
     deps = [
+        "//pkg/security/apparmor:go_default_library",
+        "//pkg/security/podsecuritypolicy/seccomp:go_default_library",
+        "//pkg/security/podsecuritypolicy/util:go_default_library",
         "//pkg/util/version:go_default_library",
         "//plugin/pkg/admission/serviceaccount:go_default_library",
+        "//test/e2e/common:go_default_library",
         "//test/e2e/framework:go_default_library",
         "//test/utils/image:go_default_library",
         "//vendor/github.com/evanphx/json-patch:go_default_library",
@ -28,15 +33,18 @@ go_library(
         "//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
         "//vendor/k8s.io/api/core/v1:go_default_library",
+        "//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
         "//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
         "//vendor/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
         "//vendor/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
         "//vendor/k8s.io/apiextensions-apiserver/test/integration/testserver:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
         "//vendor/k8s.io/apiserver/pkg/apis/audit/v1beta1:go_default_library",
         "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library",
+        "//vendor/k8s.io/client-go/rest:go_default_library",
test/e2e/auth/pod_security_policy.go (new file, 316 lines)
@ -0,0 +1,316 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package auth

import (
	"fmt"

	"k8s.io/api/core/v1"
	corev1 "k8s.io/api/core/v1"
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/authentication/serviceaccount"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/pkg/security/apparmor"
	"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"
	psputil "k8s.io/kubernetes/pkg/security/podsecuritypolicy/util"
	"k8s.io/kubernetes/test/e2e/common"
	"k8s.io/kubernetes/test/e2e/framework"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var (
	restrictivePSPTemplate = &extensionsv1beta1.PodSecurityPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name: "restrictive",
			Annotations: map[string]string{
				seccomp.AllowedProfilesAnnotationKey:  "docker/default",
				seccomp.DefaultProfileAnnotationKey:   "docker/default",
				apparmor.AllowedProfilesAnnotationKey: apparmor.ProfileRuntimeDefault,
				apparmor.DefaultProfileAnnotationKey:  apparmor.ProfileRuntimeDefault,
			},
			Labels: map[string]string{
				"kubernetes.io/cluster-service":   "true",
				"addonmanager.kubernetes.io/mode": "Reconcile",
			},
		},
		Spec: extensionsv1beta1.PodSecurityPolicySpec{
			Privileged:               false,
			AllowPrivilegeEscalation: boolPtr(false),
			RequiredDropCapabilities: []corev1.Capability{
				"AUDIT_WRITE",
				"CHOWN",
				"DAC_OVERRIDE",
				"FOWNER",
				"FSETID",
				"KILL",
				"MKNOD",
				"NET_RAW",
				"SETGID",
				"SETUID",
				"SYS_CHROOT",
			},
			Volumes: []extensionsv1beta1.FSType{
				extensionsv1beta1.ConfigMap,
				extensionsv1beta1.EmptyDir,
				extensionsv1beta1.PersistentVolumeClaim,
				"projected",
				extensionsv1beta1.Secret,
			},
			HostNetwork: false,
			HostIPC:     false,
			HostPID:     false,
			RunAsUser: extensionsv1beta1.RunAsUserStrategyOptions{
				Rule: extensionsv1beta1.RunAsUserStrategyMustRunAsNonRoot,
			},
			SELinux: extensionsv1beta1.SELinuxStrategyOptions{
				Rule: extensionsv1beta1.SELinuxStrategyRunAsAny,
			},
			SupplementalGroups: extensionsv1beta1.SupplementalGroupsStrategyOptions{
				Rule: extensionsv1beta1.SupplementalGroupsStrategyRunAsAny,
			},
			FSGroup: extensionsv1beta1.FSGroupStrategyOptions{
				Rule: extensionsv1beta1.FSGroupStrategyRunAsAny,
			},
			ReadOnlyRootFilesystem: false,
		},
	}
)

var _ = SIGDescribe("PodSecurityPolicy", func() {
	f := framework.NewDefaultFramework("podsecuritypolicy")
	f.SkipPrivilegedPSPBinding = true

	// Client that will impersonate the default service account, in order to run
	// with reduced privileges.
	var c clientset.Interface
	var ns string // Test namespace, for convenience
	BeforeEach(func() {
		if !framework.IsPodSecurityPolicyEnabled(f) {
			framework.Skipf("PodSecurityPolicy not enabled")
		}
		if !framework.IsRBACEnabled(f) {
			framework.Skipf("RBAC not enabled")
		}
		ns = f.Namespace.Name

		By("Creating a kubernetes client that impersonates the default service account")
		config, err := framework.LoadConfig()
		framework.ExpectNoError(err)
		config.Impersonate = restclient.ImpersonationConfig{
			UserName: serviceaccount.MakeUsername(ns, "default"),
			Groups:   serviceaccount.MakeGroupNames(ns),
		}
		c, err = clientset.NewForConfig(config)
		framework.ExpectNoError(err)

		By("Binding the edit role to the default SA")
		framework.BindClusterRole(f.ClientSet.RbacV1beta1(), "edit", ns,
			rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: ns, Name: "default"})
	})

	It("should forbid pod creation when no PSP is available", func() {
		By("Running a restricted pod")
		_, err := c.Core().Pods(ns).Create(restrictedPod(f, "restricted"))
		expectForbidden(err)
	})

	It("should enforce the restricted PodSecurityPolicy", func() {
		By("Creating & Binding a restricted policy for the test service account")
		_, cleanup := createAndBindPSP(f, restrictivePSPTemplate)
		defer cleanup()

		By("Running a restricted pod")
		pod, err := c.Core().Pods(ns).Create(restrictedPod(f, "allowed"))
		framework.ExpectNoError(err)
		framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, pod.Name, pod.Namespace))

		testPrivilegedPods(f, func(pod *v1.Pod) {
			_, err := c.Core().Pods(ns).Create(pod)
			expectForbidden(err)
		})
	})

	It("should allow pods under the privileged PodSecurityPolicy", func() {
		By("Creating & Binding a privileged policy for the test service account")
		// Ensure that the permissive policy is used even in the presence of the restricted policy.
		_, cleanup := createAndBindPSP(f, restrictivePSPTemplate)
		defer cleanup()
		expectedPSP, cleanup := createAndBindPSP(f, framework.PrivilegedPSP("permissive"))
		defer cleanup()

		testPrivilegedPods(f, func(pod *v1.Pod) {
			p, err := c.Core().Pods(ns).Create(pod)
			framework.ExpectNoError(err)
			framework.ExpectNoError(framework.WaitForPodNameRunningInNamespace(c, p.Name, p.Namespace))

			// Verify expected PSP was used.
			p, err = c.Core().Pods(ns).Get(p.Name, metav1.GetOptions{})
			framework.ExpectNoError(err)
			validated, found := p.Annotations[psputil.ValidatedPSPAnnotation]
			Expect(found).To(BeTrue(), "PSP annotation not found")
			Expect(validated).To(Equal(expectedPSP.Name), "Unexpected validated PSP")
		})
	})
})

func expectForbidden(err error) {
	Expect(err).To(HaveOccurred(), "should be forbidden")
	Expect(apierrs.IsForbidden(err)).To(BeTrue(), "should be forbidden error")
}

func testPrivilegedPods(f *framework.Framework, tester func(pod *v1.Pod)) {
	By("Running a privileged pod", func() {
		privileged := restrictedPod(f, "privileged")
		privileged.Spec.Containers[0].SecurityContext.Privileged = boolPtr(true)
		privileged.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation = nil
		tester(privileged)
	})

	By("Running a HostPath pod", func() {
		hostpath := restrictedPod(f, "hostpath")
		hostpath.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{{
			Name:      "hp",
			MountPath: "/hp",
		}}
		hostpath.Spec.Volumes = []v1.Volume{{
			Name: "hp",
			VolumeSource: v1.VolumeSource{
				HostPath: &v1.HostPathVolumeSource{Path: "/tmp"},
			},
		}}
		tester(hostpath)
	})

	By("Running a HostNetwork pod", func() {
		hostnet := restrictedPod(f, "hostnet")
		hostnet.Spec.HostNetwork = true
		tester(hostnet)
	})

	By("Running a HostPID pod", func() {
		hostpid := restrictedPod(f, "hostpid")
		hostpid.Spec.HostPID = true
		tester(hostpid)
	})

	By("Running a HostIPC pod", func() {
		hostipc := restrictedPod(f, "hostipc")
		hostipc.Spec.HostIPC = true
		tester(hostipc)
	})

	if common.IsAppArmorSupported() {
		By("Running a custom AppArmor profile pod", func() {
			aa := restrictedPod(f, "apparmor")
			// Every node is expected to have the docker-default profile.
			aa.Annotations[apparmor.ContainerAnnotationKeyPrefix+"pause"] = "localhost/docker-default"
			tester(aa)
		})
	}

	By("Running an unconfined Seccomp pod", func() {
		unconfined := restrictedPod(f, "seccomp")
		unconfined.Annotations[v1.SeccompPodAnnotationKey] = "unconfined"
		tester(unconfined)
	})

	By("Running a CAP_SYS_ADMIN pod", func() {
		sysadmin := restrictedPod(f, "sysadmin")
		sysadmin.Spec.Containers[0].SecurityContext.Capabilities = &v1.Capabilities{
			Add: []v1.Capability{"CAP_SYS_ADMIN"},
		}
		sysadmin.Spec.Containers[0].SecurityContext.AllowPrivilegeEscalation = nil
		tester(sysadmin)
	})
}

func createAndBindPSP(f *framework.Framework, pspTemplate *extensionsv1beta1.PodSecurityPolicy) (psp *extensionsv1beta1.PodSecurityPolicy, cleanup func()) {
	// Create the PodSecurityPolicy object.
	psp = pspTemplate.DeepCopy()
	// Add the namespace to the name to ensure uniqueness and tie it to the namespace.
	ns := f.Namespace.Name
	name := fmt.Sprintf("%s-%s", ns, psp.Name)
	psp.Name = name
	psp, err := f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().Create(psp)
	framework.ExpectNoError(err, "Failed to create PSP")

	// Create the Role to bind it to the namespace.
	_, err = f.ClientSet.RbacV1beta1().Roles(ns).Create(&rbacv1beta1.Role{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
		Rules: []rbacv1beta1.PolicyRule{{
			APIGroups:     []string{"extensions"},
			Resources:     []string{"podsecuritypolicies"},
			ResourceNames: []string{name},
			Verbs:         []string{"use"},
		}},
	})
	framework.ExpectNoError(err, "Failed to create PSP role")

	// Bind the role to the namespace.
	framework.BindRoleInNamespace(f.ClientSet.RbacV1beta1(), name, ns, rbacv1beta1.Subject{
		Kind:      rbacv1beta1.ServiceAccountKind,
		Namespace: ns,
		Name:      "default",
	})
	framework.ExpectNoError(framework.WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(),
		serviceaccount.MakeUsername(ns, "default"), ns, "use", name,
		schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true))

	return psp, func() {
		// Cleanup non-namespaced PSP object.
		f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().Delete(name, &metav1.DeleteOptions{})
	}
}

func restrictedPod(f *framework.Framework, name string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
			Annotations: map[string]string{
				v1.SeccompPodAnnotationKey:                      "docker/default",
				apparmor.ContainerAnnotationKeyPrefix + "pause": apparmor.ProfileRuntimeDefault,
			},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  "pause",
				Image: framework.GetPauseImageName(f.ClientSet),
				SecurityContext: &v1.SecurityContext{
					AllowPrivilegeEscalation: boolPtr(false),
					RunAsUser:                intPtr(65534),
				},
			}},
		},
	}
}

func boolPtr(b bool) *bool {
	return &b
}

func intPtr(i int64) *int64 {
	return &i
}
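To exercise these tests against a PSP-enabled cluster, a focused e2e run along these lines should work (illustrative; assumes the standard hack/e2e.go runner and a cluster brought up with `ENABLE_POD_SECURITY_POLICY=true`):

```sh
# Run only the PodSecurityPolicy e2e specs; they self-skip when the
# admission controller or RBAC is not enabled on the target cluster.
go run hack/e2e.go -- --test \
    --test_args="--ginkgo.focus=PodSecurityPolicy"
```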
@ -40,6 +40,10 @@ const (
 // AppArmorDistros are distros with AppArmor support
 var AppArmorDistros = []string{"gci", "ubuntu"}

+func IsAppArmorSupported() bool {
+	return framework.NodeOSDistroIs(AppArmorDistros...)
+}
+
 func SkipIfAppArmorNotSupported() {
 	framework.SkipUnlessNodeOSDistroIs(AppArmorDistros...)
 }
@ -26,6 +26,7 @@ go_library(
         "nodes_util.go",
         "perf_util.go",
         "pods.go",
+        "psp_util.go",
         "pv_util.go",
         "rc_util.go",
         "resource_usage_gatherer.go",
@ -66,6 +67,7 @@ go_library(
         "//pkg/kubelet/util/format:go_default_library",
         "//pkg/kubemark:go_default_library",
         "//pkg/master/ports:go_default_library",
+        "//pkg/security/podsecuritypolicy/seccomp:go_default_library",
         "//pkg/ssh:go_default_library",
         "//pkg/util/file:go_default_library",
         "//pkg/util/system:go_default_library",
@ -125,6 +127,7 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/util/yaml:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/version:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
+        "//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
         "//vendor/k8s.io/client-go/discovery:go_default_library",
         "//vendor/k8s.io/client-go/dynamic:go_default_library",
         "//vendor/k8s.io/client-go/informers:go_default_library",
@ -18,6 +18,7 @@ package framework

 import (
 	"fmt"
+	"sync"
 	"time"

 	authorizationv1beta1 "k8s.io/api/authorization/v1beta1"
@ -38,6 +39,12 @@ const (
 // WaitForAuthorizationUpdate checks if the given user can perform the named verb and action.
 // If policyCachePollTimeout is reached without the expected condition matching, an error is returned
 func WaitForAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviewsGetter, user, namespace, verb string, resource schema.GroupResource, allowed bool) error {
+	return WaitForNamedAuthorizationUpdate(c, user, namespace, verb, "", resource, allowed)
+}
+
+// WaitForNamedAuthorizationUpdate checks if the given user can perform the named verb and action on the named resource.
+// If policyCachePollTimeout is reached without the expected condition matching, an error is returned
+func WaitForNamedAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviewsGetter, user, namespace, verb, resourceName string, resource schema.GroupResource, allowed bool) error {
 	review := &authorizationv1beta1.SubjectAccessReview{
 		Spec: authorizationv1beta1.SubjectAccessReviewSpec{
 			ResourceAttributes: &authorizationv1beta1.ResourceAttributes{
@ -45,6 +52,7 @@ func WaitForAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviewsGette
 				Verb:      verb,
 				Resource:  resource.Resource,
 				Namespace: namespace,
+				Name:      resourceName,
 			},
 			User: user,
 		},
@ -92,21 +100,52 @@ func BindClusterRole(c v1beta1rbac.ClusterRoleBindingsGetter, clusterRole, ns st

 // BindClusterRoleInNamespace binds the cluster role at the namespace scope
 func BindClusterRoleInNamespace(c v1beta1rbac.RoleBindingsGetter, clusterRole, ns string, subjects ...rbacv1beta1.Subject) {
+	bindInNamespace(c, "ClusterRole", clusterRole, ns, subjects...)
+}
+
+// BindRoleInNamespace binds the role at the namespace scope
+func BindRoleInNamespace(c v1beta1rbac.RoleBindingsGetter, role, ns string, subjects ...rbacv1beta1.Subject) {
+	bindInNamespace(c, "Role", role, ns, subjects...)
+}
+
+func bindInNamespace(c v1beta1rbac.RoleBindingsGetter, roleType, role, ns string, subjects ...rbacv1beta1.Subject) {
 	// Since the namespace names are unique, we can leave this lying around so we don't have to race any caches
 	_, err := c.RoleBindings(ns).Create(&rbacv1beta1.RoleBinding{
 		ObjectMeta: metav1.ObjectMeta{
-			Name: ns + "--" + clusterRole,
+			Name: ns + "--" + role,
 		},
 		RoleRef: rbacv1beta1.RoleRef{
 			APIGroup: "rbac.authorization.k8s.io",
-			Kind:     "ClusterRole",
-			Name:     clusterRole,
+			Kind:     roleType,
+			Name:     role,
 		},
 		Subjects: subjects,
 	})

 	// if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled.
 	if err != nil {
-		fmt.Printf("Error binding clusterrole/%s into %q for %v\n", clusterRole, ns, subjects)
+		fmt.Printf("Error binding %s/%s into %q for %v\n", roleType, role, ns, subjects)
 	}
 }
+
+var (
+	isRBACEnabledOnce sync.Once
+	isRBACEnabled     bool
+)
+
+func IsRBACEnabled(f *Framework) bool {
+	isRBACEnabledOnce.Do(func() {
+		crs, err := f.ClientSet.RbacV1().ClusterRoles().List(metav1.ListOptions{})
+		if err != nil {
+			Logf("Error listing ClusterRoles; assuming RBAC is disabled: %v", err)
+			isRBACEnabled = false
+		} else if crs == nil || len(crs.Items) == 0 {
+			Logf("No ClusterRoles found; assuming RBAC is disabled.")
+			isRBACEnabled = false
+		} else {
+			Logf("Found ClusterRoles; assuming RBAC is enabled.")
+			isRBACEnabled = true
+		}
+	})
+	return isRBACEnabled
+}
@ -71,6 +71,7 @@ type Framework struct {
 	Namespace                *v1.Namespace   // Every test has at least one namespace unless creation is skipped
 	namespacesToDelete       []*v1.Namespace // Some tests have more than one.
 	NamespaceDeletionTimeout time.Duration
+	SkipPrivilegedPSPBinding bool // Whether to skip creating a binding to the privileged PSP in the test namespace

 	gatherer *containerResourceGatherer
 	// Constraints that passed to a check which is executed after data is gathered to
@ -373,6 +374,11 @@ func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (
 	if ns != nil {
 		f.namespacesToDelete = append(f.namespacesToDelete, ns)
 	}

+	if !f.SkipPrivilegedPSPBinding {
+		CreatePrivilegedPSPBinding(f, ns.Name)
+	}
+
 	return ns, err
 }
test/e2e/framework/psp_util.go (new file, 143 lines)
@ -0,0 +1,143 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
	"fmt"
	"sync"

	corev1 "k8s.io/api/core/v1"
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apiserver/pkg/authentication/serviceaccount"
	"k8s.io/kubernetes/pkg/security/podsecuritypolicy/seccomp"

	. "github.com/onsi/ginkgo"
)

const (
	podSecurityPolicyPrivileged = "e2e-test-privileged-psp"
)

var (
	isPSPEnabledOnce sync.Once
	isPSPEnabled     bool
)

// Creates a PodSecurityPolicy that allows everything.
func PrivilegedPSP(name string) *extensionsv1beta1.PodSecurityPolicy {
	allowPrivilegeEscalation := true
	return &extensionsv1beta1.PodSecurityPolicy{
		ObjectMeta: metav1.ObjectMeta{
			Name:        name,
			Annotations: map[string]string{seccomp.AllowedProfilesAnnotationKey: seccomp.AllowAny},
		},
		Spec: extensionsv1beta1.PodSecurityPolicySpec{
			Privileged:               true,
			AllowPrivilegeEscalation: &allowPrivilegeEscalation,
			AllowedCapabilities:      []corev1.Capability{"*"},
			Volumes:                  []extensionsv1beta1.FSType{extensionsv1beta1.All},
			HostNetwork:              true,
			HostPorts:                []extensionsv1beta1.HostPortRange{{Min: 0, Max: 65535}},
			HostIPC:                  true,
			HostPID:                  true,
			RunAsUser: extensionsv1beta1.RunAsUserStrategyOptions{
				Rule: extensionsv1beta1.RunAsUserStrategyRunAsAny,
			},
			SELinux: extensionsv1beta1.SELinuxStrategyOptions{
				Rule: extensionsv1beta1.SELinuxStrategyRunAsAny,
			},
			SupplementalGroups: extensionsv1beta1.SupplementalGroupsStrategyOptions{
				Rule: extensionsv1beta1.SupplementalGroupsStrategyRunAsAny,
			},
			FSGroup: extensionsv1beta1.FSGroupStrategyOptions{
				Rule: extensionsv1beta1.FSGroupStrategyRunAsAny,
			},
			ReadOnlyRootFilesystem: false,
		},
	}
}

func IsPodSecurityPolicyEnabled(f *Framework) bool {
	isPSPEnabledOnce.Do(func() {
		psps, err := f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().List(metav1.ListOptions{})
		if err != nil {
			Logf("Error listing PodSecurityPolicies; assuming PodSecurityPolicy is disabled: %v", err)
			isPSPEnabled = false
		} else if psps == nil || len(psps.Items) == 0 {
			Logf("No PodSecurityPolicies found; assuming PodSecurityPolicy is disabled.")
			isPSPEnabled = false
		} else {
			Logf("Found PodSecurityPolicies; assuming PodSecurityPolicy is enabled.")
			isPSPEnabled = true
		}
	})
	return isPSPEnabled
}

var (
	privilegedPSPOnce sync.Once
)

func CreatePrivilegedPSPBinding(f *Framework, namespace string) {
	if !IsPodSecurityPolicyEnabled(f) {
		return
	}
	// Create the privileged PSP & role
	privilegedPSPOnce.Do(func() {
		_, err := f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().Get(
			podSecurityPolicyPrivileged, metav1.GetOptions{})
		if !apierrs.IsNotFound(err) {
			// Privileged PSP was already created.
			ExpectNoError(err, "Failed to get PodSecurityPolicy %s", podSecurityPolicyPrivileged)
			return
		}

		psp := PrivilegedPSP(podSecurityPolicyPrivileged)
		psp, err = f.ClientSet.ExtensionsV1beta1().PodSecurityPolicies().Create(psp)
		ExpectNoError(err, "Failed to create PSP %s", podSecurityPolicyPrivileged)

		// Create the Role to bind it to the namespace.
		_, err = f.ClientSet.RbacV1beta1().ClusterRoles().Create(&rbacv1beta1.ClusterRole{
			ObjectMeta: metav1.ObjectMeta{Name: podSecurityPolicyPrivileged},
			Rules: []rbacv1beta1.PolicyRule{{
				APIGroups:     []string{"extensions"},
				Resources:     []string{"podsecuritypolicies"},
				ResourceNames: []string{podSecurityPolicyPrivileged},
				Verbs:         []string{"use"},
			}},
		})
		ExpectNoError(err, "Failed to create PSP role")
	})

	By(fmt.Sprintf("Binding the %s PodSecurityPolicy to the default service account in %s",
		podSecurityPolicyPrivileged, namespace))
	BindClusterRoleInNamespace(f.ClientSet.RbacV1beta1(),
		podSecurityPolicyPrivileged,
		namespace,
		rbacv1beta1.Subject{
			Kind:      rbacv1beta1.ServiceAccountKind,
			Namespace: namespace,
			Name:      "default",
		})
	ExpectNoError(WaitForNamedAuthorizationUpdate(f.ClientSet.AuthorizationV1beta1(),
		serviceaccount.MakeUsername(namespace, "default"), namespace, "use", podSecurityPolicyPrivileged,
		schema.GroupResource{Group: "extensions", Resource: "podsecuritypolicies"}, true))
}
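Once any test namespace has been created against a PSP-enabled cluster, the shared fixture objects should be visible cluster-wide (illustrative check; the object names come from this file):

```sh
# The privileged fixture PSP and its ClusterRole are created once and reused
# across test namespaces; each namespace then gets its own RoleBinding.
kubectl get podsecuritypolicy e2e-test-privileged-psp
kubectl get clusterrole e2e-test-privileged-psp
```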