Remove PodSecurityPolicy cluster config

Jordan Liggitt 2022-05-04 15:45:08 -04:00
parent 410ac59c0d
commit a44192b955
21 changed files with 0 additions and 426 deletions
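Context for the removal: with the PodSecurityPolicy API going away, the equivalent protection for addon namespaces comes from the built-in Pod Security admission controller, which is driven by namespace labels rather than RBAC-bound policy objects. A minimal sketch of that replacement mechanism, not part of this commit (the namespace name is illustrative):

apiVersion: v1
kind: Namespace
metadata:
  name: example-addons        # illustrative namespace, not taken from this change
  labels:
    # Enforce the "baseline" Pod Security Standard; warn on "restricted" violations.
    pod-security.kubernetes.io/enforce: baseline
    pod-security.kubernetes.io/warn: restricted

The enforce, audit, and warn modes can be set independently per namespace.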

@@ -1,16 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:event-exporter
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: gce:podsecuritypolicy:event-exporter
subjects:
- kind: ServiceAccount
  name: event-exporter-sa
  namespace: kube-system

@@ -1,17 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: gce:podsecuritypolicy:event-exporter
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - policy
  resourceNames:
  - gce.event-exporter
  resources:
  - podsecuritypolicies
  verbs:
  - use

@@ -1,39 +0,0 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.event-exporter
  annotations:
    kubernetes.io/description: 'Policy used by the event-exporter addon.'
    seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default,docker/default'
    # 'runtime/default' is already the default, but must be filled in on the
    # pod to pass admission.
    apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
    apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
  labels:
    kubernetes.io/cluster-service: 'true'
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: false
  allowPrivilegeEscalation: false
  volumes:
  - 'hostPath'
  - 'secret'
  - 'projected'
  # TODO: This only needs a hostPath to read /etc/ssl/certs,
  # but it should be able to just include these in the image.
  allowedHostPaths:
  - pathPrefix: /etc/ssl/certs
  hostNetwork: false
  hostIPC: false
  hostPID: false
  # TODO: This doesn't need to run as root.
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false

@@ -1,16 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:fluentd-gcp
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: gce:podsecuritypolicy:fluentd-gcp
subjects:
- kind: ServiceAccount
  name: fluentd-gcp
  namespace: kube-system

@@ -1,17 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: gce:podsecuritypolicy:fluentd-gcp
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - policy
  resourceNames:
  - gce.fluentd-gcp
  resources:
  - podsecuritypolicies
  verbs:
  - use

@@ -1,39 +0,0 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.fluentd-gcp
  annotations:
    kubernetes.io/description: 'Policy used by the fluentd-gcp addon.'
    seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default,docker/default'
    # 'runtime/default' is already the default, but must be filled in on the
    # pod to pass admission.
    apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
    apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
  labels:
    kubernetes.io/cluster-service: 'true'
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: false
  allowPrivilegeEscalation: false
  volumes:
  - 'configMap'
  - 'hostPath'
  - 'secret'
  - 'projected'
  allowedHostPaths:
  - pathPrefix: /var/log
  - pathPrefix: /var/lib/docker/containers
  - pathPrefix: /usr/lib64
  hostNetwork: true
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false

@@ -1,16 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:kube-proxy
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gce:podsecuritypolicy:privileged
subjects:
- kind: ServiceAccount
  name: kube-proxy
  namespace: kube-system

@@ -1,17 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:unprivileged-addon
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: gce:podsecuritypolicy:unprivileged-addon
subjects:
- kind: Group
  # All service accounts in the kube-system namespace are allowed to use this.
  name: system:serviceaccounts:kube-system
  apiGroup: rbac.authorization.k8s.io

@@ -1,24 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: gce:podsecuritypolicy:nodes
  namespace: kube-system
  annotations:
    kubernetes.io/description: 'Allow nodes to create privileged pods. Should
      be used in combination with the NodeRestriction admission plugin to limit
      nodes to mirror pods bound to themselves.'
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: 'true'
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gce:podsecuritypolicy:privileged
subjects:
- kind: Group
  apiGroup: rbac.authorization.k8s.io
  name: system:nodes
- kind: User
  apiGroup: rbac.authorization.k8s.io
  # Legacy node ID
  name: kubelet

@@ -1,18 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
# The persistent volume binder creates recycler pods in the default namespace,
# but the addon manager only creates namespaced objects in the kube-system
# namespace, so this is a ClusterRoleBinding.
kind: ClusterRoleBinding
metadata:
  name: gce:podsecuritypolicy:persistent-volume-binder
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: gce:podsecuritypolicy:persistent-volume-binder
subjects:
- kind: ServiceAccount
  name: persistent-volume-binder
  namespace: kube-system

@@ -1,20 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
# The persistent volume binder creates recycler pods in the default namespace,
# but the addon manager only creates namespaced objects in the kube-system
# namespace, so this is a ClusterRole.
kind: ClusterRole
metadata:
  name: gce:podsecuritypolicy:persistent-volume-binder
  namespace: default
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - policy
  resourceNames:
  - gce.persistent-volume-binder
  resources:
  - podsecuritypolicies
  verbs:
  - use

@@ -1,30 +0,0 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.persistent-volume-binder
  annotations:
    kubernetes.io/description: 'Policy used by the persistent-volume-binder
      (a.k.a. persistentvolume-controller) to run recycler pods.'
    seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default,docker/default'
  labels:
    kubernetes.io/cluster-service: 'true'
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: false
  volumes:
  - 'nfs'
  - 'secret' # Required for service account credentials.
  - 'projected'
  hostNetwork: false
  hostIPC: false
  hostPID: false
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false

@@ -1,16 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: gce:podsecuritypolicy:privileged
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - policy
  resourceNames:
  - gce.privileged
  resources:
  - podsecuritypolicies
  verbs:
  - use

@@ -1,33 +0,0 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.privileged
  annotations:
    kubernetes.io/description: 'privileged allows full unrestricted access to
      pod features, as if the PodSecurityPolicy controller was not enabled.'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: true
  allowPrivilegeEscalation: true
  allowedCapabilities:
  - '*'
  volumes:
  - '*'
  hostNetwork: true
  hostPorts:
  - min: 0
    max: 65535
  hostIPC: true
  hostPID: true
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false

@@ -1,17 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: gce:podsecuritypolicy:unprivileged-addon
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
  - policy
  resourceNames:
  - gce.unprivileged-addon
  resources:
  - podsecuritypolicies
  verbs:
  - use

@@ -1,55 +0,0 @@
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  name: gce.unprivileged-addon
  annotations:
    kubernetes.io/description: 'This policy grants the minimum amount of
      privilege necessary to run non-privileged kube-system pods. This policy is
      not intended for use outside of kube-system, and may include further
      restrictions in the future.'
    seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
    seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'runtime/default,docker/default'
    # 'runtime/default' is already the default, but must be filled in on the
    # pod to pass admission.
    apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
    apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default'
  labels:
    kubernetes.io/cluster-service: 'true'
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  privileged: false
  allowPrivilegeEscalation: false
  # The docker default set of capabilities
  allowedCapabilities:
  - SETPCAP
  - MKNOD
  - AUDIT_WRITE
  - CHOWN
  - NET_RAW
  - DAC_OVERRIDE
  - FOWNER
  - FSETID
  - KILL
  - SETGID
  - SETUID
  - NET_BIND_SERVICE
  - SYS_CHROOT
  - SETFCAP
  volumes:
  - 'emptyDir'
  - 'configMap'
  - 'secret'
  - 'projected'
  hostNetwork: false
  hostIPC: false
  hostPID: false
  # TODO: The addons using this profile should not run as root.
  runAsUser:
    rule: 'RunAsAny'
  seLinux:
    rule: 'RunAsAny'
  supplementalGroups:
    rule: 'RunAsAny'
  fsGroup:
    rule: 'RunAsAny'
  readOnlyRootFilesystem: false

@@ -366,10 +366,6 @@ CUSTOM_INGRESS_YAML="${CUSTOM_INGRESS_YAML:-}"
# Admission Controllers to invoke prior to persisting objects in cluster
ADMISSION_CONTROL=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,PersistentVolumeClaimResize,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection,RuntimeClass
if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
  ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
fi
# MutatingAdmissionWebhook should be the last controller that modifies the
# request object, otherwise users will be confused if the mutating webhooks'
# modification is overwritten.

@@ -411,9 +411,6 @@ CUSTOM_INGRESS_YAML=${CUSTOM_INGRESS_YAML:-}
if [[ -z "${KUBE_ADMISSION_CONTROL:-}" ]]; then
  ADMISSION_CONTROL='NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection,PersistentVolumeClaimResize,RuntimeClass'
  if [[ "${ENABLE_POD_SECURITY_POLICY:-}" = 'true' ]]; then
    ADMISSION_CONTROL="${ADMISSION_CONTROL},PodSecurityPolicy"
  fi
  # ResourceQuota must come last, or a creation is recorded, but the pod may be forbidden.
  ADMISSION_CONTROL="${ADMISSION_CONTROL},MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
else

@@ -2337,15 +2337,6 @@ function setup-addon-manifests {
  local -r dst_dir="/etc/kubernetes/$1/$2"
  copy-manifests "${src_dir}/$2" "${dst_dir}"
  # If the PodSecurityPolicy admission controller is enabled,
  # set up the corresponding addon policies.
  if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
    local -r psp_dir="${src_dir}/${3:-$2}/podsecuritypolicies"
    if [[ -d "${psp_dir}" ]]; then
      copy-manifests "${psp_dir}" "${dst_dir}"
    fi
  fi
}

# A function that downloads extra addons from a URL and puts them in the GCI
@@ -2695,10 +2686,6 @@ function start-kube-addons {
    setup-addon-manifests "addons" "rbac/legacy-kubelet-user-disable"
  fi

  if [[ "${ENABLE_POD_SECURITY_POLICY:-}" == "true" ]]; then
    setup-addon-manifests "addons" "podsecuritypolicies"
  fi

  # Set up manifests of other addons.
  if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" ]] && [[ "${KUBE_PROXY_DISABLE:-}" != "true" ]]; then
    if [ -n "${CUSTOM_KUBE_PROXY_YAML:-}" ]; then

@@ -1124,7 +1124,6 @@ KUBE_PROXY_MODE: $(yaml-quote "${KUBE_PROXY_MODE:-iptables}")
DETECT_LOCAL_MODE: $(yaml-quote "${DETECT_LOCAL_MODE:-}")
NODE_PROBLEM_DETECTOR_TOKEN: $(yaml-quote "${NODE_PROBLEM_DETECTOR_TOKEN:-}")
ADMISSION_CONTROL: $(yaml-quote "${ADMISSION_CONTROL:-}")
ENABLE_POD_SECURITY_POLICY: $(yaml-quote "${ENABLE_POD_SECURITY_POLICY:-}")
MASTER_IP_RANGE: $(yaml-quote "${MASTER_IP_RANGE}")
RUNTIME_CONFIG: $(yaml-quote "${RUNTIME_CONFIG}")
CA_CERT: $(yaml-quote "${CA_CERT_BASE64:-}")

@@ -26,7 +26,6 @@ export DOCKER=(docker "${DOCKER_OPTS[@]}")
DOCKER_ROOT=${DOCKER_ROOT:-""}
ALLOW_PRIVILEGED=${ALLOW_PRIVILEGED:-""}
DENY_SECURITY_CONTEXT_ADMISSION=${DENY_SECURITY_CONTEXT_ADMISSION:-""}
PSP_ADMISSION=${PSP_ADMISSION:-""}
RUNTIME_CONFIG=${RUNTIME_CONFIG:-""}
KUBELET_AUTHORIZATION_WEBHOOK=${KUBELET_AUTHORIZATION_WEBHOOK:-""}
KUBELET_AUTHENTICATION_WEBHOOK=${KUBELET_AUTHENTICATION_WEBHOOK:-""}
@@ -480,9 +479,6 @@ function start_apiserver {
  if [[ -n "${DENY_SECURITY_CONTEXT_ADMISSION}" ]]; then
    security_admission=",SecurityContextDeny"
  fi
  if [[ -n "${PSP_ADMISSION}" ]]; then
    security_admission=",PodSecurityPolicy"
  fi

  # Append security_admission plugin
  ENABLE_ADMISSION_PLUGINS="${ENABLE_ADMISSION_PLUGINS}${security_admission}"
@@ -939,13 +935,6 @@ function start_csi_snapshotter {
  fi
}

function create_psp_policy {
  echo "Create podsecuritypolicy policies for RBAC."
  ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f "${KUBE_ROOT}/examples/podsecuritypolicy/rbac/policies.yaml"
  ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f "${KUBE_ROOT}/examples/podsecuritypolicy/rbac/roles.yaml"
  ${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create -f "${KUBE_ROOT}/examples/podsecuritypolicy/rbac/bindings.yaml"
}

function create_storage_class {
  if [ -z "${CLOUD_PROVIDER}" ]; then
    CLASS_FILE=${KUBE_ROOT}/cluster/addons/storage-class/local/default.yaml
@@ -1208,10 +1197,6 @@ if [[ "${START_MODE}" != "kubeletonly" ]]; then
    fi
  fi

  if [[ -n "${PSP_ADMISSION}" && "${AUTHORIZATION_MODE}" = *RBAC* ]]; then
    create_psp_policy
  fi

  if [[ "${DEFAULT_STORAGE_CLASS}" = "true" ]]; then
    create_storage_class
  fi