Merge pull request #45565 from Q-Lee/mds

Automatic merge from submit-queue

Adding a metadata proxy addon

**What this PR does / why we need it**: adds a metadata-server proxy DaemonSet to hide kubelet secrets.

**Which issue this PR fixes**: partially addresses #8867

**Special notes for your reviewer**:

**Release note**:

```release-note
The GCE metadata server can be hidden behind a proxy, hiding the kubelet's token.
```
commit 3473b8a792

cluster/addons/metadata-proxy/OWNERS (new file, +8)
@@ -0,0 +1,8 @@
approvers:
- q-lee
- cjcullen
- mikedanese
reviewers:
- q-lee
- cjcullen
- mikedanese
cluster/addons/metadata-proxy/README.md (new file, +5)
@@ -0,0 +1,5 @@
# Metadata proxy
==============

This metadata proxy returns a 403 for kubelet's kube-env data, but otherwise allows
pods access to the metadata server.
@@ -0,0 +1,60 @@
kind: ConfigMap
apiVersion: v1
metadata:
  name: metadata-proxy-config
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  nginx.conf: |-
    user www-data;
    worker_processes 4;
    pid /run/nginx.pid;
    error_log /dev/stdout;

    events {
        worker_connections 20;
    }

    http {
        access_log /dev/stdout;
        server {
            listen 127.0.0.1:988;

            # By default, return 403. This protects us from new API versions.
            location / {
                return 403;
            }

            # Allow for REST discovery.
            location = / {
                proxy_pass http://169.254.169.254;
            }
            location = /computeMetadata/ {
                proxy_pass http://169.254.169.254;
            }

            # By default, allow the v0.1, v1beta1, and v1 APIs.
            location /0.1/ {
                proxy_pass http://169.254.169.254;
            }
            location /computeMetadata/v1beta1/ {
                proxy_pass http://169.254.169.254;
            }
            location /computeMetadata/v1/ {
                proxy_pass http://169.254.169.254;
            }

            # Return a 403 for the kube-env attribute in all allowed API versions.
            location /0.1/meta-data/attributes/kube-env {
                return 403;
            }
            location /computeMetadata/v1beta1/instance/attributes/kube-env {
                return 403;
            }
            location /computeMetadata/v1/instance/attributes/kube-env {
                return 403;
            }
        }
    }
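If it is useful to validate the embedded nginx configuration before it ships, a rough local check could look like the following (a sketch: the jsonpath extraction and the temporary file path are illustrative, and `nginx -t` only verifies syntax, not the proxy behavior):

```sh
# Pull the nginx.conf payload back out of the ConfigMap and syntax-check it.
kubectl -n kube-system get configmap metadata-proxy-config \
  -o jsonpath='{.data.nginx\.conf}' > /tmp/metadata-proxy-nginx.conf
nginx -t -c /tmp/metadata-proxy-nginx.conf
```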
cluster/addons/metadata-proxy/gce/metadata-proxy.yaml (new file, +52)
@@ -0,0 +1,52 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
  name: metadata-proxy-v0.1
  namespace: kube-system
  labels:
    k8s-app: metadata-proxy
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
    version: v0.1
spec:
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      labels:
        k8s-app: metadata-proxy
        kubernetes.io/cluster-service: "true"
        version: v0.1
      # This annotation ensures that the proxy does not get evicted if the node
      # supports critical pod annotation based priority scheme.
      # Note that this does not guarantee admission on the nodes (#40573).
      annotations:
        scheduler.alpha.kubernetes.io/critical-pod: ''
    spec:
      hostNetwork: true
      dnsPolicy: Default
      containers:
      - name: metadata-proxy
        image: gcr.io/google-containers/metadata-proxy:0.1
        imagePullPolicy: Always
        securityContext:
          privileged: true
        command:
        - '/start-proxy.sh'
        resources:
          requests:
            memory: "32Mi"
            cpu: "50m"
          limits:
            memory: "32Mi"
            cpu: "50m"
        volumeMounts:
        - name: config-volume
          mountPath: /etc/nginx/
      nodeSelector:
        beta.kubernetes.io/metadata-proxy-ready: "true"
      terminationGracePeriodSeconds: 30
      volumes:
      - name: config-volume
        configMap:
          name: metadata-proxy-config
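As a follow-up check once the addon manager has applied the manifest above, commands along these lines should show the DaemonSet tracking only the labeled nodes (a sketch; the names come from the manifest, and the output depends on the cluster):

```sh
kubectl -n kube-system get daemonset metadata-proxy-v0.1
kubectl get nodes -l beta.kubernetes.io/metadata-proxy-ready=true
kubectl -n kube-system get pods -l k8s-app=metadata-proxy -o wide
```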
@@ -129,7 +129,13 @@ NODE_LABELS="${KUBE_NODE_LABELS:-beta.kubernetes.io/fluentd-ds-ready=true}"
 # To avoid running Calico on a node that is not configured appropriately,
 # label each Node so that the DaemonSet can run the Pods only on ready Nodes.
 if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
-  NODE_LABELS="$NODE_LABELS,projectcalico.org/ds-ready=true"
+  NODE_LABELS="${NODE_LABELS},projectcalico.org/ds-ready=true"
 fi
 
+# Turn the simple metadata proxy on by default.
+ENABLE_METADATA_PROXY="${ENABLE_METADATA_PROXY:-simple}"
+if [[ ${ENABLE_METADATA_PROXY} != "false" ]]; then
+  NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true"
+fi
+
 # Optional: Enable node logging.
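For reference, the default added above can be overridden at cluster bring-up. A hypothetical invocation (the variable is read by the config script above; `kube-up.sh` is the usual GCE entry point):

```sh
# Disable the metadata proxy for a new GCE cluster; any value other than
# "false" keeps the node label, and therefore the DaemonSet, enabled.
ENABLE_METADATA_PROXY=false ./cluster/kube-up.sh
```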
@@ -173,6 +173,12 @@ if [[ ${NETWORK_POLICY_PROVIDER:-} == "calico" ]]; then
   NODE_LABELS="$NODE_LABELS,projectcalico.org/ds-ready=true"
 fi
 
+# Turn the simple metadata proxy on by default.
+ENABLE_METADATA_PROXY="${ENABLE_METADATA_PROXY:-simple}"
+if [[ ${ENABLE_METADATA_PROXY} != "false" ]]; then
+  NODE_LABELS="${NODE_LABELS},beta.kubernetes.io/metadata-proxy-ready=true"
+fi
+
 # Optional: Enable node logging.
 ENABLE_NODE_LOGGING="${KUBE_ENABLE_NODE_LOGGING:-true}"
 LOGGING_DESTINATION="${KUBE_LOGGING_DESTINATION:-gcp}" # options: elasticsearch, gcp
@@ -419,6 +419,7 @@ enable_cluster_ui: '$(echo "$ENABLE_CLUSTER_UI" | sed -e "s/'/''/g")'
 enable_node_problem_detector: '$(echo "$ENABLE_NODE_PROBLEM_DETECTOR" | sed -e "s/'/''/g")'
 enable_l7_loadbalancing: '$(echo "$ENABLE_L7_LOADBALANCING" | sed -e "s/'/''/g")'
 enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
+enable_metadata_proxy: '$(echo "$ENABLE_METADATA_PROXY" | sed -e "s/'/''/g")'
 enable_rescheduler: '$(echo "$ENABLE_RESCHEDULER" | sed -e "s/'/''/g")'
 logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
 elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
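The `sed -e "s/'/''/g"` in these generated lines doubles any single quotes so the value can sit inside a single-quoted YAML scalar. A quick illustration:

```sh
echo "it's simple" | sed -e "s/'/''/g"
# prints: it''s simple
```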
@@ -1487,6 +1487,9 @@ function start-kube-addons {
   if [[ "${NON_MASQUERADE_CIDR:-}" == "0.0.0.0/0" ]]; then
     setup-addon-manifests "addons" "ip-masq-agent"
   fi
+  if [[ "${ENABLE_METADATA_PROXY:-}" == "simple" ]]; then
+    setup-addon-manifests "addons" "metadata-proxy/gce"
+  fi
 
   # Place addon manager pod manifest.
   cp "${src_dir}/kube-addon-manager.yaml" /etc/kubernetes/manifests
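Assuming `setup-addon-manifests` stages the addon's YAML under `/etc/kubernetes/addons/` the way it does for the neighboring addons, a quick check on a master provisioned with `ENABLE_METADATA_PROXY=simple` might be:

```sh
# The addon's manifests should have been copied here for the addon manager to apply.
ls /etc/kubernetes/addons/metadata-proxy/gce/
```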
@@ -178,6 +178,17 @@ addon-dir-create:
     - file_mode: 644
 {% endif %}
 
+{% if pillar.get('enable_metadata_proxy', '').lower() == 'true' %}
+/etc/kubernetes/addons/metadata-proxy/gce:
+  file.recurse:
+    - source: salt://kube-addons/metadata-proxy/gce
+    - include_pat: E@^.+\.yaml$
+    - user: root
+    - group: root
+    - dir_mode: 755
+    - file_mode: 644
+{% endif %}
+
 {% if pillar.get('enable_cluster_ui', '').lower() == 'true' %}
 /etc/kubernetes/addons/dashboard:
   file.recurse:
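The Salt-based path keys off the `enable_metadata_proxy` pillar; if needed, the value a given minion actually sees can be inspected with the standard Salt CLI (a sketch):

```sh
# Show the enable_metadata_proxy pillar value on all minions.
salt '*' pillar.get enable_metadata_proxy
```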