Merge pull request #39349 from shyamjvs/rbac-for-kubemark

Automatic merge from submit-queue

Updated kubemark with RBAC for controllers, proxy and kubelet

Fixes issue #39244 

@kubernetes/sig-scalability-misc @wojtek-t @gmarek
Kubernetes Submit Queue, 2017-01-06 13:42:54 -08:00 (committed by GitHub)
commit 85ad3045be
5 changed files with 131 additions and 17 deletions

View File

@@ -0,0 +1 @@
These resources add extra (non-default) bindings to kubemark, matching users and groups that are particular to the kubemark environment. Neither the bindings nor the users they are bound to are standard bootstrap ones; they have been adapted from cluster/addons/e2e-rbac-bindings. Tighten or loosen these access rights as required in the future.
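For example, tightening access could mean binding the kubecfg user to one of the narrower bootstrap roles instead of cluster-admin. A hypothetical sketch (the file name kubecfg-view-binding.yaml and the choice of the view role are illustrative only, not part of kubemark):

```bash
# Hypothetical, illustration only: bind the kubecfg user to the bootstrap
# "view" ClusterRole instead of cluster-admin, and save the file alongside the
# other bindings in this directory so the addon manager picks it up.
cat > kubecfg-view-binding.yaml <<'EOF'
apiVersion: rbac.authorization.k8s.io/v1alpha1
kind: ClusterRoleBinding
metadata:
  name: kubecfg-view
  labels:
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: view
subjects:
- apiVersion: rbac/v1alpha1
  kind: User
  name: kubecfg
EOF
```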

View File

@@ -0,0 +1,18 @@
# This is the role binding for the local kubectl, which is
# used for listing hollow-nodes in start-kubemark.sh and for
# sending resource creation requests, etc. in run-e2e-tests.sh.
# It is also useful if you want to use the local kubectl manually.
apiVersion: rbac.authorization.k8s.io/v1alpha1
kind: ClusterRoleBinding
metadata:
  name: kubecfg-cluster-admin
  labels:
    kubernetes.io/cluster-service: "true"
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cluster-admin
subjects:
- apiVersion: rbac/v1alpha1
  kind: User
  name: kubecfg
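As the comment at the top of this binding notes, it authorizes the local kubectl used by the kubemark scripts. A minimal usage sketch, assuming the local kubemark kubeconfig written by start-kubemark.sh (referenced below as ${LOCAL_KUBECONFIG}):

```bash
# The local kubemark kubeconfig authenticates as the "kubecfg" user, which this
# binding maps to cluster-admin, so listing hollow-nodes (as start-kubemark.sh
# does) is permitted under RBAC.
kubectl --kubeconfig="${LOCAL_KUBECONFIG}" get nodes
```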

View File

@@ -0,0 +1,37 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-addon-manager
  namespace: kube-system
  labels:
    component: kube-addon-manager
spec:
  hostNetwork: true
  containers:
  - name: kube-addon-manager
    # When updating version also bump it in:
    # - cluster/images/hyperkube/static-pods/addon-manager-singlenode.json
    # - cluster/images/hyperkube/static-pods/addon-manager-multinode.json
    # - cluster/gce/coreos/kube-manifests/kube-addon-manager.yaml
    image: {{kube_docker_registry}}/kube-addon-manager:v6.1
    command:
    - /bin/bash
    - -c
    - /opt/kube-addons.sh 1>>/var/log/kube-addon-manager.log 2>&1
    resources:
      requests:
        cpu: 5m
        memory: 50Mi
    volumeMounts:
    - name: addons
      mountPath: /etc/kubernetes/
      readOnly: true
    - name: varlog
      mountPath: /var/log/kube-addon-manager.log
  volumes:
  - name: addons
    hostPath:
      path: /etc/kubernetes/
  - name: varlog
    hostPath:
      path: /var/log/kube-addon-manager.log
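The {{kube_docker_registry}} placeholder is filled in before this manifest lands in /etc/kubernetes/manifests; the substitution done by start-kubemaster-component in the master start script below amounts to something like this sketch (DOCKER_REGISTRY and KUBE_ROOT as set in that script):

```bash
# Mirrors the placeholder substitution performed in start-kubemaster-component.
sed -i -e "s@{{kube_docker_registry}}@${DOCKER_REGISTRY}@g" "${KUBE_ROOT}/kube-addon-manager.yaml"
```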

View File

@@ -43,6 +43,7 @@ function create-dirs {
  echo "Creating required directories"
  mkdir -p /var/lib/kubelet
  mkdir -p /etc/kubernetes/manifests
  mkdir -p /etc/kubernetes/addons
}
# Setup working directory for kubelet.
@@ -134,6 +135,31 @@ function mount-pd() {
  # locations.
}
# Create kubeconfig for controller-manager's service account authentication.
function create-kubecontrollermanager-kubeconfig {
  echo "Creating kube-controller-manager kubeconfig file"
  mkdir -p /etc/srv/kubernetes/kube-controller-manager
  cat <<EOF >/etc/srv/kubernetes/kube-controller-manager/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-controller-manager
  user:
    token: ${KUBE_CONTROLLER_MANAGER_TOKEN}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
    server: https://localhost:443
contexts:
- context:
    cluster: local
    user: kube-controller-manager
  name: service-account-context
current-context: service-account-context
EOF
}
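A quick sanity-check sketch (run on the kubemark master after both this function and the token setup later in this script): the token embedded in the generated kubeconfig has to match the kube-controller-manager row appended to known_tokens.csv, otherwise the apiserver rejects the controller-manager's requests.

```bash
# Both lines should show the same KUBE_CONTROLLER_MANAGER_TOKEN value.
grep 'token:' /etc/srv/kubernetes/kube-controller-manager/kubeconfig
grep 'system:kube-controller-manager' /etc/srv/kubernetes/known_tokens.csv
```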
function assemble-docker-flags {
  echo "Assemble docker command line flags"
  local docker_opts="-p /var/run/docker.pid --iptables=false --ip-masq=false"
@@ -241,6 +267,26 @@ function prepare-log-file {
  chown root:root $1
}
# A helper function for copying addon manifests and setting dir/file
# permissions.
#
# $1: addon category under /etc/kubernetes
# $2: manifest source dir
function setup-addon-manifests {
  local -r src_dir="${KUBE_ROOT}/$2"
  local -r dst_dir="/etc/kubernetes/$1/$2"
  if [[ ! -d "${dst_dir}" ]]; then
    mkdir -p "${dst_dir}"
  fi
  local files=$(find "${src_dir}" -maxdepth 1 -name "*.yaml")
  if [[ -n "${files}" ]]; then
    cp "${src_dir}/"*.yaml "${dst_dir}"
  fi
  chown -R root:root "${dst_dir}"
  chmod 755 "${dst_dir}"
  chmod 644 "${dst_dir}"/*
}
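For illustration, the call made for the kubemark RBAC bindings later in this script (see start-kubemaster-component below) expands as sketched here, assuming the default paths:

```bash
# Copies ${KUBE_ROOT}/kubemark-rbac-bindings/*.yaml into
# /etc/kubernetes/addons/kubemark-rbac-bindings/, where the kube-addon-manager
# pod reads them through its /etc/kubernetes hostPath mount.
setup-addon-manifests "addons" "kubemark-rbac-bindings"
```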
# Computes command line arguments to be passed to etcd.
function compute-etcd-params {
  local params="${ETCD_TEST_ARGS:-}"
@@ -279,13 +325,15 @@ function compute-kube-apiserver-params {
  params+=" --storage-backend=${STORAGE_BACKEND}"
  params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
  params+=" --admission-control=${CUSTOM_ADMISSION_PLUGINS}"
  params+=" --authorization-mode=RBAC"
  echo "${params}"
}
# Computes command line arguments to be passed to controller-manager.
function compute-kube-controller-manager-params {
  local params="${CONTROLLER_MANAGER_TEST_ARGS:-}"
  params+=" --master=127.0.0.1:8080"
  params+=" --use-service-account-credentials"
  params+=" --kubeconfig=/etc/srv/kubernetes/kube-controller-manager/kubeconfig"
  params+=" --service-account-private-key-file=/etc/srv/kubernetes/server.key"
  params+=" --root-ca-file=/etc/srv/kubernetes/ca.crt"
  params+=" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}"
@@ -302,12 +350,18 @@ function compute-kube-scheduler-params {
  echo "${params}"
}
# Computes command line arguments to be passed to addon-manager.
function compute-kube-addon-manager-params {
  echo ""
}
# Start a kubernetes master component '$1' which can be any of the following:
# 1. etcd
# 2. etcd-events
# 3. kube-apiserver
# 4. kube-controller-manager
# 5. kube-scheduler
# 6. kube-addon-manager
#
# It prepares the log file, loads the docker tag, calculates variables, sets them
# in the manifest file, and then copies the manifest file to /etc/kubernetes/manifests.
@@ -315,13 +369,8 @@ function compute-kube-scheduler-params {
# Assumed vars:
# DOCKER_REGISTRY
function start-kubemaster-component() {
  echo "Start master component $1"
  local -r component=$1
  local component_is_etcd=false
  if [ "${component:0:4}" == "etcd" ]; then
    component_is_etcd=true
  fi
  echo "Start master component ${component}"
  prepare-log-file /var/log/"${component}".log
  local -r src_file="${KUBE_ROOT}/${component}.yaml"
  local -r params=$(compute-${component}-params)
@@ -330,8 +379,10 @@ function start-kubemaster-component() {
  sed -i -e "s@{{params}}@${params}@g" "${src_file}"
  sed -i -e "s@{{kube_docker_registry}}@${DOCKER_REGISTRY}@g" "${src_file}"
  sed -i -e "s@{{instance_prefix}}@${INSTANCE_PREFIX}@g" "${src_file}"
  if [ "${component_is_etcd}" == "true" ]; then
  if [ "${component:0:4}" == "etcd" ]; then
    sed -i -e "s@{{etcd_image}}@${ETCD_IMAGE}@g" "${src_file}"
  elif [ "${component}" == "kube-addon-manager" ]; then
    setup-addon-manifests "addons" "kubemark-rbac-bindings"
  else
    local -r component_docker_tag=$(cat ${KUBE_BINDIR}/${component}.docker_tag)
    sed -i -e "s@{{${component}_docker_tag}}@${component_docker_tag}@g" "${src_file}"
@@ -347,13 +398,18 @@ cd "${KUBE_ROOT}"
tar xzf kubernetes-server-linux-amd64.tar.gz
source "${KUBE_ROOT}/kubemark-master-env.sh"
# Setup IP firewall rules, required directory structure and etcd variables.
# Setup IP firewall rules, required directory structure and etcd config.
config-ip-firewall
create-dirs
setup-kubelet-dir
delete-default-etcd-configs
compute-etcd-variables
# Setup authentication token and kubeconfig for controller-manager.
KUBE_CONTROLLER_MANAGER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "${KUBE_CONTROLLER_MANAGER_TOKEN},system:kube-controller-manager,uid:system:kube-controller-manager" >> /etc/srv/kubernetes/known_tokens.csv
create-kubecontrollermanager-kubeconfig
# Mount master PD for etcd and create symbolic links to it.
{
main_etcd_mount_point="/mnt/disks/master-pd"
@@ -398,6 +454,7 @@ fi
start-kubemaster-component "kube-apiserver"
start-kubemaster-component "kube-controller-manager"
start-kubemaster-component "kube-scheduler"
start-kubemaster-component "kube-addon-manager"
# Wait till apiserver is working fine.
until [ "$(curl 127.0.0.1:8080/healthz 2> /dev/null)" == "ok" ]; do

View File

@@ -146,17 +146,16 @@ password=$(python -c 'import string,random; print("".join(random.SystemRandom().
gcloud compute ssh --zone="${ZONE}" --project="${PROJECT}" "${MASTER_NAME}" \
--command="sudo mkdir /home/kubernetes -p && sudo mkdir /etc/srv/kubernetes -p && \
sudo bash -c \"echo ${CA_CERT_BASE64} | base64 --decode > /etc/srv/kubernetes/ca.crt\" && \
sudo bash -c \"echo ${MASTER_CERT_BASE64} | base64 --decode > /etc/srv/kubernetes/server.cert\" && \
sudo bash -c \"echo ${MASTER_KEY_BASE64} | base64 --decode > /etc/srv/kubernetes/server.key\" && \
sudo bash -c \"echo ${CA_CERT_BASE64} | base64 --decode > /etc/srv/kubernetes/ca.crt\" && \
sudo bash -c \"echo ${KUBECFG_CERT_BASE64} | base64 --decode > /etc/srv/kubernetes/kubecfg.crt\" && \
sudo bash -c \"echo ${KUBECFG_KEY_BASE64} | base64 --decode > /etc/srv/kubernetes/kubecfg.key\" && \
sudo bash -c \"echo \"${KUBE_BEARER_TOKEN},admin,admin\" > /etc/srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo \"${KUBELET_TOKEN},kubelet,kubelet\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo \"${KUBE_PROXY_TOKEN},kube_proxy,kube_proxy\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo \"${KUBELET_TOKEN},system:node:node-name,uid:kubelet,system:nodes\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo \"${KUBE_PROXY_TOKEN},system:kube-proxy,uid:kube_proxy\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo ${password},admin,admin > /etc/srv/kubernetes/basic_auth.csv\""
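For reference, each row appended above follows the apiserver's static token file format, token,user,uid[,"group1,group2"], so the kubelet token now carries the system:nodes group and the proxy token maps to the system:kube-proxy user. A verification sketch, run on the kubemark master:

```bash
# Print the user/uid/group columns (everything after the secret token) to
# confirm the new identities without echoing the tokens themselves.
cut -d, -f2- /etc/srv/kubernetes/known_tokens.csv
# Expected rows, per the commands above:
#   admin,admin
#   system:node:node-name,uid:kubelet,system:nodes
#   system:kube-proxy,uid:kube_proxy
```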
gcloud compute copy-files --zone="${ZONE}" --project="${PROJECT}" \
"${SERVER_BINARY_TAR}" \
"${RESOURCE_DIRECTORY}/kubemark-master-env.sh" \
@@ -167,6 +166,8 @@ gcloud compute copy-files --zone="${ZONE}" --project="${PROJECT}" \
"${RESOURCE_DIRECTORY}/manifests/kube-apiserver.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-scheduler.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-controller-manager.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-addon-manager.yaml" \
"${RESOURCE_DIRECTORY}/manifests/addons/kubemark-rbac-bindings" \
"kubernetes@${MASTER_NAME}":/home/kubernetes/
gcloud compute ssh "${MASTER_NAME}" --zone="${ZONE}" --project="${PROJECT}" \
@@ -228,7 +229,7 @@ cat > "${LOCAL_KUBECONFIG}" << EOF
apiVersion: v1
kind: Config
users:
- name: admin
- name: kubecfg
  user:
    client-certificate-data: "${KUBECFG_CERT_BASE64}"
    client-key-data: "${KUBECFG_KEY_BASE64}"
@@ -242,7 +243,7 @@ clusters:
contexts:
- context:
    cluster: kubemark
    user: admin
    user: kubecfg
  name: kubemark-context
current-context: kubemark-context
EOF
@@ -272,7 +273,7 @@ rm "${NODE_CONFIGMAP}"
echo "Waiting for all HollowNodes to become Running..."
start=$(date +%s)
nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node) || true
nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node 2> /dev/null) || true
ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))
until [[ "${ready}" -ge "${NUM_NODES}" ]]; do
@@ -297,7 +298,7 @@ until [[ "${ready}" -ge "${NUM_NODES}" ]]; do
    echo $(echo "${pods}" | grep -v "Running")
    exit 1
  fi
  nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node) || true
  nodes=$("${KUBECTL}" --kubeconfig="${LOCAL_KUBECONFIG}" get node 2> /dev/null) || true
  ready=$(($(echo "${nodes}" | grep -v "NotReady" | wc -l) - 1))
done
echo ""