Merge pull request #38616 from shyamjvs/kubemark-gci-master

Automatic merge from submit-queue

Moved kubemark master from Debian to GCI

This PR fixes issue #37484
Kubemark master now runs on GCI instead of Debian, taking it one step closer to a real cluster master.
Primary changes:
1. changing master VM image/OS in kubemark's config-default.sh from debian (container-vm) to gci
2. moving kubelet to systemd from supervisord
3. changing directory for cert/key/csv files from /srv/kubernetes to /etc/srv/kubernetes

cc @kubernetes/sig-scalability-misc  @wojtek-t @gmarek
This commit is contained in:
Kubernetes Submit Queue 2016-12-20 00:36:21 -08:00 committed by GitHub
commit 8e888a7671
5 changed files with 76 additions and 57 deletions

View File

@ -34,9 +34,9 @@ MASTER_ROOT_DISK_SIZE=${KUBEMARK_MASTER_ROOT_DISK_SIZE:-10GB}
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false} REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false}
PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false} PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-debian} MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-gci}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-debian} NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-debian}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-container-vm-v20161208} MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-gci-dev-56-8977-0-0}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-google-containers} MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-google-containers}
NETWORK=${KUBE_GCE_NETWORK:-default} NETWORK=${KUBE_GCE_NETWORK:-default}

View File

@ -37,7 +37,7 @@ spec:
protocol: TCP protocol: TCP
volumeMounts: volumeMounts:
- name: srvkube - name: srvkube
mountPath: /srv/kubernetes mountPath: /etc/srv/kubernetes
readOnly: true readOnly: true
- name: logfile - name: logfile
mountPath: /var/log/kube-apiserver.log mountPath: /var/log/kube-apiserver.log
@ -48,11 +48,11 @@ spec:
mountPath: /usr/share/ca-certificates mountPath: /usr/share/ca-certificates
readOnly: true readOnly: true
- name: srvsshproxy - name: srvsshproxy
mountPath: /srv/sshproxy mountPath: /etc/srv/sshproxy
volumes: volumes:
- name: srvkube - name: srvkube
hostPath: hostPath:
path: /srv/kubernetes path: /etc/srv/kubernetes
- name: logfile - name: logfile
hostPath: hostPath:
path: /var/log/kube-apiserver.log path: /var/log/kube-apiserver.log
@ -64,4 +64,4 @@ spec:
path: /usr/share/ca-certificates path: /usr/share/ca-certificates
- name: srvsshproxy - name: srvsshproxy
hostPath: hostPath:
path: /srv/sshproxy path: /etc/srv/sshproxy

View File

@ -28,7 +28,7 @@ spec:
timeoutSeconds: 15 timeoutSeconds: 15
volumeMounts: volumeMounts:
- name: srvkube - name: srvkube
mountPath: /srv/kubernetes mountPath: /etc/srv/kubernetes
readOnly: true readOnly: true
- name: logfile - name: logfile
mountPath: /var/log/kube-controller-manager.log mountPath: /var/log/kube-controller-manager.log
@ -41,7 +41,7 @@ spec:
volumes: volumes:
- name: srvkube - name: srvkube
hostPath: hostPath:
path: /srv/kubernetes path: /etc/srv/kubernetes
- name: logfile - name: logfile
hostPath: hostPath:
path: /var/log/kube-controller-manager.log path: /var/log/kube-controller-manager.log

View File

@ -21,6 +21,24 @@
KUBE_ROOT="/home/kubernetes" KUBE_ROOT="/home/kubernetes"
KUBE_BINDIR="${KUBE_ROOT}/kubernetes/server/bin" KUBE_BINDIR="${KUBE_ROOT}/kubernetes/server/bin"
# Opens up the GCI host firewall for the kubemark master.
# The GCI image ships with a host firewall whose INPUT/FORWARD chains default
# to DROP; without these ACCEPT rules most inbound and forwarded traffic to
# the master would be silently discarded.
function config-ip-firewall {
  echo "Configuring IP firewall rules"
  local chain direction proto
  for chain in INPUT FORWARD; do
    # Human-readable label used only in the log message below.
    case "${chain}" in
      INPUT) direction="inbound" ;;
      FORWARD) direction="forwarded" ;;
    esac
    # Only add rules when the chain's default policy is DROP; otherwise the
    # image already accepts traffic and the extra rules are unnecessary.
    if iptables -L "${chain}" | grep "Chain ${chain} (policy DROP)" > /dev/null; then
      echo "Add rules to accept all ${direction} TCP/UDP/ICMP packets"
      for proto in TCP UDP ICMP; do
        # -w: wait for the xtables lock instead of failing on contention.
        iptables -A "${chain}" -w -p "${proto}" -j ACCEPT
      done
    fi
  done
}
function create-dirs { function create-dirs {
echo "Creating required directories" echo "Creating required directories"
mkdir -p /var/lib/kubelet mkdir -p /var/lib/kubelet
@ -102,8 +120,8 @@ function mount-pd() {
return return
fi fi
echo "Mounting PD '${pd_path}' at '${mount_point}'"
local -r pd_path="/dev/disk/by-id/${pd_name}" local -r pd_path="/dev/disk/by-id/${pd_name}"
echo "Mounting PD '${pd_path}' at '${mount_point}'"
# Format and mount the disk, create directories on it for all of the master's # Format and mount the disk, create directories on it for all of the master's
# persistent data, and link them to where they're used. # persistent data, and link them to where they're used.
mkdir -p "${mount_point}" mkdir -p "${mount_point}"
@ -123,8 +141,7 @@ function assemble-docker-flags {
# TODO(shyamjvs): Incorporate network plugin options, etc later. # TODO(shyamjvs): Incorporate network plugin options, etc later.
echo "DOCKER_OPTS=\"${docker_opts}\"" > /etc/default/docker echo "DOCKER_OPTS=\"${docker_opts}\"" > /etc/default/docker
echo "DOCKER_NOFILE=65536" >> /etc/default/docker # For setting ulimit -n echo "DOCKER_NOFILE=65536" >> /etc/default/docker # For setting ulimit -n
service docker restart systemd restart docker
# TODO(shyamjvs): Make docker run through systemd/supervisord.
} }
# A helper function for loading a docker image. It keeps trying up to 5 times. # A helper function for loading a docker image. It keeps trying up to 5 times.
@ -176,39 +193,40 @@ function compute-kubelet-params {
echo "${params}" echo "${params}"
} }
# Creates the supervisord config file for kubelet from the exec_command ($1). # Creates the systemd config file for kubelet.service.
function create-kubelet-conf() { function create-kubelet-conf() {
local -r name="kubelet" local -r kubelet_bin="$1"
local exec_command="$1 " local -r kubelet_env_file="/etc/default/kubelet"
exec_command+=$(compute-kubelet-params) local -r flags=$(compute-kubelet-params)
echo "KUBELET_OPTS=\"${flags}\"" > "${kubelet_env_file}"
cat >>"/etc/supervisor/conf.d/${name}.conf" <<EOF # Write the systemd service file for kubelet.
[program:${name}] cat <<EOF >/etc/systemd/system/kubelet.service
command=${exec_command} [Unit]
stderr_logfile=/var/log/${name}.log Description=Kubermark kubelet
stdout_logfile=/var/log/${name}.log Requires=network-online.target
autorestart=true After=network-online.target
startretries=1000000
[Service]
Restart=always
RestartSec=10
EnvironmentFile=${kubelet_env_file}
ExecStart=${kubelet_bin} \$KUBELET_OPTS
[Install]
WantedBy=multi-user.target
EOF EOF
} }
# This function assembles the kubelet supervisord config file and starts it using # This function assembles the kubelet systemd service file and starts it using
# supervisorctl, on the kubemark master. # systemctl, on the kubemark master.
function start-kubelet { function start-kubelet {
# Kill any pre-existing kubelet process(es). # Create systemd config.
pkill kubelet local -r kubelet_bin="/usr/bin/kubelet"
# Replace the builtin kubelet (if any) with the correct binary. create-kubelet-conf "${kubelet_bin}"
local -r builtin_kubelet="$(which kubelet)"
if [[ -n "${builtin_kubelet}" ]]; then
cp "${KUBE_BINDIR}/kubelet" "$(dirname "$builtin_kubelet")"
fi
# Create supervisord config for kubelet. # Start the kubelet service.
create-kubelet-conf "${KUBE_BINDIR}/kubelet" systemctl start kubelet.service
# Update supervisord to make it run kubelet.
supervisorctl reread
supervisorctl update
} }
# Create the log file and set its properties. # Create the log file and set its properties.
@ -248,12 +266,12 @@ function compute-kube-apiserver-params {
params+=" --insecure-bind-address=0.0.0.0" params+=" --insecure-bind-address=0.0.0.0"
params+=" --etcd-servers=http://127.0.0.1:2379" params+=" --etcd-servers=http://127.0.0.1:2379"
params+=" --etcd-servers-overrides=/events#${EVENT_STORE_URL}" params+=" --etcd-servers-overrides=/events#${EVENT_STORE_URL}"
params+=" --tls-cert-file=/srv/kubernetes/server.cert" params+=" --tls-cert-file=/etc/srv/kubernetes/server.cert"
params+=" --tls-private-key-file=/srv/kubernetes/server.key" params+=" --tls-private-key-file=/etc/srv/kubernetes/server.key"
params+=" --client-ca-file=/srv/kubernetes/ca.crt" params+=" --client-ca-file=/etc/srv/kubernetes/ca.crt"
params+=" --token-auth-file=/srv/kubernetes/known_tokens.csv" params+=" --token-auth-file=/etc/srv/kubernetes/known_tokens.csv"
params+=" --secure-port=443" params+=" --secure-port=443"
params+=" --basic-auth-file=/srv/kubernetes/basic_auth.csv" params+=" --basic-auth-file=/etc/srv/kubernetes/basic_auth.csv"
params+=" --target-ram-mb=$((${NUM_NODES} * 60))" params+=" --target-ram-mb=$((${NUM_NODES} * 60))"
params+=" --storage-backend=${STORAGE_BACKEND}" params+=" --storage-backend=${STORAGE_BACKEND}"
params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}" params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
@ -265,8 +283,8 @@ function compute-kube-apiserver-params {
function compute-kube-controller-manager-params { function compute-kube-controller-manager-params {
local params="${CONTROLLER_MANAGER_TEST_ARGS:-}" local params="${CONTROLLER_MANAGER_TEST_ARGS:-}"
params+=" --master=127.0.0.1:8080" params+=" --master=127.0.0.1:8080"
params+=" --service-account-private-key-file=/srv/kubernetes/server.key" params+=" --service-account-private-key-file=/etc/srv/kubernetes/server.key"
params+=" --root-ca-file=/srv/kubernetes/ca.crt" params+=" --root-ca-file=/etc/srv/kubernetes/ca.crt"
params+=" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}" params+=" --allocate-node-cidrs=${ALLOCATE_NODE_CIDRS}"
params+=" --cluster-cidr=${CLUSTER_IP_RANGE}" params+=" --cluster-cidr=${CLUSTER_IP_RANGE}"
params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}" params+=" --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}"
@ -326,7 +344,8 @@ cd "${KUBE_ROOT}"
tar xzf kubernetes-server-linux-amd64.tar.gz tar xzf kubernetes-server-linux-amd64.tar.gz
source "${KUBE_ROOT}/kubemark-master-env.sh" source "${KUBE_ROOT}/kubemark-master-env.sh"
# Setup required directory structure and etcd variables. # Setup IP firewall rules, required directory structure and etcd variables.
config-ip-firewall
create-dirs create-dirs
setup-kubelet-dir setup-kubelet-dir
delete-default-etcd-configs delete-default-etcd-configs

View File

@ -145,16 +145,16 @@ done
password=$(python -c 'import string,random; print("".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16)))') password=$(python -c 'import string,random; print("".join(random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(16)))')
gcloud compute ssh --zone="${ZONE}" --project="${PROJECT}" "${MASTER_NAME}" \ gcloud compute ssh --zone="${ZONE}" --project="${PROJECT}" "${MASTER_NAME}" \
--command="sudo mkdir /home/kubernetes -p && sudo mkdir /srv/kubernetes -p && \ --command="sudo mkdir /home/kubernetes -p && sudo mkdir /etc/srv/kubernetes -p && \
sudo bash -c \"echo ${MASTER_CERT_BASE64} | base64 --decode > /srv/kubernetes/server.cert\" && \ sudo bash -c \"echo ${MASTER_CERT_BASE64} | base64 --decode > /etc/srv/kubernetes/server.cert\" && \
sudo bash -c \"echo ${MASTER_KEY_BASE64} | base64 --decode > /srv/kubernetes/server.key\" && \ sudo bash -c \"echo ${MASTER_KEY_BASE64} | base64 --decode > /etc/srv/kubernetes/server.key\" && \
sudo bash -c \"echo ${CA_CERT_BASE64} | base64 --decode > /srv/kubernetes/ca.crt\" && \ sudo bash -c \"echo ${CA_CERT_BASE64} | base64 --decode > /etc/srv/kubernetes/ca.crt\" && \
sudo bash -c \"echo ${KUBECFG_CERT_BASE64} | base64 --decode > /srv/kubernetes/kubecfg.crt\" && \ sudo bash -c \"echo ${KUBECFG_CERT_BASE64} | base64 --decode > /etc/srv/kubernetes/kubecfg.crt\" && \
sudo bash -c \"echo ${KUBECFG_KEY_BASE64} | base64 --decode > /srv/kubernetes/kubecfg.key\" && \ sudo bash -c \"echo ${KUBECFG_KEY_BASE64} | base64 --decode > /etc/srv/kubernetes/kubecfg.key\" && \
sudo bash -c \"echo \"${KUBE_BEARER_TOKEN},admin,admin\" > /srv/kubernetes/known_tokens.csv\" && \ sudo bash -c \"echo \"${KUBE_BEARER_TOKEN},admin,admin\" > /etc/srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo \"${KUBELET_TOKEN},kubelet,kubelet\" >> /srv/kubernetes/known_tokens.csv\" && \ sudo bash -c \"echo \"${KUBELET_TOKEN},kubelet,kubelet\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo \"${KUBE_PROXY_TOKEN},kube_proxy,kube_proxy\" >> /srv/kubernetes/known_tokens.csv\" && \ sudo bash -c \"echo \"${KUBE_PROXY_TOKEN},kube_proxy,kube_proxy\" >> /etc/srv/kubernetes/known_tokens.csv\" && \
sudo bash -c \"echo ${password},admin,admin > /srv/kubernetes/basic_auth.csv\"" sudo bash -c \"echo ${password},admin,admin > /etc/srv/kubernetes/basic_auth.csv\""
gcloud compute copy-files --zone="${ZONE}" --project="${PROJECT}" \ gcloud compute copy-files --zone="${ZONE}" --project="${PROJECT}" \
@ -167,12 +167,12 @@ gcloud compute copy-files --zone="${ZONE}" --project="${PROJECT}" \
"${RESOURCE_DIRECTORY}/manifests/kube-apiserver.yaml" \ "${RESOURCE_DIRECTORY}/manifests/kube-apiserver.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-scheduler.yaml" \ "${RESOURCE_DIRECTORY}/manifests/kube-scheduler.yaml" \
"${RESOURCE_DIRECTORY}/manifests/kube-controller-manager.yaml" \ "${RESOURCE_DIRECTORY}/manifests/kube-controller-manager.yaml" \
"root@${MASTER_NAME}":/home/kubernetes/ "kubernetes@${MASTER_NAME}":/home/kubernetes/
gcloud compute ssh "${MASTER_NAME}" --zone="${ZONE}" --project="${PROJECT}" \ gcloud compute ssh "${MASTER_NAME}" --zone="${ZONE}" --project="${PROJECT}" \
--command="sudo chmod a+x /home/kubernetes/configure-kubectl.sh && \ --command="sudo chmod a+x /home/kubernetes/configure-kubectl.sh && \
sudo chmod a+x /home/kubernetes/start-kubemark-master.sh && \ sudo chmod a+x /home/kubernetes/start-kubemark-master.sh && \
sudo /home/kubernetes/start-kubemark-master.sh" sudo bash /home/kubernetes/start-kubemark-master.sh"
# create kubeconfig for Kubelet: # create kubeconfig for Kubelet:
KUBECONFIG_CONTENTS=$(echo "apiVersion: v1 KUBECONFIG_CONTENTS=$(echo "apiVersion: v1