Store all of the master's persistent data on a separate "data"
persistent disk when running on GCE.

I'll follow up soon with a second PR that enables kube-push to
completely bring down the master VM and replace it with a new one.
Alex Robinson 2014-12-18 20:24:57 +00:00
parent 308b078ee7
commit f892e84e0a
2 changed files with 49 additions and 1 deletion
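
After kube-up runs with this change, the new data disk and its attachment can be spot-checked with gcloud (a sketch only, assuming the usual ${PROJECT}, ${ZONE}, and ${MASTER_NAME} values from the cluster/gce config):

# Sketch: confirm the master data PD exists and is attached to the master VM.
gcloud compute disks describe "${MASTER_NAME}-pd" --project "${PROJECT}" --zone "${ZONE}"
gcloud compute instances describe "${MASTER_NAME}" --project "${PROJECT}" --zone "${ZONE}" | grep -A 2 "deviceName: master-pd"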


@@ -0,0 +1,39 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Formats and mounts a persistent disk to store the persistent data on the
# master -- etcd's data and the security certs/keys.
device_info=$(ls -l /dev/disk/by-id/google-master-pd)
relative_path=${device_info##* }
device_path="/dev/disk/by-id/${relative_path}"
# Format and mount the disk to the directory used by etcd.
mkdir -p /mnt/master-pd
/usr/share/google/safe_format_and_mount -m "mkfs.ext4 -F" "${device_path}" /mnt/master-pd
mkdir -m 700 -p /mnt/master-pd/var/etcd
mkdir -p /mnt/master-pd/srv/kubernetes
ln -s /mnt/master-pd/var/etcd /var/etcd
ln -s /mnt/master-pd/srv/kubernetes /srv/kubernetes
# This is a bit of a hack to get around the fact that salt has to run after the
# PD and mounted directory are already set up. We can't give ownership of the
# directory to etcd until the etcd user and group exist, but they don't exist
# until salt runs if we don't create them here. We could alternatively make the
# permissions on the directory more permissive, but this seems less bad.
useradd -s /sbin/nologin -d /var/etcd etcd
chown etcd /mnt/master-pd/var/etcd
chgrp etcd /mnt/master-pd/var/etcd
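
For reference, the ls -l parsing above extracts the symlink target from the long-listing output, so device_path ends up pointing at the underlying block device. A minimal equivalent, assuming an image that ships coreutils readlink, would be:

# Sketch only: resolve the GCE-provided by-id symlink without parsing ls output.
device_path=$(readlink -f /dev/disk/by-id/google-master-pd)   # e.g. /dev/sdb

Once the script has run, /var/etcd and /srv/kubernetes are symlinks into the mounted PD, so etcd's data and the certs/keys live on the disk rather than on the master VM's boot disk.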


@@ -391,6 +391,7 @@ function kube-up {
echo "readonly ENABLE_NODE_LOGGING='${ENABLE_NODE_LOGGING:-false}'"
echo "readonly LOGGING_DESTINATION='${LOGGING_DESTINATION:-}'"
grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/common.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/format-and-mount-pd.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/create-dynamic-salt-files.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/download-release.sh"
grep -v "^#" "${KUBE_ROOT}/cluster/gce/templates/salt-master.sh"
@@ -405,6 +406,13 @@ function kube-up {
fi
fi
# We have to make sure the disk is created before creating the master VM, so
# run this in the foreground.
gcloud compute disks create "${MASTER_NAME}-pd" \
--project "${PROJECT}" \
--zone "${ZONE}" \
--size "10GB"
gcloud compute instances create "${MASTER_NAME}" \
--project "${PROJECT}" \
--zone "${ZONE}" \
@@ -414,7 +422,8 @@ function kube-up {
--tags "${MASTER_TAG}" \
--network "${NETWORK}" \
--scopes "storage-ro" "compute-rw" \
--metadata-from-file "startup-script=${KUBE_TEMP}/master-start.sh" &
--metadata-from-file "startup-script=${KUBE_TEMP}/master-start.sh" \
--disk name="${MASTER_NAME}-pd" device-name=master-pd mode=rw boot=no auto-delete=no &
# Create the firewall rules, 10 at a time.
for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
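
The device-name=master-pd attribute in the new --disk flag is what surfaces the attached disk at /dev/disk/by-id/google-master-pd on the VM (via GCE's standard udev rules), which is the path the format-and-mount-pd.sh template expects; the disk create also runs in the foreground so the disk exists before the backgrounded instance create references it. A minimal check on the booted master might look like:

# Sketch only, assuming a stock GCE image with the usual udev rules.
ls -l /dev/disk/by-id/google-master-pd   # symlink named after device-name=master-pd
mount | grep /mnt/master-pd              # present once the startup script has run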