Launch a cluster-local registry.

This registry can be accessed through proxies that run on each node,
listening on port 5000. We ship the proxy image to the nodes directly
so that cluster launch does not depend on pulling it over the network.
For now, we still pull the registry image itself over the network,
which is unfortunate given its size (we should be able to shrink the
image dramatically). On GCE we create a persistent disk (PD) and use
it for storage; otherwise we use an emptyDir. The registry is not
enabled outside of GCE. All communication is currently plain HTTP. To
use SSL, we will need to be able to request a certificate/key pair
from the apiserver, signed by the apiserver's CA cert.
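
For a concrete picture of what this enables, here is a minimal usage sketch,
assuming the proxy pod added in this commit is already running on the node;
the image name "myapp" is hypothetical, and pushing over plain HTTP may
require the Docker daemon to treat localhost:5000 as an insecure registry.

# Tag and push a hypothetical local image into the cluster registry via the
# node-local proxy on port 5000 (plain HTTP for now, as noted above).
docker tag myapp localhost:5000/myapp
docker push localhost:5000/myapp

# Any other node can then pull it back through its own proxy.
docker pull localhost:5000/myapp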
commit 7129d477d3 (parent 9b01580946)
Author: Muhammed Uluyol
Date:   2015-07-27 11:50:31 -07:00
20 changed files with 319 additions and 0 deletions

View File

@@ -100,6 +100,12 @@ readonly KUBE_DOCKER_WRAPPED_BINARIES=(
  kube-scheduler
)

# The set of addon images that should be prepopulated
readonly KUBE_ADDON_PATHS=(
  gcr.io/google_containers/pause:0.8.0
  uluyol/kube-registry-proxy:0.2.3
)
# ---------------------------------------------------------------------------
# Basic setup functions
@@ -602,6 +608,7 @@ function kube::release::package_server_tarballs() {
    local release_stage="${RELEASE_STAGE}/server/${platform_tag}/kubernetes"
    rm -rf "${release_stage}"
    mkdir -p "${release_stage}/server/bin"
    mkdir -p "${release_stage}/addons"

    # This fancy expression will expand to prepend a path
    # (${LOCAL_OUTPUT_BINPATH}/${platform}/) to every item in the
@@ -610,6 +617,7 @@ function kube::release::package_server_tarballs() {
      "${release_stage}/server/bin/"
    kube::release::create_docker_images_for_server "${release_stage}/server/bin";
    kube::release::write_addon_docker_images_for_server "${release_stage}/addons"

    # Include the client binaries here too as they are useful debugging tools.
    local client_bins=("${KUBE_CLIENT_BINARIES[@]}")
@@ -681,6 +689,27 @@ function kube::release::create_docker_images_for_server() {
  )
}

# This will pull and save docker images for addons which need to be placed
# on the nodes directly.
function kube::release::write_addon_docker_images_for_server() {
  # Create a sub-shell so that we don't pollute the outer environment
  (
    local addon_path
    for addon_path in "${KUBE_ADDON_PATHS[@]}"; do
      (
        kube::log::status "Pulling and writing Docker image for addon: ${addon_path}"
        local dest_name="${addon_path//\//\~}"
        docker pull "${addon_path}"
        docker save "${addon_path}" > "${1}/${dest_name}.tar"
      ) &
    done

    kube::util::wait-for-jobs || { kube::log::error "unable to pull or write addon image"; return 1; }

    kube::log::status "Addon images done"
  )
}
# Package up the salt configuration tree. This is an optional helper for getting
# a cluster up and running.
function kube::release::package_salt_tarball() {
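
To make the tarball layout concrete: the ${addon_path//\//\~} substitution above
turns slashes in the image reference into '~', so each addon image is saved as a
single flat file under addons/. A hedged sketch of loading one of these files on
a node by hand, using the same path that load-docker-images is pointed at later
in this diff:

# uluyol/kube-registry-proxy:0.2.3 is saved as uluyol~kube-registry-proxy:0.2.3.tar
docker load -i "/srv/salt/kube-addons-images/uluyol~kube-registry-proxy:0.2.3.tar"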

View File

@@ -0,0 +1,12 @@
FROM haproxy:1.5
MAINTAINER Muhammed Uluyol <uluyol@google.com>
RUN apt-get update && apt-get install -y dnsutils
ADD proxy.conf.insecure.in /proxy.conf.in
ADD run_proxy.sh /usr/bin/run_proxy
RUN chown root:users /usr/bin/run_proxy
RUN chmod 755 /usr/bin/run_proxy
CMD ["/usr/bin/run_proxy"]

View File

@@ -0,0 +1,10 @@
.PHONY: build push vet test clean

TAG = 0.2.3
REPO = uluyol/kube-registry-proxy

build:
	docker build -t $(REPO):$(TAG) .

push:
	docker push $(REPO):$(TAG)

View File

@@ -0,0 +1,17 @@
global
    maxconn 1024

defaults
    mode http
    retries 3
    option redispatch
    timeout client 1s
    timeout server 5s
    timeout connect 5s

frontend forwarder
    bind *:%FWDPORT%
    default_backend registry

backend registry
    server kube-registry %HOST%:%PORT% ssl verify required ca-file %CA_FILE%

View File

@@ -0,0 +1,17 @@
global
    maxconn 1024

defaults
    mode http
    retries 3
    option redispatch
    timeout client 1s
    timeout server 5s
    timeout connect 5s

frontend forwarder
    bind *:%FWDPORT%
    default_backend registry

backend registry
    server kube-registry %HOST%:%PORT%

View File

@@ -0,0 +1,33 @@
#!/usr/bin/env bash

# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

REGISTRY_HOST=${REGISTRY_HOST:?no host}
REGISTRY_PORT=${REGISTRY_PORT:-5000}
REGISTRY_CA=${REGISTRY_CA:-/var/run/secrets/kubernetes.io/serviceaccount/ca.crt}
FORWARD_PORT=${FORWARD_PORT:-5000}
sed -e "s/%HOST%/$REGISTRY_HOST/g" \
    -e "s/%PORT%/$REGISTRY_PORT/g" \
    -e "s/%FWDPORT%/$FORWARD_PORT/g" \
    -e "s|%CA_FILE%|$REGISTRY_CA|g" \
    </proxy.conf.in >/proxy.conf

# wait for registry to come online
while ! host "$REGISTRY_HOST" &>/dev/null; do
  printf "waiting for %s to come online\n" "$REGISTRY_HOST"
  sleep 1
done
printf "starting proxy\n"
exec haproxy -f /proxy.conf "$@"
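
As a sanity check, this entrypoint can be exercised outside the cluster with a
one-off docker run; this is only a sketch, and the REGISTRY_HOST value below
(taken from the pod manifest later in this diff) will only resolve where
cluster DNS is reachable.

docker run --rm \
  -e REGISTRY_HOST=kube-registry.kube-system.svc.cluster.local \
  -e REGISTRY_PORT=5000 \
  -e FORWARD_PORT=5000 \
  -p 5000:5000 \
  uluyol/kube-registry-proxy:0.2.3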

View File

@@ -0,0 +1,53 @@
apiVersion: v1
kind: ReplicationController
metadata:
  name: kube-registry-v0
  namespace: kube-system
  labels:
    k8s-app: kube-registry
    version: v0
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: kube-registry
    version: v0
  template:
    metadata:
      labels:
        k8s-app: kube-registry
        version: v0
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: registry
        image: registry:2
        resources:
          limits:
            cpu: 100m
            memory: 100Mi
        env:
        - name: REGISTRY_HTTP_ADDR
          value: :5000
        - name: REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY
          value: /var/lib/registry
        volumeMounts:
        - name: image-store
          mountPath: /var/lib/registry
        ports:
        - containerPort: 5000
          name: registry
          protocol: TCP
      # TODO: use a persistent volume claim
      volumes:
      - name: image-store
        {%- if pillar.get('cluster_registry_disk', '') != '' -%}
        {%- for disk in pillar['cluster_registry_disk'].keys() %}
        {{ disk }}:
          {%- for k, v in pillar['cluster_registry_disk'][disk].items() %}
          {{k}}: {{v}}
          {%- endfor -%}
        {%- endfor -%}
        {%- else %}
        emptyDir: {}
        {%- endif -%}

View File

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
  name: kube-registry
  namespace: kube-system
  labels:
    k8s-app: kube-registry
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeRegistry"
spec:
  selector:
    k8s-app: kube-registry
  ports:
  - name: registry
    port: 5000
    protocol: TCP
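
A quick, hedged way to confirm the registry is reachable behind this Service,
bypassing the node proxies: from any pod that has curl and cluster DNS, the
Docker Registry v2 API base endpoint should return HTTP 200 with an empty
JSON body.

curl -sS http://kube-registry.kube-system.svc.cluster.local:5000/v2/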

View File

@@ -77,6 +77,12 @@ DNS_SERVER_IP="10.0.0.10"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1
# Optional: Install cluster docker registry.
ENABLE_CLUSTER_REGISTRY="${KUBE_ENABLE_CLUSTER_REGISTRY:-true}"
CLUSTER_REGISTRY_DISK="${CLUSTER_REGISTRY_PD:-${INSTANCE_PREFIX}-kube-system-kube-registry}"
CLUSTER_REGISTRY_DISK_SIZE="${CLUSTER_REGISTRY_DISK_SIZE:-200GB}"
CLUSTER_REGISTRY_DISK_TYPE="${CLUSTER_REGISTRY_DISK_TYPE:-pd-standard}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"

View File

@@ -82,6 +82,12 @@ DNS_SERVER_IP="10.0.0.10"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1
# Optional: Install cluster docker registry.
ENABLE_CLUSTER_REGISTRY="${KUBE_ENABLE_CLUSTER_REGISTRY:-true}"
CLUSTER_REGISTRY_DISK="${CLUSTER_REGISTRY_DISK:-${INSTANCE_PREFIX}-kube-system-kube-registry}"
CLUSTER_REGISTRY_DISK_SIZE="${CLUSTER_REGISTRY_DISK_SIZE:-200GB}"
CLUSTER_REGISTRY_DISK_TYPE="${CLUSTER_REGISTRY_DISK_TYPE:-pd-standard}"
# Optional: Install Kubernetes UI
ENABLE_CLUSTER_UI="${KUBE_ENABLE_CLUSTER_UI:-true}"

View File

@@ -272,6 +272,7 @@ enable_node_logging: '$(echo "$ENABLE_NODE_LOGGING" | sed -e "s/'/''/g")'
logging_destination: '$(echo "$LOGGING_DESTINATION" | sed -e "s/'/''/g")'
elasticsearch_replicas: '$(echo "$ELASTICSEARCH_LOGGING_REPLICAS" | sed -e "s/'/''/g")'
enable_cluster_dns: '$(echo "$ENABLE_CLUSTER_DNS" | sed -e "s/'/''/g")'
enable_cluster_registry: '$(echo "$ENABLE_CLUSTER_REGISTRY" | sed -e "s/'/''/g")'
dns_replicas: '$(echo "$DNS_REPLICAS" | sed -e "s/'/''/g")'
dns_server: '$(echo "$DNS_SERVER_IP" | sed -e "s/'/''/g")'
dns_domain: '$(echo "$DNS_DOMAIN" | sed -e "s/'/''/g")'
@@ -301,6 +302,15 @@ EOF
  if [ -n "${KUBEPROXY_TEST_ARGS:-}" ]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
kubeproxy_test_args: '$(echo "$KUBEPROXY_TEST_ARGS" | sed -e "s/'/''/g")'
EOF
  fi
  # TODO: Replace this with a persistent volume (and create it).
  if [[ "${ENABLE_CLUSTER_REGISTRY}" == true && -n "${CLUSTER_REGISTRY_DISK}" ]]; then
    cat <<EOF >>/srv/salt-overlay/pillar/cluster-params.sls
cluster_registry_disk:
  gcePersistentDisk:
    pdName: ${CLUSTER_REGISTRY_DISK}
    fsType: ext4
EOF
  fi
}
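
For reference, with the registry enabled on GCE the generated pillar ends with a
stanza shaped like the one below. The pdName shown assumes the default disk name
from config-default.sh with an INSTANCE_PREFIX of kubernetes; that prefix is an
assumption, not something this diff pins down.

# Inspect the generated pillar on the master; illustrative tail
# (pdName assumes INSTANCE_PREFIX=kubernetes):
cat /srv/salt-overlay/pillar/cluster-params.sls
#   ...
#   cluster_registry_disk:
#     gcePersistentDisk:
#       pdName: kubernetes-kube-system-kube-registry
#       fsType: ext4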

View File

@@ -43,6 +43,7 @@ ENABLE_NODE_LOGGING: $(yaml-quote ${ENABLE_NODE_LOGGING:-false})
LOGGING_DESTINATION: $(yaml-quote ${LOGGING_DESTINATION:-})
ELASTICSEARCH_LOGGING_REPLICAS: $(yaml-quote ${ELASTICSEARCH_LOGGING_REPLICAS:-})
ENABLE_CLUSTER_DNS: $(yaml-quote ${ENABLE_CLUSTER_DNS:-false})
ENABLE_CLUSTER_REGISTRY: $(yaml-quote ${ENABLE_CLUSTER_REGISTRY:-false})
DNS_REPLICAS: $(yaml-quote ${DNS_REPLICAS:-})
DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-})
DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-})
@@ -80,6 +81,7 @@ ENABLE_NODE_LOGGING=${ENABLE_NODE_LOGGING:-false}
LOGGING_DESTINATION=${LOGGING_DESTINATION:-}
ELASTICSEARCH_LOGGING_REPLICAS=${ELASTICSEARCH_LOGGING_REPLICAS:-}
ENABLE_CLUSTER_DNS=${ENABLE_CLUSTER_DNS:-false}
ENABLE_CLUSTER_REGISTRY=${ENABLE_CLUSTER_REGISTRY:-false}
DNS_REPLICAS=${DNS_REPLICAS:-}
DNS_SERVER_IP=${DNS_SERVER_IP:-}
DNS_DOMAIN=${DNS_DOMAIN:-}

View File

@@ -41,6 +41,8 @@ ENABLE_NODE_LOGGING: $(yaml-quote ${ENABLE_NODE_LOGGING:-false})
LOGGING_DESTINATION: $(yaml-quote ${LOGGING_DESTINATION:-})
ELASTICSEARCH_LOGGING_REPLICAS: $(yaml-quote ${ELASTICSEARCH_LOGGING_REPLICAS:-})
ENABLE_CLUSTER_DNS: $(yaml-quote ${ENABLE_CLUSTER_DNS:-false})
ENABLE_CLUSTER_REGISTRY: $(yaml-quote ${ENABLE_CLUSTER_REGISTRY:-false})
CLUSTER_REGISTRY_DISK: $(yaml-quote ${CLUSTER_REGISTRY_DISK})
DNS_REPLICAS: $(yaml-quote ${DNS_REPLICAS:-})
DNS_SERVER_IP: $(yaml-quote ${DNS_SERVER_IP:-})
DNS_DOMAIN: $(yaml-quote ${DNS_DOMAIN:-})

View File

@@ -643,6 +643,15 @@ function kube-up {
    --type "${MASTER_DISK_TYPE}" \
    --size "${MASTER_DISK_SIZE}"

  # Create disk for cluster registry if enabled
  if [[ "${ENABLE_CLUSTER_REGISTRY}" == true && -n "${CLUSTER_REGISTRY_DISK}" ]]; then
    gcloud compute disks create "${CLUSTER_REGISTRY_DISK}" \
      --project "${PROJECT}" \
      --zone "${ZONE}" \
      --type "${CLUSTER_REGISTRY_DISK_TYPE}" \
      --size "${CLUSTER_REGISTRY_DISK_SIZE}" &
  fi
  # Generate a bearer token for this cluster. We push this separately
  # from the other cluster variables so that the client (this
  # computer) can forget it later. This should disappear with
@@ -837,6 +846,17 @@ function kube-down {
      "${MASTER_NAME}"-pd
  fi

  # Delete disk for cluster registry if enabled
  if [[ "${ENABLE_CLUSTER_REGISTRY}" == true && -n "${CLUSTER_REGISTRY_DISK}" ]]; then
    if gcloud compute disks describe "${CLUSTER_REGISTRY_DISK}" --zone "${ZONE}" --project "${PROJECT}" &>/dev/null; then
      gcloud compute disks delete \
        --project "${PROJECT}" \
        --quiet \
        --zone "${ZONE}" \
        "${CLUSTER_REGISTRY_DISK}"
    fi
  fi
  # Find out what minions are running.
  local -a minions
  minions=( $(gcloud compute instances list \
@@ -955,6 +975,11 @@ function check-resources {
    return 1
  fi

  if gcloud compute disks describe --project "${PROJECT}" "${CLUSTER_REGISTRY_DISK}" --zone "${ZONE}" &>/dev/null; then
    KUBE_RESOURCE_FOUND="Persistent disk ${CLUSTER_REGISTRY_DISK}"
    return 1
  fi
  # Find out what minions are running.
  local -a minions
  minions=( $(gcloud compute instances list \

View File

@@ -64,7 +64,9 @@ done
echo "+++ Install binaries from tar: $1"
tar -xz -C "${KUBE_TEMP}" -f "$1"
mkdir -p /srv/salt-new/salt/kube-bins
mkdir -p /srv/salt-new/salt/kube-addons-images
cp -v "${KUBE_TEMP}/kubernetes/server/bin/"* /srv/salt-new/salt/kube-bins/
cp -v "${KUBE_TEMP}/kubernetes/addons/"* /srv/salt-new/salt/kube-addons-images/
kube_bin_dir="/srv/salt-new/salt/kube-bins";
docker_images_sls_file="/srv/salt-new/pillar/docker-images.sls";

View File

@@ -81,6 +81,25 @@ addon-dir-create:
    - makedirs: True
{% endif %}

{% if pillar.get('enable_cluster_registry', '').lower() == 'true' %}
/etc/kubernetes/addons/registry/registry-svc.yaml:
  file.managed:
    - source: salt://kube-addons/registry/registry-svc.yaml
    - user: root
    - group: root
    - file_mode: 644
    - makedirs: True

/etc/kubernetes/addons/registry/registry-rc.yaml:
  file.managed:
    - source: salt://kube-addons/registry/registry-rc.yaml.in
    - template: jinja
    - user: root
    - group: root
    - file_mode: 644
    - makedirs: True
{% endif %}

{% if pillar.get('enable_node_logging', '').lower() == 'true'
   and pillar.get('logging_destination').lower() == 'elasticsearch'
   and pillar.get('enable_cluster_logging', '').lower() == 'true' %}

View File

@@ -125,6 +125,28 @@ function create-resource-from-string() {
  return 1;
}

# $1 is the directory containing all of the docker images
function load-docker-images() {
  local success
  local restart_docker
  while true; do
    success=true
    restart_docker=false
    for image in "$1/"*; do
      timeout 30 docker load -i "${image}" &>/dev/null
      rc=$?
      if [[ $rc == 124 ]]; then
        restart_docker=true
      elif [[ $rc != 0 ]]; then
        success=false
      fi
    done
    if [[ $success == true ]]; then break; fi
    if [[ $restart_docker == true ]]; then service docker restart; fi
    sleep 15
  done
}
# The business logic for whether a given object should be created
# was already enforced by salt, and /etc/kubernetes/addons is the
# managed result of that. Start everything below that directory.
@@ -142,6 +164,9 @@ for k,v in yaml.load(sys.stdin).iteritems():
''' < "${kube_env_yaml}")
fi
# Load any images that we may need
load-docker-images /srv/salt/kube-addons-images
# Create the namespace that will be used to host the cluster-level add-ons.
start_addon /etc/kubernetes/addons/namespace.yaml 100 10 "" &

View File

@@ -0,0 +1,8 @@
/etc/kubernetes/manifests/kube-registry-proxy.yaml:
  file.managed:
    - source: salt://kube-registry-proxy/kube-registry-proxy.yaml
    - user: root
    - group: root
    - mode: 644
    - makedirs: True
    - dir_mode: 755

View File

@@ -0,0 +1,24 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-registry-proxy
  namespace: kube-system
spec:
  containers:
  - name: kube-registry-proxy
    image: uluyol/kube-registry-proxy:0.2.3
    resources:
      limits:
        cpu: 100m
        memory: 50Mi
    env:
    - name: REGISTRY_HOST
      value: kube-registry.kube-system.svc.cluster.local
    - name: REGISTRY_PORT
      value: "5000"
    - name: FORWARD_PORT
      value: "5000"
    ports:
    - name: registry
      containerPort: 5000
      hostPort: 5000
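
Because this pod publishes hostPort 5000, the full node-to-registry path can be
verified from the node itself; a hedged check that complements the in-cluster
Service check earlier is simply:

# An HTTP 200 here means haproxy resolved the kube-registry service and is forwarding.
curl -sS http://localhost:5000/v2/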

View File

@@ -24,6 +24,9 @@ base:
{% elif pillar['logging_destination'] == 'gcp' %}
    - fluentd-gcp
{% endif %}
{% endif %}
{% if pillar.get('enable_cluster_registry', '').lower() == 'true' %}
    - kube-registry-proxy
{% endif %}
    - logrotate
{% if grains['cloud'] is defined and grains.cloud == 'gce' %}