Add Rackspace to build/release.sh

Updates the build/release.sh scripts to upload build binaries to
Rackspace Cloud Files.
Ryan Richard 2014-10-17 15:10:08 -05:00
parent a8bae68865
commit f2838483a1
7 changed files with 37 additions and 86 deletions

View File: build/common.sh

@@ -672,13 +672,15 @@ function kube::release::rackspace::release() {
[[ ${KUBE_RACKSPACE_UPLOAD_RELEASE-y} =~ ^[yY]$ ]] || return 0
CLOUDFILES_CONTAINER="kubernetes-releases-${OS_USERNAME}"
KUBE_RACKSPACE_RELEASE_BUCKET=${KUBE_RACKSPACE_RELEASE_BUCKET-kubernetes-releases-${OS_USERNAME}}
KUBE_RACKSPACE_RELEASE_PREFIX=${KUBE_RACKSPACE_RELEASE_PREFIX-devel/}
kube::release::rackspace::verify_prereqs
kube::release::rackspace::ensure_release_container
kube::release::rackspace::copy_release_tarballs
}
# Verify things are set up for uploading to GCS
# Verify things are set up for uploading to Rackspace
function kube::release::rackspace::verify_prereqs() {
# Make sure swiftly is installed and available
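A quick aside on the two bash idioms doing the work in the guard above: `${KUBE_RACKSPACE_UPLOAD_RELEASE-y}` substitutes the default `y` only when the variable is unset, and `[[ ... =~ ^[yY]$ ]]` matches the result against a regex. A minimal, self-contained sketch of the same pattern (the variable name here is illustrative):

```bash
#!/bin/bash
# ${var-default} falls back to the default only when var is unset;
# ${var:-default} would also replace an empty-but-set value.
upload=${UPLOAD_RELEASE-y}   # illustrative variable name

# Proceed only when the value is a single y or Y; otherwise skip.
if [[ ${upload} =~ ^[yY]$ ]]; then
  echo "uploading release..."
else
  echo "skipping upload"
fi
```

Running it with `UPLOAD_RELEASE=n` takes the skip branch; leaving the variable unset picks up the default and uploads.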
@@ -709,9 +711,6 @@ function kube::release::rackspace::verify_prereqs() {
function kube::release::rackspace::ensure_release_container() {
KUBE_RACKSPACE_RELEASE_BUCKET=${KUBE_RACKSPACE_RELEASE_BUCKET-kubernetes-releases-${OS_USERNAME}}
KUBE_RACKSPACE_RELEASE_PREFIX=${KUBE_RACKSPACE_RELEASE_PREFIX-devel/}
SWIFTLY_CMD="swiftly -A ${OS_AUTH_URL} -U ${OS_USERNAME} -K ${OS_PASSWORD}"
if ! ${SWIFTLY_CMD} get ${CLOUDFILES_CONTAINER} > /dev/null 2>&1 ; then
@@ -720,9 +719,9 @@ function kube::release::rackspace::ensure_release_container() {
fi
}
# Copy kubernetes-server-linux-amd64.tar.gz to cloud files object store
function kube::release::rackspace::copy_release_tarballs() {
# Copy release tar.gz to cloud files object store
echo "build/common.sh: Uploading to Cloud Files"
${SWIFTLY_CMD} put -i ${RELEASE_DIR}/kubernetes-server-linux-amd64.tar.gz ${CLOUDFILES_CONTAINER}/devel/kubernetes-server-linux-amd64.tar.gz > /dev/null 2>&1
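Taken together, the functions above form a check-then-create-then-upload flow against Cloud Files. A condensed sketch of that flow under the same environment assumptions (`OS_AUTH_URL`, `OS_USERNAME`, and `OS_PASSWORD` exported; `RELEASE_DIR` given an illustrative default here):

```bash
#!/bin/bash
set -e

SWIFTLY_CMD="swiftly -A ${OS_AUTH_URL} -U ${OS_USERNAME} -K ${OS_PASSWORD}"
CLOUDFILES_CONTAINER="kubernetes-releases-${OS_USERNAME}"
RELEASE_DIR=${RELEASE_DIR:-_output/release-tars}   # illustrative default
TARBALL="kubernetes-server-linux-amd64.tar.gz"

# 'get' on the container exits non-zero when it does not exist yet...
if ! ${SWIFTLY_CMD} get "${CLOUDFILES_CONTAINER}" > /dev/null 2>&1; then
  # ...and 'put' on a bare container name creates it.
  ${SWIFTLY_CMD} put "${CLOUDFILES_CONTAINER}" > /dev/null 2>&1
fi

# Upload the server tarball into the devel/ pseudo-directory.
${SWIFTLY_CMD} put -i "${RELEASE_DIR}/${TARBALL}" \
  "${CLOUDFILES_CONTAINER}/devel/${TARBALL}" > /dev/null 2>&1
```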

View File: build/release.sh

@@ -41,4 +41,4 @@ kube::build::copy_output
kube::build::run_image
kube::release::package_tarballs
kube::release::${KUBERNETES_PROVIDER}::release
kube::release::${KUBERNETES_PROVIDER-gce}::release
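The `-gce` default above matters because the provider name is spliced into a function name at call time; with `KUBERNETES_PROVIDER` unset, the old line degenerated into a call to the nonexistent `kube::release::::release`. A small sketch of this dispatch pattern (bash permits `::` in function names):

```bash
#!/bin/bash
# Each provider supplies a release function in its own namespace.
function kube::release::gce::release() { echo "releasing to GCS"; }
function kube::release::rackspace::release() { echo "releasing to Cloud Files"; }

# The provider name expands into the function name; defaulting to gce
# keeps the call valid when KUBERNETES_PROVIDER is unset.
kube::release::${KUBERNETES_PROVIDER-gce}::release
```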

View File: cluster/rackspace/cloud-config/master-cloud-config.yaml

@@ -22,7 +22,7 @@ write_files:
#!/bin/bash
OBJECT_URL="CLOUD_FILES_URL"
echo "Downloading release ($OBJECT_URL)"
wget $OBJECT_URL -O /opt/kubernetes.tar.gz
wget "${OBJECT_URL}" -O /opt/kubernetes.tar.gz
echo "Unpacking release"
rm -rf /opt/kubernetes || false
tar xzf /opt/kubernetes.tar.gz -C /opt/
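Quoting the URL in the new `wget` line is a real fix, not style: the Cloud Files temp URL substituted for `CLOUD_FILES_URL` carries query parameters, and an unquoted expansion undergoes word splitting and pathname expansion (`?` is a glob character). A small illustration with a fake URL:

```bash
#!/bin/bash
# Fake temp URL, for illustration only.
OBJECT_URL='https://storage.example.com/kube.tar.gz?temp_url_sig=abc&temp_url_expires=123'

# Quoted, the URL reaches wget as exactly one argument, immune to
# word splitting and to '?' being treated as a glob pattern.
wget "${OBJECT_URL}" -O /opt/kubernetes.tar.gz
```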
@@ -47,8 +47,6 @@ coreos:
reboot-strategy: etcd-lock
units:
#- name: nova-agent-watcher.service
# command: try-restart
- name: etcd.service
command: start
- name: fleet.service
@@ -78,8 +76,8 @@ coreos:
Requires=download-release.service
[Service]
EnvironmentFile=-/run/apiserver/minions.env
ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/platforms/linux/amd64/apiserver /opt/bin/apiserver
ExecStart=/opt/bin/apiserver --address=127.0.0.1 --port=8080 --machines=${MINIONS} --etcd_servers=http://127.0.0.1:4001 --logtostderr=true
ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/apiserver /opt/bin/apiserver
ExecStart=/opt/bin/apiserver --address=127.0.0.1 --port=8080 --machines=${MINIONS} --etcd_servers=http://127.0.0.1:4001 --portal_net="PORTAL_NET" --logtostderr=true
Restart=always
RestartSec=2
- name: master-apiserver-sighup.path
@@ -117,7 +115,7 @@ coreos:
After=master-apiserver.service
Requires=master-apiserver.service
[Service]
ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/platforms/linux/amd64/controller-manager /opt/bin/controller-manager
ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/controller-manager /opt/bin/controller-manager
ExecStart=/opt/bin/controller-manager --master=127.0.0.1:8080 --logtostderr=true
Restart=always
RestartSec=2
@@ -132,10 +130,11 @@ coreos:
After=master-apiserver.service
Requires=master-apiserver.service
[Service]
ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/platforms/linux/amd64/scheduler /opt/bin/scheduler
ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/scheduler /opt/bin/scheduler
ExecStart=/opt/bin/scheduler --master=127.0.0.1:8080 --logtostderr=true
Restart=always
RestartSec=10
#Running nginx service with --net="host" is a necessary evil until running all k8s services in docker.
- name: kubernetes-nginx.service
command: start
content: |
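The path edits in these units track the layout of `kubernetes-server-linux-amd64.tar.gz`, which unpacks its binaries under `kubernetes/server/bin/` rather than the old `kubernetes/platforms/linux/amd64/`. Each unit then follows the same symlink-then-exec pattern; the shell equivalent, with the apiserver as the example binary:

```bash
#!/bin/bash
# -s creates a symbolic link; -f replaces an existing one, so the
# ExecStartPre step stays idempotent across restarts and re-downloads.
ln -sf /opt/kubernetes/server/bin/apiserver /opt/bin/apiserver

# The unit then execs the stable /opt/bin path, not the unpack dir.
/opt/bin/apiserver --address=127.0.0.1 --port=8080
```

Linking into `/opt/bin` keeps the unit files stable even if the tarball layout changes again; only the `ExecStartPre` line needs updating.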

View File: cluster/rackspace/cloud-config/minion-cloud-config.yaml

@@ -16,9 +16,9 @@ write_files:
permissions: 0755
content: |
#!/bin/bash
OBJECT_URL="http://storage.googleapis.com/kubernetes-releases-56726/devel/kubernetes.tar.gz"
OBJECT_URL="CLOUD_FILES_URL"
echo "Downloading release ($OBJECT_URL)"
wget $OBJECT_URL -O /opt/kubernetes.tar.gz
wget "${OBJECT_URL}" -O /opt/kubernetes.tar.gz
echo "Unpacking release"
rm -rf /opt/kubernetes || false
tar xzf /opt/kubernetes.tar.gz -C /opt/
@@ -102,7 +102,7 @@ coreos:
After=download-release.service
Requires=download-release.service
[Service]
ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/platforms/linux/amd64/kubelet /opt/bin/kubelet
ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/kubelet /opt/bin/kubelet
ExecStart=/opt/bin/kubelet --address=$private_ipv4 --hostname_override=$private_ipv4 --etcd_servers=http://127.0.0.1:4001 --logtostderr=true --config=/opt/kubernetes-manifests
Restart=always
RestartSec=2
@@ -119,7 +119,7 @@ coreos:
After=download-release.service
Requires=download-release.service
[Service]
ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/platforms/linux/amd64/proxy /opt/bin/proxy
ExecStartPre=/usr/bin/ln -sf /opt/kubernetes/server/bin/proxy /opt/bin/proxy
ExecStart=/opt/bin/proxy --bind_address=$private_ipv4 --etcd_servers=http://127.0.0.1:4001 --logtostderr=true
Restart=always
RestartSec=2

View File: cluster/rackspace/util.sh

@@ -83,13 +83,14 @@ find-release-tars() {
fi
}
# Retrieves a tempurl from cloudfiles to make the release object publicly accessible for 6 hours.
find-object-url() {
RELEASE=kubernetes-releases-${OS_USERNAME}/devel/kubernetes-server-linux-amd64.tar.gz
TEMP_URL=$(swiftly -A ${OS_AUTH_URL} -U ${OS_USERNAME} -K ${OS_PASSWORD} tempurl GET $RELEASE)
RELEASE_TMP_URL=$(swiftly -A ${OS_AUTH_URL} -U ${OS_USERNAME} -K ${OS_PASSWORD} tempurl GET ${RELEASE})
echo "cluster/rackspace/util.sh: Object temp URL:"
echo -e "\t${TEMP_URL}"
echo -e "\t${RELEASE_TMP_URL}"
}
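`swiftly tempurl GET <container>/<object>` asks Cloud Files to sign a time-limited GET URL (six hours, per the comment above), which is what lets booting nodes fetch a private release object with no credentials. A sketch of generating and sanity-checking such a URL; the `curl` probe is an assumption on my part (Swift generally honors HEAD against a GET-signed temp URL):

```bash
#!/bin/bash
set -e

RELEASE="kubernetes-releases-${OS_USERNAME}/devel/kubernetes-server-linux-amd64.tar.gz"

# Ask swiftly to sign an expiring GET URL for the release object.
RELEASE_TMP_URL=$(swiftly -A "${OS_AUTH_URL}" -U "${OS_USERNAME}" -K "${OS_PASSWORD}" \
  tempurl GET "${RELEASE}")

# The signature and expiry ride along as query parameters.
echo -e "\t${RELEASE_TMP_URL}"

# Assumption: probe the URL with a HEAD request before booting nodes.
curl -fsI "${RELEASE_TMP_URL}" > /dev/null && echo "temp URL is live"
```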
@@ -99,19 +100,16 @@ rax-boot-master() {
DISCOVERY_ID=$(echo "${DISCOVERY_URL}" | cut -f 4 -d /)
echo "cluster/rackspace/util.sh: etcd discovery URL: ${DISCOVERY_URL}"
get-password
find-object-url
# Copy cloud-config to KUBE_TEMP and work some sed magic
sed -e "s|DISCOVERY_ID|${DISCOVERY_ID}|" \
-e "s|CLOUD_FILES_URL|${TEMP_URL}|" \
-e "s|CLOUD_FILES_URL|${RELEASE_TMP_URL//&/\&}|" \
-e "s|KUBE_USER|${KUBE_USER}|" \
-e "s|KUBE_PASSWORD|${KUBE_PASSWORD}|" \
-e "s|PORTAL_NET|${PORTAL_NET}|" \
$(dirname $0)/rackspace/cloud-config/master-cloud-config.yaml > $KUBE_TEMP/master-cloud-config.yaml
MASTER_BOOT_CMD="nova boot \
MASTER_BOOT_CMD="nova boot
--key-name ${SSH_KEY_NAME} \
--flavor ${KUBE_MASTER_FLAVOR} \
--image ${KUBE_IMAGE} \
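The `${RELEASE_TMP_URL//&/\&}` expansion above is the subtle part of this hunk: in a sed replacement string a bare `&` stands for the entire matched text, and temp URLs contain `&temp_url_expires=...`, so splicing the raw URL in would corrupt the generated cloud-config. The `${var//pattern/replacement}` expansion escapes every `&` first. A self-contained demonstration with a fake URL:

```bash
#!/bin/bash
# Fake temp URL; real ones come from 'swiftly tempurl GET'.
url='https://storage.example.com/kube.tar.gz?temp_url_sig=abc&temp_url_expires=123'
template='OBJECT_URL="CLOUD_FILES_URL"'

# Broken: sed replaces each bare '&' with the matched text, so
# "CLOUD_FILES_URL" reappears in the middle of the URL.
echo "${template}" | sed -e "s|CLOUD_FILES_URL|${url}|"

# Fixed: rewrite every '&' to '\&' before sed sees it.
echo "${template}" | sed -e "s|CLOUD_FILES_URL|${url//&/\&}|"
```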
@@ -136,7 +134,7 @@ rax-boot-minions() {
sed -e "s|DISCOVERY_ID|${DISCOVERY_ID}|" \
-e "s|INDEX|$((i + 1))|g" \
-e "s|CLOUD_FILES_URL|${TEMP_URL}|" \
-e "s|CLOUD_FILES_URL|${RELEASE_TMP_URL//&/\&}|" \
$(dirname $0)/rackspace/cloud-config/minion-cloud-config.yaml > $KUBE_TEMP/minion-cloud-config-$(($i + 1)).yaml
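The minion hunk adds one more wrinkle: `$((i + 1))` arithmetic expansion turns the 0-based loop counter into the 1-based `INDEX` stamped into each generated file. The same generate-per-node pattern in miniature (template and file names illustrative):

```bash
#!/bin/bash
NUM_MINIONS=3
template='hostname: kube-minion-INDEX'

for ((i = 0; i < NUM_MINIONS; i++)); do
  # $((i + 1)) converts the 0-based counter to a 1-based node index.
  echo "${template}" | sed -e "s|INDEX|$((i + 1))|g" \
    > "minion-cloud-config-$((i + 1)).yaml"
done

cat minion-cloud-config-1.yaml   # -> hostname: kube-minion-1
```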
@@ -204,7 +202,7 @@ kube-up() {
# Find the release to use. Generally it will be passed when doing a 'prod'
# install and will default to the release/config.sh version when doing a
# developer up.
#find-object-url $CONTAINER output/release/$TAR_FILE
find-object-url
# Create a temp directory to hold scripts that will be uploaded to master/minions
KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
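`mktemp -d -t kubernetes.XXXXXX` gives kube-up a private staging directory for the generated cloud-configs. A common companion idiom, not part of this diff, is an exit trap so the directory is removed however the script ends:

```bash
#!/bin/bash
# Private staging directory for generated cloud-config files.
KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)

# Assumption: clean up on any exit (success, failure, or Ctrl-C).
trap 'rm -rf "${KUBE_TEMP}"' EXIT

echo "staging files in ${KUBE_TEMP}"
```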

View File: docs/getting-started-guides/rackspace.md

@@ -1,12 +1,9 @@
# Rackspace
In general, the dev-build-and-up.sh workflow for Rackspace is similar to GCE's. The specific implementation differs due to the use of CoreOS and the network design.
In general, the dev-build-and-up.sh workflow for Rackspace is similar to GCE's. The specific implementation differs due to the use of CoreOS, Rackspace Cloud Files, and the network design.
These scripts should be used to deploy development environments for Kubernetes. If your account leverages RackConnect or non-standard networking, these scripts will most likely not work without modification.
NOTE: The rackspace scripts do NOT rely on saltstack.
For older versions please either:
* Sync back to `v0.3` with `git checkout v0.3`
* Download a [snapshot of `v0.3`](https://github.com/GoogleCloudPlatform/kubernetes/archive/v0.3.tar.gz)
NOTE: The rackspace scripts do NOT rely on `saltstack`.
The current cluster design is inspired by:
- [corekube](https://github.com/metral/corekube/)
@@ -15,17 +12,16 @@ The current cluster design is inspired by:
## Prerequisites
1. You need to have both `nova` and `swiftly` installed. It's recommended to install these packages into a Python virtualenv.
2. Make sure you have the appropriate environment variables set to interact with the OpenStack APIs. See [Rackspace Documentation](http://docs.rackspace.com/servers/api/v2/cs-gettingstarted/content/section_gs_install_nova.html) for more details.
3. You can test this by running `nova list` to make sure you're authenticated successfully; a sketch of the required environment variables follows this list.
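As referenced in step 3, here is a sketch of the environment `nova` and `swiftly` expect. The exact variable set depends on your account; the values below are illustrative placeholders:

```bash
# Illustrative OpenStack credentials; substitute your own values.
export OS_AUTH_URL=https://identity.api.rackspacecloud.com/v2.0/
export OS_USERNAME=your-rackspace-username
export OS_PASSWORD=your-api-key
export OS_REGION_NAME=DFW

# Sanity check: a successful 'nova list' confirms authentication.
nova list
```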
## Provider: Rackspace
- To use Rackspace as the provider, set the `KUBERNETES_PROVIDER` ENV variable:
`export KUBERNETES_PROVIDER=rackspace` and run the `bash cluster/kube-up.sh` script.
`export KUBERNETES_PROVIDER=rackspace` and run the `bash hack/dev-build-and-up.sh` script.
## Build
1. The kubernetes binaries will be built via the common build scripts in `release/`. There is a specific `release/rackspace` directory with scripts for the following steps:
1. The kubernetes binaries will be built via the common build scripts in `build/`.
2. If you've set the ENV `KUBERNETES_PROVIDER=rackspace`, the scripts will upload `kubernetes-server-linux-amd64.tar.gz` to Cloud Files.
2. A cloud files container will be created via the `swiftly` CLI and a temp URL will be enabled on the object.
3. The built `master-release.tar.gz` will be uploaded to this container and the URL will be passed to master/minion nodes when booted.
- NOTE: RELEASE tagging and launch scripts are not used currently.
3. The built `kubernetes-server-linux-amd64.tar.gz` will be uploaded to this container and the URL will be passed to master/minion nodes when booted.
## Cluster
1. There is a specific `cluster/rackspace` directory with the scripts for the following steps:
@@ -37,3 +33,11 @@ The current cluster design is inspired by:
## Some notes:
- The scripts expect `eth2` to be the cloud network that the containers will communicate across.
- A number of the items in `config-default.sh` are overridable via environment variables.
- For older versions please either:
* Sync back to `v0.3` with `git checkout v0.3`
* Download a [snapshot of `v0.3`](https://github.com/GoogleCloudPlatform/kubernetes/archive/v0.3.tar.gz)
## Network Design
- eth0 - Public Interface used for servers/containers to reach the internet
- eth1 - ServiceNet - Intra-cluster communication (k8s, etcd, etc.) travels over this interface. The `cloud-config` files use the special CoreOS identifier `$private_ipv4` to configure the services.
- eth2 - Cloud Network - Used for k8s pods to communicate with one another. The proxy service will pass traffic via this interface.

View File: release/rackspace/release.sh (deleted)

@@ -1,49 +0,0 @@
#!/bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script will build and release Kubernetes.
#
# The main parameters to this script come from the config.sh file. This is set
# up by default for development releases. Feel free to edit it or override some
# of the variables there.
# exit on any error
set -e
SCRIPT_DIR=$(CDPATH="" cd $(dirname $0); pwd)
source $SCRIPT_DIR/config.sh
KUBE_REPO_ROOT="$(cd "$(dirname "$0")/../../" && pwd -P)"
source "${KUBE_REPO_ROOT}/cluster/kube-env.sh"
source $SCRIPT_DIR/../../cluster/rackspace/${KUBE_CONFIG_FILE-"config-default.sh"}
source $SCRIPT_DIR/../../cluster/rackspace/util.sh
$SCRIPT_DIR/../build-release.sh $INSTANCE_PREFIX
# Copy everything up to swift object store
echo "release/rackspace/release.sh: Uploading to Cloud Files"
if ! swiftly -A $OS_AUTH_URL -U $OS_USERNAME -K $OS_PASSWORD get $CONTAINER > /dev/null 2>&1 ; then
echo "release/rackspace/release.sh: Container doesn't exist. Creating..."
swiftly -A $OS_AUTH_URL -U $OS_USERNAME -K $OS_PASSWORD put $CONTAINER > /dev/null 2>&1
fi
for x in master-release.tgz; do
swiftly -A $OS_AUTH_URL -U $OS_USERNAME -K $OS_PASSWORD put -i _output/release/$x $CONTAINER/output/release/$x > /dev/null 2>&1
done
echo "Release pushed."