Merge pull request #1221 from jbeda/dockerized-deploy

Fix up dockerized build and release build
Filipe Brandenburger 2014-09-09 16:34:21 -07:00
commit 516de0550a
21 changed files with 319 additions and 208 deletions


@@ -49,13 +49,21 @@ ENV GOPATH /go
 ENV GOOS linux
 ENV GOARCH amd64

-# Get the code coverage tool and etcd for integration tests
-RUN go get code.google.com/p/go.tools/cmd/cover github.com/coreos/etcd github.com/tools/godep
+# Get the code coverage tool and godep
+RUN go get code.google.com/p/go.tools/cmd/cover github.com/tools/godep
+RUN mkdir -p /go/src/github.com/coreos/etcd && \
+    cd /go/src/github.com/coreos/etcd && \
+    git clone https://github.com/coreos/etcd.git . -b v0.4.6 --depth=1 && \
+    go install github.com/coreos/etcd

 # Mark this as a kube-build container
 RUN touch /kube-build-image

 WORKDIR /go/src/github.com/GoogleCloudPlatform/kubernetes

+# Propagate the git tree version into the build image
+ADD kube-version-defs /kube-version-defs
+
 # Upload Kubernetes source
 ADD kube-source.tar.gz /go/src/github.com/GoogleCloudPlatform/kubernetes
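
The new ADD kube-version-defs line above is how git version information reaches the containerized build: the host writes the file while staging the build context, the image picks it up, and the in-container build script sources it and feeds it to the Go linker. A minimal sketch of that round trip, stitched together from the hunks later in this diff (the actual flag string comes from kube::version_ldflags in hack/config-go.sh, which is not shown on this page):

    # On the host, while staging the build context (see kube::build::build_image below):
    cat >"${build_context_dir}/kube-version-defs" <<EOF
    KUBE_LD_FLAGS="$(kube::version_ldflags)"
    EOF

    # Inside the kube-build container (see the common.sh changes below):
    source /kube-version-defs
    godep go build -ldflags "${KUBE_LD_FLAGS-}" -o "${ARCH_TARGET}/${bin}" "${gopkg}"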


@@ -25,34 +25,53 @@ readonly KUBE_GO_PACKAGE=github.com/GoogleCloudPlatform/kubernetes
 mkdir -p "${KUBE_TARGET}"

 if [[ ! -f "/kube-build-image" ]]; then
-  echo "WARNING: This script should be run in the kube-build conrtainer image!" >&2
+  echo "WARNING: This script should be run in the kube-build container image!" >&2
 fi

-function make-binaries() {
-  readonly BINARIES="
-    proxy
-    integration
-    apiserver
-    controller-manager
-    kubelet
-    kubecfg"
+if [[ -f "/kube-version-defs" ]]; then
+  source "/kube-version-defs"
+else
+  echo "WARNING: No version information provided in build image"
+fi
+
+function kube::build::make_binary() {
+  local -r gopkg=$1
+  local -r bin=${gopkg##*/}
+
+  echo "+++ Building ${bin} for ${GOOS}/${GOARCH}"
+  pushd "${KUBE_REPO_ROOT}" >/dev/null
+  godep go build -ldflags "${KUBE_LD_FLAGS-}" -o "${ARCH_TARGET}/${bin}" "${gopkg}"
+  popd >/dev/null
+}
+
+function kube::build::make_binaries() {
+  if [[ ${#targets[@]} -eq 0 ]]; then
+    targets=(
+      cmd/proxy
+      cmd/apiserver
+      cmd/controller-manager
+      cmd/kubelet
+      cmd/kubecfg
+      plugin/cmd/scheduler
+    )
+  fi
+
+  binaries=()
+  local target
+  for target in "${targets[@]}"; do
+    binaries+=("${KUBE_GO_PACKAGE}/${target}")
+  done

   ARCH_TARGET="${KUBE_TARGET}/${GOOS}/${GOARCH}"
   mkdir -p "${ARCH_TARGET}"

-  function make-binary() {
-    echo "+++ Building $1 for ${GOOS}/${GOARCH}"
-    godep go build \
-      -o "${ARCH_TARGET}/$1" \
-      github.com/GoogleCloudPlatform/kubernetes/cmd/$1
-  }
-
-  if [[ -n $1 ]]; then
-    make-binary $1
+  if [[ -n "$1" ]]; then
+    kube::build::make_binary "$1"
     exit 0
   fi

-  for b in ${BINARIES}; do
-    make-binary $b
+  local b
+  for b in "${binaries[@]}"; do
+    kube::build::make_binary "$b"
   done
 }
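
For orientation, the scripts later in this diff call the renamed entry point in two ways: with no arguments to build the whole default target list, or with a single Go package path to build one binary and exit. A short usage sketch (run inside the kube-build container, where GOOS/GOARCH and KUBE_TARGET are already set up by this script):

    # Build the default set: cmd/proxy, cmd/apiserver, cmd/controller-manager,
    # cmd/kubelet, cmd/kubecfg and plugin/cmd/scheduler
    kube::build::make_binaries

    # Build a single target, as the integration test wrapper below does
    kube::build::make_binaries "./cmd/integration"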


@@ -14,10 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-# This and builds all go components.
 set -e

 source $(dirname $0)/common.sh

-make-binaries "$@"
+kube::build::make_binaries "$@"


@@ -14,22 +14,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-# This and builds all go components.
 set -e

 source $(dirname $0)/common.sh

-readonly CROSS_BINARIES="
-  kubecfg
-"
+readonly CROSS_BINARIES=(
+  ./cmd/kubecfg
+)

 for platform in ${KUBE_CROSSPLATFORMS}; do
   (
     export GOOS=${platform%/*}
     export GOARCH=${platform##*/}
-    for binary in ${CROSS_BINARIES}; do
-      make-binaries "${binary}"
+    for binary in "${CROSS_BINARIES[@]}"; do
+      kube::build::make_binaries "${binary}"
     done
   )
 done


@@ -18,13 +18,15 @@ set -e
 source $(dirname $0)/common.sh

-ETCD_DIR="${KUBE_REPO_ROOT}/_output/etcd"
+kube::build::make_binaries "./cmd/integration"
+
+readonly ETCD_DIR="${KUBE_REPO_ROOT}/_output/etcd"
 mkdir -p "${ETCD_DIR}"

 echo "+++ Running integration test"

 etcd -name test -data-dir ${ETCD_DIR} > "${KUBE_REPO_ROOT}/_output/etcd.log" &
-ETCD_PID=$!
+readonly ETCD_PID=$!

 sleep 5


@@ -14,7 +14,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

 set -e

 source $(dirname $0)/common.sh


@@ -17,6 +17,9 @@
 # Common utilties, variables and checks for all build scripts.

 cd $(dirname "${BASH_SOURCE}")/..
+
+source hack/config-go.sh
+
 readonly KUBE_REPO_ROOT="${PWD}"
 readonly KUBE_GIT_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null)
@@ -38,11 +41,13 @@ readonly DOCKER_CONTAINER_NAME=kube-build
 readonly DOCKER_MOUNT="-v ${LOCAL_OUTPUT_DIR}:${REMOTE_OUTPUT_DIR}"

 readonly KUBE_RUN_IMAGE_BASE="kubernetes"
-readonly KUBE_RUN_BINARIES="
-  apiserver
-  controller-manager
-  proxy
-"
+readonly KUBE_RUN_BINARIES=(
+  apiserver
+  controller-manager
+  proxy
+  scheduler
+)

 # This is where the final release artifacts are created locally
 readonly RELEASE_DIR="${KUBE_REPO_ROOT}/_output/release"
@@ -51,7 +56,7 @@ readonly RELEASE_DIR="${KUBE_REPO_ROOT}/_output/release"
 # Basic setup functions

 # Verify that the right utilities and such are installed for building Kube.
-function verify-prereqs() {
+function kube::build::verify_prereqs() {
   if [[ -z "$(which docker)" ]]; then
     echo "Can't find 'docker' in PATH, please fix and retry." >&2
     echo "See https://docs.docker.com/installation/#installation for installation instructions." >&2
@@ -82,42 +87,13 @@ function verify-prereqs() {
   fi
 }

-# Verify things are set up for uploading to GCS
-function verify-gcs-prereqs() {
-  if [[ -z "$(which gsutil)" || -z "$(which gcloud)" ]]; then
-    echo "Releasing Kubernetes requires gsutil and gcloud. Please download,"
-    echo "install and authorize through the Google Cloud SDK: "
-    echo
-    echo " https://developers.google.com/cloud/sdk/"
-    return 1
-  fi
-
-  FIND_ACCOUNT="gcloud auth list 2>/dev/null | grep '(active)' | awk '{ print \$2 }'"
-  GCLOUD_ACCOUNT=${GCLOUD_ACCOUNT-$(eval ${FIND_ACCOUNT})}
-  if [[ -z "${GCLOUD_ACCOUNT}" ]]; then
-    echo "No account authorized through gcloud. Please fix with:"
-    echo
-    echo " gcloud auth login"
-    return 1
-  fi
-
-  FIND_PROJECT="gcloud config list project | tail -n 1 | awk '{ print \$3 }'"
-  GCLOUD_PROJECT=${GCLOUD_PROJECT-$(eval ${FIND_PROJECT})}
-  if [[ -z "${GCLOUD_PROJECT}" ]]; then
-    echo "No account authorized through gcloud. Please fix with:"
-    echo
-    echo " gcloud config set project <project id>"
-    return 1
-  fi
-}
-
 # ---------------------------------------------------------------------------
 # Building

 # Set up the context directory for the kube-build image and build it.
-function build-image() {
-  local -r BUILD_CONTEXT_DIR="${KUBE_REPO_ROOT}/_output/images/${KUBE_BUILD_IMAGE}"
-  local -r SOURCE="
+function kube::build::build_image() {
+  local -r build_context_dir="${KUBE_REPO_ROOT}/_output/images/${KUBE_BUILD_IMAGE}"
+  local -r source=(
     api
     build
     cmd
@@ -125,78 +101,103 @@ function build-image() {
     Godeps
     hack
     LICENSE
-    README.md
     pkg
     plugin
+    README.md
     third_party
-  "
-  mkdir -p ${BUILD_CONTEXT_DIR}
-  tar czf ${BUILD_CONTEXT_DIR}/kube-source.tar.gz ${SOURCE}
-  cp build/build-image/Dockerfile ${BUILD_CONTEXT_DIR}/Dockerfile
-  docker-build "${KUBE_BUILD_IMAGE}" "${BUILD_CONTEXT_DIR}"
+  )
+  mkdir -p "${build_context_dir}"
+  tar czf "${build_context_dir}/kube-source.tar.gz" "${source[@]}"
+  cat >"${build_context_dir}/kube-version-defs" <<EOF
+KUBE_LD_FLAGS="$(kube::version_ldflags)"
+EOF
+  cp build/build-image/Dockerfile ${build_context_dir}/Dockerfile
+  kube::build::docker_build "${KUBE_BUILD_IMAGE}" "${build_context_dir}"
 }

 # Builds the runtime image. Assumes that the appropriate binaries are already
 # built and in _output/build/.
-function run-image() {
-  local -r BUILD_CONTEXT_BASE="${KUBE_REPO_ROOT}/_output/images/${KUBE_RUN_IMAGE_BASE}"
+function kube::build::run_image() {
+  local -r build_context_base="${KUBE_REPO_ROOT}/_output/images/${KUBE_RUN_IMAGE_BASE}"

   # First build the base image. This one brings in all of the binaries.
-  mkdir -p "${BUILD_CONTEXT_BASE}"
-  tar czf ${BUILD_CONTEXT_BASE}/kube-bins.tar.gz \
+  mkdir -p "${build_context_base}"
+  tar czf "${build_context_base}/kube-bins.tar.gz" \
     -C "_output/build/linux/amd64" \
-    ${KUBE_RUN_BINARIES}
-  cp -R build/run-images/base/* "${BUILD_CONTEXT_BASE}/"
-  docker-build "${KUBE_RUN_IMAGE_BASE}" "${BUILD_CONTEXT_BASE}"
+    "${KUBE_RUN_BINARIES[@]}"
+  cp -R build/run-images/base/* "${build_context_base}/"
+  kube::build::docker_build "${KUBE_RUN_IMAGE_BASE}" "${build_context_base}"

-  for b in $KUBE_RUN_BINARIES ; do
-    local SUB_CONTEXT_DIR="${BUILD_CONTEXT_BASE}-$b"
-    mkdir -p "${SUB_CONTEXT_DIR}"
-    cp -R build/run-images/$b/* "${SUB_CONTEXT_DIR}/"
-    docker-build "${KUBE_RUN_IMAGE_BASE}-$b" "${SUB_CONTEXT_DIR}"
+  local b
+  for b in "${KUBE_RUN_BINARIES[@]}" ; do
+    local sub_context_dir="${build_context_base}-$b"
+    mkdir -p "${sub_context_dir}"
+    cp -R build/run-images/$b/* "${sub_context_dir}/"
+    kube::build::docker_build "${KUBE_RUN_IMAGE_BASE}-$b" "${sub_context_dir}"
   done
 }

+function kube::build::clean_images() {
+  kube::build::clean_image "${KUBE_BUILD_IMAGE}"
+
+  kube::build::clean_image "${KUBE_RUN_IMAGE_BASE}"
+  local b
+  for b in "${KUBE_RUN_BINARIES[@]}" ; do
+    kube::build::clean_image "${KUBE_RUN_IMAGE_BASE}-${b}"
+  done
+
+  echo "+++ Cleaning all other untagged docker images"
+  docker rmi $(docker images | awk '/^<none>/ {print $3}') 2> /dev/null || true
+}
+
 # Build a docker image from a Dockerfile.
 # $1 is the name of the image to build
 # $2 is the location of the "context" directory, with the Dockerfile at the root.
-function docker-build() {
-  local -r IMAGE=$1
-  local -r CONTEXT_DIR=$2
-  local -r BUILD_CMD="docker build -t ${IMAGE} ${CONTEXT_DIR}"
+function kube::build::docker_build() {
+  local -r image=$1
+  local -r context_dir=$2
+  local -r build_cmd="docker build -t ${image} ${context_dir}"

-  echo "+++ Building Docker image ${IMAGE}. This can take a while."
+  echo "+++ Building Docker image ${image}. This can take a while."
   set +e # We are handling the error here manually
-  local -r DOCKER_OUTPUT="$(${BUILD_CMD} 2>&1)"
+  local -r docker_output="$(${build_cmd} 2>&1)"
   if [ $? -ne 0 ]; then
     set -e
-    echo "+++ Docker build command failed for ${IMAGE}" >&2
+    echo "+++ Docker build command failed for ${image}" >&2
     echo >&2
-    echo "${DOCKER_OUTPUT}" >&2
+    echo "${docker_output}" >&2
     echo >&2
     echo "To retry manually, run:" >&2
     echo >&2
-    echo " ${DOCKER_BUILD_CMD}" >&2
+    echo " ${build_cmd}" >&2
     echo >&2
     return 1
   fi
   set -e
 }

+function kube::build::clean_image() {
+  local -r image=$1
+
+  echo "+++ Deleting docker image ${image}"
+  docker rmi ${image} 2> /dev/null || true
+}
+
 # Run a command in the kube-build image. This assumes that the image has
 # already been built. This will sync out all output data from the build.
-function run-build-command() {
+function kube::build::run_build_command() {
   [[ -n "$@" ]] || { echo "Invalid input." >&2; return 4; }

-  local -r DOCKER="docker run --rm --name=${DOCKER_CONTAINER_NAME} -it ${DOCKER_MOUNT} ${KUBE_BUILD_IMAGE}"
+  local -r docker="docker run --rm --name=${DOCKER_CONTAINER_NAME} -it ${DOCKER_MOUNT} ${KUBE_BUILD_IMAGE}"
   docker rm ${DOCKER_CONTAINER_NAME} >/dev/null 2>&1 || true
-  ${DOCKER} "$@"
+  ${docker} "$@"
 }

 # If the Docker server is remote, copy the results back out.
-function copy-output() {
+function kube::build::copy_output() {
   if [[ "$OSTYPE" == "darwin"* ]]; then
     # When we are on the Mac with boot2docker we need to copy the results back
     # out. Ideally we would leave the container around and use 'docker cp' to
@@ -207,7 +208,7 @@ function copy-output() {
     # The easiest thing I (jbeda) could figure out was to launch another
     # container pointed at the same volume, tar the output directory and ship
     # that tar over stdou.
-    local DOCKER="docker run -a stdout --rm --name=${DOCKER_CONTAINER_NAME} ${DOCKER_MOUNT} ${KUBE_BUILD_IMAGE}"
+    local -r docker="docker run -a stdout --rm --name=${DOCKER_CONTAINER_NAME} ${DOCKER_MOUNT} ${KUBE_BUILD_IMAGE}"

     # Kill any leftover container
     docker rm ${DOCKER_CONTAINER_NAME} >/dev/null 2>&1 || true
@@ -215,7 +216,7 @@ function copy-output() {
     echo "+++ Syncing back _output directory from boot2docker VM"
     mkdir -p "${LOCAL_OUTPUT_DIR}"
     rm -rf "${LOCAL_OUTPUT_DIR}/*"
-    ${DOCKER} sh -c "tar c -C ${REMOTE_OUTPUT_DIR} ." \
+    ${docker} sh -c "tar c -C ${REMOTE_OUTPUT_DIR} ." \
       | tar xv -C "${LOCAL_OUTPUT_DIR}"

     # I (jbeda) also tried getting rsync working using 'docker run' as the
@@ -229,17 +230,82 @@ function copy-output() {
 }

 # ---------------------------------------------------------------------------
-# Release
+# Build final release artifacts
+
+# Package up all of the cross compiled clients
+function kube::build::package_tarballs() {
+  mkdir -p "${RELEASE_DIR}"
+
+  # Find all of the built kubecfg binaries
+  local platform
+  for platform in _output/build/*/* ; do
+    local platform_tag=${platform}
+    platform_tag=${platform_tag#*/*/} # remove the first two path components
+    platform_tag=${platform_tag/\//-} # Replace a "/" for a "-"
+    echo "+++ Building client package for $platform_tag"
+
+    local client_release_stage="${KUBE_REPO_ROOT}/_output/release-stage/${platform_tag}/kubernetes"
+    mkdir -p "${client_release_stage}"
+    mkdir -p "${client_release_stage}/bin"
+
+    cp "${platform}"/* "${client_release_stage}/bin"
+
+    local client_package_name="${RELEASE_DIR}/kubernetes-${platform_tag}.tar.gz"
+    tar czf "${client_package_name}" -C "${client_release_stage}/.." .
+  done
+}
+
+# ---------------------------------------------------------------------------
+# GCS Release
+
+function kube::release::gcs::release() {
+  kube::release::gcs::verify_prereqs
+  kube::release::gcs::ensure_release_bucket
+  kube::release::gcs::push_images
+  kube::release::gcs::copy_release_tarballs
+}
+
+# Verify things are set up for uploading to GCS
+function kube::release::gcs::verify_prereqs() {
+  if [[ -z "$(which gsutil)" || -z "$(which gcloud)" ]]; then
+    echo "Releasing Kubernetes requires gsutil and gcloud. Please download,"
+    echo "install and authorize through the Google Cloud SDK: "
+    echo
+    echo " https://developers.google.com/cloud/sdk/"
+    return 1
+  fi
+
+  if [[ -z "${GCLOUD_ACCOUNT-}" ]]; then
+    GCLOUD_ACCOUNT=$(gcloud auth list 2>/dev/null | awk '/(active)/ { print $2 }')
+  fi
+  if [[ -z "${GCLOUD_ACCOUNT}" ]]; then
+    echo "No account authorized through gcloud. Please fix with:"
+    echo
+    echo " gcloud auth login"
+    return 1
+  fi
+
+  if [[ -z "${GCLOUD_PROJECT-}" ]]; then
+    GCLOUD_PROJECT=$(gcloud config list project | awk '{project = $3} END {print project}')
+  fi
+  if [[ -z "${GCLOUD_PROJECT}" ]]; then
+    echo "No account authorized through gcloud. Please fix with:"
+    echo
+    echo " gcloud config set project <project id>"
+    return 1
+  fi
+}

 # Create a unique bucket name for releasing Kube and make sure it exists.
-function ensure-gcs-release-bucket() {
+function kube::release::gcs::ensure_release_bucket() {
+  local project_hash
   if which md5 > /dev/null 2>&1; then
-    HASH=$(md5 -q -s "$GCLOUD_PROJECT")
+    project_hash=$(md5 -q -s "$GCLOUD_PROJECT")
   else
-    HASH=$(echo -n "$GCLOUD_PROJECT" | md5sum)
+    project_hash=$(echo -n "$GCLOUD_PROJECT" | md5sum)
   fi
-  HASH=${HASH:0:5}
-  KUBE_RELEASE_BUCKET=${KUBE_RELEASE_BUCKET-kubernetes-releases-$HASH}
+  project_hash=${project_hash:0:5}
+  KUBE_RELEASE_BUCKET=${KUBE_RELEASE_BUCKET-kubernetes-releases-${project_hash}}
   KUBE_RELEASE_PREFIX=${KUBE_RELEASE_PREFIX-devel/}
   KUBE_DOCKER_REG_PREFIX=${KUBE_DOCKER_REG_PREFIX-docker-reg/}
@@ -249,87 +315,66 @@ function ensure-gcs-release-bucket() {
   fi
 }

-function ensure-gcs-docker-registry() {
-  local -r REG_CONTAINER_NAME="gcs-registry"
+function kube::release::gcs::ensure_docker_registry() {
+  local -r reg_container_name="gcs-registry"

-  local -r RUNNING=$(docker inspect ${REG_CONTAINER_NAME} 2>/dev/null \
+  local -r running=$(docker inspect ${reg_container_name} 2>/dev/null \
     | build/json-extractor.py 0.State.Running 2>/dev/null)

-  [[ "$RUNNING" != "true" ]] || return 0
+  [[ "$running" != "true" ]] || return 0

   # Grovel around and find the OAuth token in the gcloud config
-  local -r BOTO=~/.config/gcloud/legacy_credentials/${GCLOUD_ACCOUNT}/.boto
-  local -r REFRESH_TOKEN=$(grep 'gs_oauth2_refresh_token =' $BOTO | awk '{ print $3 }')
+  local -r boto=~/.config/gcloud/legacy_credentials/${GCLOUD_ACCOUNT}/.boto
+  local -r refresh_token=$(grep 'gs_oauth2_refresh_token =' $boto | awk '{ print $3 }')

-  if [[ -z $REFRESH_TOKEN ]]; then
-    echo "Couldn't find OAuth 2 refresh token in ${BOTO}" >&2
+  if [[ -z "$refresh_token" ]]; then
+    echo "Couldn't find OAuth 2 refresh token in ${boto}" >&2
     return 1
   fi

   # If we have an old one sitting around, remove it
-  docker rm ${REG_CONTAINER_NAME} >/dev/null 2>&1 || true
+  docker rm ${reg_container_name} >/dev/null 2>&1 || true

   echo "+++ Starting GCS backed Docker registry"
-  local DOCKER="docker run -d --name=${REG_CONTAINER_NAME} "
-  DOCKER+="-e GCS_BUCKET=${KUBE_RELEASE_BUCKET} "
-  DOCKER+="-e STORAGE_PATH=${KUBE_DOCKER_REG_PREFIX} "
-  DOCKER+="-e GCP_OAUTH2_REFRESH_TOKEN=${REFRESH_TOKEN} "
-  DOCKER+="-p 127.0.0.1:5000:5000 "
-  DOCKER+="google/docker-registry"
+  local docker="docker run -d --name=${reg_container_name} "
+  docker+="-e GCS_BUCKET=${KUBE_RELEASE_BUCKET} "
+  docker+="-e STORAGE_PATH=${KUBE_DOCKER_REG_PREFIX} "
+  docker+="-e GCP_OAUTH2_REFRESH_TOKEN=${refresh_token} "
+  docker+="-p 127.0.0.1:5000:5000 "
+  docker+="google/docker-registry"

-  ${DOCKER}
+  ${docker}

   # Give it time to spin up before we start throwing stuff at it
   sleep 5
 }

-function push-images-to-gcs() {
-  ensure-gcs-docker-registry
+function kube::release::gcs::push_images() {
+  kube::release::gcs::ensure_docker_registry

   # Tag each of our run binaries with the right registry and push
-  for b in ${KUBE_RUN_BINARIES} ; do
-    echo "+++ Tagging and pushing ${KUBE_RUN_IMAGE_BASE}-$b to GCS bucket ${KUBE_RELEASE_BUCKET}"
-    docker tag "${KUBE_RUN_IMAGE_BASE}-$b" "localhost:5000/${KUBE_RUN_IMAGE_BASE}-$b"
-    docker push "localhost:5000/${KUBE_RUN_IMAGE_BASE}-$b"
-    docker rmi "localhost:5000/${KUBE_RUN_IMAGE_BASE}-$b"
+  local b image_name
+  for b in "${KUBE_RUN_BINARIES[@]}" ; do
+    image_name="${KUBE_RUN_IMAGE_BASE}-${b}"
+    echo "+++ Tagging and pushing ${image_name} to GCS bucket ${KUBE_RELEASE_BUCKET}"
+    docker tag "${KUBE_RUN_IMAGE_BASE}-$b" "localhost:5000/${image_name}"
+    docker push "localhost:5000/${image_name}"
+    docker rmi "localhost:5000/${image_name}"
   done
 }

-# Package up all of the cross compiled clients
-function package-tarballs() {
-  mkdir -p "${RELEASE_DIR}"
-
-  # Find all of the built kubecfg binaries
-  for platform in _output/build/*/* ; do
-    echo $platform
-    local PLATFORM_TAG=$(echo $platform | awk -F / '{ printf "%s-%s", $3, $4 }')
-    echo "+++ Building client package for $PLATFORM_TAG"
-
-    local CLIENT_RELEASE_STAGE="${KUBE_REPO_ROOT}/_output/release-stage/${PLATFORM_TAG}/kubernetes"
-    mkdir -p "${CLIENT_RELEASE_STAGE}"
-    mkdir -p "${CLIENT_RELEASE_STAGE}/bin"
-
-    cp $platform/* "${CLIENT_RELEASE_STAGE}/bin"
-
-    local CLIENT_PACKAGE_NAME="${RELEASE_DIR}/kubernetes-${PLATFORM_TAG}.tar.gz"
-    tar czf ${CLIENT_PACKAGE_NAME} \
-      -C "${CLIENT_RELEASE_STAGE}/.." \
-      .
-  done
-}
-
-function copy-release-to-gcs() {
+function kube::release::gcs::copy_release_tarballs() {
   # TODO: This isn't atomic. There will be points in time where there will be
   # no active release. Also, if something fails, the release could be half-
   # copied. The real way to do this would perhaps to have some sort of release
   # version so that we are never overwriting a destination.
-  local -r GCS_DESTINATION="gs://${KUBE_RELEASE_BUCKET}/${KUBE_RELEASE_PREFIX}"
-  echo "+++ Copying client tarballs to ${GCS_DESTINATION}"
+  local -r gcs_destination="gs://${KUBE_RELEASE_BUCKET}/${KUBE_RELEASE_PREFIX}"
+  echo "+++ Copying client tarballs to ${gcs_destination}"

   # First delete all objects at the destination
-  gsutil -q rm -f -R "${GCS_DESTINATION}" >/dev/null 2>&1 || true
+  gsutil -q rm -f -R "${gcs_destination}" >/dev/null 2>&1 || true

   # Now upload everything in release directory
-  gsutil -m cp -r "${RELEASE_DIR}" "${GCS_DESTINATION}" >/dev/null 2>&1
+  gsutil -m cp -r "${RELEASE_DIR}" "${gcs_destination}" >/dev/null 2>&1
 }
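
To make the packaging step concrete, here is what kube::build::package_tarballs produces for one platform, with the values derived from the code above (linux/amd64 is always built in the container; other platforms come from KUBE_CROSSPLATFORMS):

    # platform     = _output/build/linux/amd64
    # platform_tag = linux-amd64
    # result       = ${KUBE_REPO_ROOT}/_output/release/kubernetes-linux-amd64.tar.gz
    #                (a kubernetes/bin/... tree holding that platform's client binaries)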


@@ -23,5 +23,5 @@ set -e
 source $(dirname $0)/common.sh

-verify-prereqs
-copy-output
+kube::build::verify_prereqs
+kube::build::copy_output


@@ -23,6 +23,6 @@ set -e
 source $(dirname $0)/common.sh

-verify-prereqs
-build-image
-run-build-command build/build-image/make-binaries.sh "$@"
+kube::build::verify_prereqs
+kube::build::build_image
+kube::build::run_build_command build/build-image/make-binaries.sh "$@"


@@ -25,5 +25,5 @@ set -e
 source $(dirname $0)/common.sh

-verify-prereqs
-build-image
+kube::build::verify_prereqs
+kube::build::build_image


@@ -20,6 +20,10 @@ set -e
 source $(dirname $0)/common.sh

-verify-prereqs
-build-image
-run-build-command rm -rf _output/build/*
+kube::build::verify_prereqs
+kube::build::build_image
+
+echo "+++ Cleaning out _output/build/*"
+kube::build::run_build_command rm -rf _output/build/*
+
+kube::build::clean_images


@@ -23,6 +23,6 @@ set -e
 source $(dirname $0)/common.sh

-verify-prereqs
-build-image
-run-build-command build/build-image/make-cross.sh
+kube::build::verify_prereqs
+kube::build::build_image
+kube::build::run_build_command build/build-image/make-cross.sh


@@ -23,8 +23,8 @@ set -e
 source $(dirname $0)/common.sh

-verify-prereqs
-build-image
-run-build-command build/build-image/make-binaries.sh "$@"
-copy-output
-run-image
+kube::build::verify_prereqs
+kube::build::build_image
+kube::build::run_build_command build/build-image/make-binaries.sh "$@"
+kube::build::copy_output
+kube::build::run_image


@@ -8,7 +8,7 @@ containers:
 - name: registry
   image: google/docker-registry
   ports:
     - name: registry
      hostPort: 5000
      containerPort: 5000
  env:

@@ -44,6 +44,6 @@ containers:
      value: http://127.0.0.1:4001
    - key: API_SERVER
      value: 127.0.0.1:8090
 volumes:
  - name: etcddata

@@ -22,16 +22,13 @@ set -e
 source $(dirname $0)/common.sh

-verify-prereqs
-verify-gcs-prereqs
-ensure-gcs-release-bucket
-build-image
-run-build-command build/build-image/make-binaries.sh
-run-build-command build/build-image/make-cross.sh
-run-build-command build/build-image/run-tests.sh
-run-build-command build/build-image/run-integration.sh
-copy-output
-run-image
-package-tarballs
-push-images-to-gcs
-copy-release-to-gcs
+kube::build::verify_prereqs
+kube::build::build_image
+kube::build::run_build_command build/build-image/make-binaries.sh
+kube::build::run_build_command build/build-image/make-cross.sh
+kube::build::run_build_command build/build-image/run-tests.sh
+kube::build::run_build_command build/build-image/run-integration.sh
+kube::build::copy_output
+kube::build::run_image
+kube::build::package_tarballs
+kube::release::gcs::release
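
Because the kube::release::gcs helpers now read their settings from the environment (falling back to gcloud's active account and project plus a hashed default bucket name), a release run can be steered without editing any script. A hedged example, assuming this file is the build/release.sh entry point; the filename itself is not shown on this page:

    # Optional overrides honored by the GCS release helpers:
    export GCLOUD_PROJECT=my-project              # default: gcloud config list project
    export KUBE_RELEASE_BUCKET=my-release-bucket  # default: kubernetes-releases-<project hash>
    export KUBE_RELEASE_PREFIX=devel/             # default: devel/
    build/release.sh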


@@ -14,10 +14,10 @@
 # This file creates a minimal container for running Kubernetes binaries
 FROM google/debian:wheezy
 MAINTAINER Joe Beda <jbeda@google.com>

 WORKDIR /kubernetes

-# Upload Kubernetes
+# Upload Kubernetes server binaries
 ADD kube-bins.tar.gz /kubernetes


@@ -0,0 +1,24 @@
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file creates a minimal container for running Kubernetes binaries
FROM kubernetes
MAINTAINER Joe Beda <jbeda@google.com>
ENV API_SERVER 127.0.0.1:8080
ADD . /kubernetes
CMD ["/kubernetes/run.sh"]


@@ -0,0 +1,17 @@
#! /bin/bash
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
./scheduler -master="${API_SERVER}"


@@ -20,7 +20,7 @@ set -e
 source $(dirname $0)/common.sh

-verify-prereqs
-build-image
-run-build-command build/build-image/make-binaries.sh "integration"
-run-build-command build/build-image/run-integration.sh
+kube::build::verify_prereqs
+kube::build::build_image
+kube::build::run_build_command build/build-image/make-binaries.sh "./cmd/integration"
+kube::build::run_build_command build/build-image/run-integration.sh


@@ -20,6 +20,6 @@ set -e
 source $(dirname $0)/common.sh

-verify-prereqs
-build-image
-run-build-command build/build-image/run-tests.sh "$@"
+kube::build::verify_prereqs
+kube::build::build_image
+kube::build::run_build_command build/build-image/run-tests.sh "$@"


@@ -22,6 +22,6 @@ set -e
 source $(dirname $0)/common.sh

-verify-prereqs
-build-image
-run-build-command bash
+kube::build::verify_prereqs
+kube::build::build_image
+kube::build::run_build_command bash