Move everything out of src and reorganize scripts.

Fixed up some scripts to be more robust. Changed the e2e test setup to use g1-small instances. Updated the documentation to reflect the new script locations. Disabled the "curl | bash" cluster launch, as it hasn't been well tested and doesn't yet package the cloudcfg tool.
Joe Beda 2014-06-06 21:38:37 -07:00
parent 07d298755d
commit 894a7e3282
54 changed files with 115 additions and 94 deletions

.gitignore

@@ -7,11 +7,8 @@
 .settings/**
 # This is where the result of the go build goes
-/target/**
-/target
-# This is where we stage releases
-/release/**
+/output/**
+/output
 # Emacs save files
 *~

README.md

@@ -14,32 +14,32 @@ Kubernetes is an open source reference implementation of container cluster management.
 ### Setup
 ```
 cd kubernetes
-./src/scripts/dev-build-and-up.sh
+hack/dev-build-and-up.sh
 ```
 ### Running a container (simple version)
 ```
 cd kubernetes
-./src/scripts/build-go.sh
-./src/scripts/cloudcfg.sh -p 8080:80 run dockerfile/nginx 2 myNginx
+hack/build-go.sh
+cluster/cloudcfg.sh -p 8080:80 run dockerfile/nginx 2 myNginx
 ```
 This will spin up two containers running Nginx mapping port 80 to 8080.
 To stop the container:
 ```
-./src/scripts/cloudcfg.sh stop myNginx
+cluster/cloudcfg.sh stop myNginx
 ```
 To delete the container:
 ```
-./src/scripts/cloudcfg.sh rm myNginx
+cluster/cloudcfg.sh rm myNginx
 ```
 ### Running a container (more complete version)
 ```
 cd kubernetes
-./src/scripts/cloudcfg.sh -c examples/task.json create /tasks
+cluster/cloudcfg.sh -c examples/task.json create /tasks
 ```
 Where task.json contains something like:
@@ -59,12 +59,12 @@ Where task.json contains something like:
 }
 ```
-Look in the ```examples/``` for more examples
+Look in the `examples/` for more examples
 ### Tearing down the cluster
 ```
 cd kubernetes
-./src/scripts/kube-down.sh
+cluster/kube-down.sh
 ```
 ## Development
@@ -82,7 +82,7 @@ ln -s "../../hooks/commit-msg" .git/hooks/commit-msg
 ### Unit tests
 ```
 cd kubernetes
-./src/scripts/test-go.sh
+hack/test-go.sh
 ```
 ### Coverage
@@ -104,7 +104,7 @@ sudo ln -s "$REPO_ROOT/target/bin/etcd" /usr/bin/etcd
 ```
 cd kubernetes
-./src/scripts/integration-test.sh
+hack/integration-test.sh
 ```
 ### Keeping your development fork in sync

cluster/cloudcfg.sh

@@ -16,9 +16,14 @@
 #!/bin/bash
-. $(dirname $0)/util.sh
+source $(dirname $0)/util.sh
+CLOUDCFG=$(dirname $0)/../output/go/cloudcfg
+if [ ! -x $CLOUDCFG ]; then
+  echo "Could not find cloudcfg binary. Run hack/build-go.sh to build it."
+  exit 1
+fi
 detect-master
-$(dirname $0)/../../target/cloudcfg -h https://${KUBE_MASTER_IP} $@
+$CLOUDCFG -h https://${KUBE_MASTER_IP} $@
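With this change, day-to-day use of the wrapper looks roughly like the following — a minimal sketch, assuming a fresh checkout and a running cluster:
```shell
hack/build-go.sh                  # produces output/go/cloudcfg, among other binaries
cluster/cloudcfg.sh list /tasks   # the wrapper adds -h https://${KUBE_MASTER_IP} itself
```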

cluster/config-test.sh

@@ -14,8 +14,8 @@
 # TODO(jbeda): Provide a way to override project
 ZONE=us-central1-b
-MASTER_SIZE=f1-micro
-MINION_SIZE=f1-micro
+MASTER_SIZE=g1-small
+MINION_SIZE=g1-small
 NUM_MINIONS=2
 # gcloud will expand this to the latest supported image.
 IMAGE=debian-7-backports
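These settings flow into the gcloud calls made by kube-up.sh (see the minion loop below); roughly, each instance is created along these lines — a sketch, with the instance name illustrative and only the flags shown that correspond to these variables:
```shell
gcloud compute instances create kubernetes-minion-1 \
  --zone us-central1-b \
  --machine-type g1-small \
  --image debian-7-backports
```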

cluster/kube-push.sh

@@ -42,7 +42,7 @@ detect-master
 (
   echo MASTER_RELEASE_TAR=$RELEASE_NORMALIZED/master-release.tgz
-  cat $(dirname $0)/../templates/download-release.sh
+  cat $(dirname $0)/templates/download-release.sh
   echo "echo Executing configuration"
   echo "sudo salt '*' mine.update"
   echo "sudo salt --force-color '*' state.highstate"

cluster/kube-up.sh

@@ -47,7 +47,7 @@ trap "rm -rf ${KUBE_TEMP}" EXIT
 get-password
 echo "Generating password: $user:$passwd"
-htpasswd -b -c /tmp/htpasswd $user $passwd
+htpasswd -b -c ${KUBE_TEMP}/htpasswd $user $passwd
 cat << EOF > ~/.kubernetes_auth
 {
   "User": "$user",
@@ -55,15 +55,15 @@ cat << EOF > ~/.kubernetes_auth
 }
 EOF
 chmod 0600 ~/.kubernetes_auth
-HTPASSWD=$(cat /tmp/htpasswd)
+HTPASSWD=$(cat ${KUBE_TEMP}/htpasswd)
 (
   echo "#! /bin/bash"
   echo "MASTER_NAME=${MASTER_NAME}"
   echo "MASTER_RELEASE_TAR=${RELEASE_NORMALIZED}/master-release.tgz"
   echo "MASTER_HTPASSWD='${HTPASSWD}'"
-  cat $(dirname $0)/../templates/download-release.sh
-  cat $(dirname $0)/../templates/salt-master.sh
+  cat $(dirname $0)/templates/download-release.sh
+  cat $(dirname $0)/templates/salt-master.sh
 ) > ${KUBE_TEMP}/master-start.sh
 echo "Starting VMs and configuring firewalls"
@@ -86,7 +86,7 @@ for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
   echo "#! /bin/bash"
   echo "MASTER_NAME=${MASTER_NAME}"
   echo "MINION_IP_RANGE=${MINION_IP_RANGES[$i]}"
-  cat $(dirname $0)/../templates/salt-minion.sh
+  cat $(dirname $0)/templates/salt-minion.sh
 ) > ${KUBE_TEMP}/minion-start-${i}.sh
 gcloud compute instances create ${MINION_NAMES[$i]} \
@@ -132,7 +132,6 @@ done
 echo
 echo "Kubernetes cluster is running. Access the master at:"
 echo
 echo "  https://${user}:${passwd}@${KUBE_MASTER_IP}"
 echo
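The htpasswd change above swaps a fixed /tmp path for the per-run temp directory that kube-up.sh already removes via its EXIT trap; the pattern, in brief (mktemp template illustrative):
```shell
KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
trap "rm -rf ${KUBE_TEMP}" EXIT
htpasswd -b -c ${KUBE_TEMP}/htpasswd $user $passwd   # no shared /tmp/htpasswd to clobber
```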

examples/guestbook/README.md

@@ -10,7 +10,7 @@ This example assumes that you have forked the repository and turned up a Kubernetes
 ### Step One: Turn up the redis master.
-Create a file named redis-master.json, this file is describes a single task, which runs a redis key-value server in a container.
+Create a file named `redis-master.json`, this file is describes a single task, which runs a redis key-value server in a container.
 ```javascript
 {
@@ -22,7 +22,7 @@ Create a file named redis-master.json, this file is describes a single task, which
   "image": "dockerfile/redis",
   "ports": [{
     "containerPort": 6379,
     "hostPort": 6379
   }]
 }]
 }
@@ -33,16 +33,16 @@ Create a file named redis-master.json, this file is describes a single task, which
 }
 ```
-Once you have that task file, you can create the redis task in your Kubernetes cluster using the cloudcfg cli:
+Once you have that task file, you can create the redis task in your Kubernetes cluster using the `cloudcfg` cli:
 ```shell
-./src/scripts/cloudcfg.sh -c redis-master.json create /tasks
+cluster/cloudcfg.sh -c redis-master.json create /tasks
 ```
 Once that's up you can list the tasks in the cluster, to verify that the master is running:
 ```shell
-./src/scripts/cloudcfg.sh list /tasks
+cluster/cloudcfg.sh list /tasks
 ```
 You should see a single redis master task. It will also display the machine that the task is running on. If you ssh to that machine, you can run
@@ -50,10 +50,10 @@ You should see a single redis master task. It will also display the machine that
 sudo docker ps
 ```
-And see the actual task. (Note that initial ```docker pull``` may take a few minutes, depending on network conditions.)
+And see the actual task. (Note that initial `docker pull` may take a few minutes, depending on network conditions.)
 ### Step Two: Turn up the master service.
-A Kubernetes 'service' is named load balancer that proxies traffic to one or more containers. The services in a Kubernetes cluster are discoverable inside other containers via environment variables. Services find the containers to load balance based on task labels. The task that you created in Step One has the label "name=redis-master", so the corresponding service is defined by that label. Create a file named redis-master-service.json that contains:
+A Kubernetes 'service' is a named load balancer that proxies traffic to one or more containers. The services in a Kubernetes cluster are discoverable inside other containers via environment variables. Services find the containers to load balance based on task labels. The task that you created in Step One has the label `name=redis-master`, so the corresponding service is defined by that label. Create a file named `redis-master-service.json` that contains:
 ```javascript
 {
@@ -65,16 +65,16 @@ A Kubernetes 'service' is named load balancer that proxies traffic to one or more
 }
 ```
-Once you have that service description, you can create the service with the cloudcfg cli:
+Once you have that service description, you can create the service with the `cloudcfg` cli:
 ```shell
-./src/scripts/cloudcfg.sh -c redis-master-service.json create /services
+cluster/cloudcfg.sh -c redis-master-service.json create /services
 ```
 Once created, the service proxy on each minion is configured to set up a proxy on the specified port (in this case port 10000).
 ### Step Three: Turn up the replicated slave service.
-Although the redis master is a single task, the redis read slaves are a 'replicated' task, in Kubernetes, a replication controller is responsible for managing multiple instances of a replicated task. Create a file named redis-slave-controller.json that contains:
+Although the redis master is a single task, the redis read slaves are a 'replicated' task, in Kubernetes, a replication controller is responsible for managing multiple instances of a replicated task. Create a file named `redis-slave-controller.json` that contains:
 ```javascript
 {
@@ -100,7 +100,7 @@ Although the redis master is a single task, the redis read slaves are a 'replicated'
 Then you can create the service by running:
 ```shell
-./src/scripts/cloudcfg.sh -c redis-slave-controller.json create /replicationControllers
+cluster/cloudcfg.sh -c redis-slave-controller.json create /replicationControllers
 ```
 The redis slave configures itself by looking for the Kubernetes service environment variables in the container environment. In particular, the redis slave is started with the following command:
@@ -112,7 +112,7 @@ redis-server --slaveof $SERVICE_HOST $REDISMASTER_SERVICE_PORT
 Once that's up you can list the tasks in the cluster, to verify that the master and slaves are running:
 ```shell
-./src/scripts/cloudcfg.sh list /tasks
+cluster/cloudcfg.sh list /tasks
 ```
 You should see a single redis master task, and two redis slave tasks.
@@ -131,17 +131,17 @@ Just like the master, we want to have a service to proxy connections to the read slaves.
 }
 ```
-This time the label query for the service is 'name=redis-slave'.
-Now that you have created the service specification, create it in your cluster with the cloudcfg cli:
+This time the label query for the service is `name=redis-slave`.
+Now that you have created the service specification, create it in your cluster with the `cloudcfg` cli:
 ```shell
-./src/scripts/cloudcfg.sh -c redis-slave-service.json create /services
+cluster/cloudcfg.sh -c redis-slave-service.json create /services
 ```
 ### Step Five: Create the frontend service.
-This is a simple PHP server that is configured to talk to both the slave and master services depdending on if the request is a read or a write. It exposes a simple AJAX interface, and serves an angular based U/X. Like the redis read slaves it is a replicated service instantiated by a replication controller. Create a file named frontend-controller.json:
+This is a simple PHP server that is configured to talk to both the slave and master services depdending on if the request is a read or a write. It exposes a simple AJAX interface, and serves an angular based U/X. Like the redis read slaves it is a replicated service instantiated by a replication controller. Create a file named `frontend-controller.json`:
 ```javascript
 {
@@ -167,13 +167,13 @@ This is a simple PHP server that is configured to talk to both the slave and master
 With this file, you can turn up your frontend with:
 ```shell
-./src/scripts/cloudcfg.sh -c frontend-controller.json create /replicationControllers
+cluster/cloudcfg.sh -c frontend-controller.json create /replicationControllers
 ```
 Once that's up you can list the tasks in the cluster, to verify that the master, slaves and frontends are running:
 ```shell
-./src/scripts/cloudcfg.sh list /tasks
+cluster/cloudcfg.sh list /tasks
 ```
 You should see a single redis master task, two redis slave and three frontend tasks.
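Taken together, the walkthrough now runs entirely out of cluster/; a recap of the commands as updated above:
```shell
cluster/cloudcfg.sh -c redis-master.json create /tasks
cluster/cloudcfg.sh -c redis-master-service.json create /services
cluster/cloudcfg.sh -c redis-slave-controller.json create /replicationControllers
cluster/cloudcfg.sh -c redis-slave-service.json create /services
cluster/cloudcfg.sh -c frontend-controller.json create /replicationControllers
cluster/cloudcfg.sh list /tasks   # expect one master, two slaves, three frontends
```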

hack/build-go.sh

@@ -15,9 +15,9 @@
 # This script sets up a go workspace locally and builds all go components.
 # You can 'source' this file if you want to set up GOPATH in your local shell.
-pushd $(dirname "${BASH_SOURCE}")/../.. >/dev/null
+pushd $(dirname "${BASH_SOURCE}")/.. >/dev/null
 KUBE_REPO_ROOT="${PWD}"
-KUBE_TARGET="${KUBE_REPO_ROOT}/target"
+KUBE_TARGET="${KUBE_REPO_ROOT}/output/go"
 popd >/dev/null
 mkdir -p "${KUBE_TARGET}"
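Because the script is sourceable, the relocated output tree is easy to verify; a sketch, assuming it is sourced from the repo root:
```shell
source hack/build-go.sh   # sets KUBE_REPO_ROOT and KUBE_TARGET=${KUBE_REPO_ROOT}/output/go
ls "${KUBE_TARGET}"       # built binaries, e.g. cloudcfg and integration per the other hunks
```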

hack/dev-build-and-push.sh

@@ -20,4 +20,4 @@
 $(dirname $0)/../release/release.sh
 # Now push this out to the cluster
-$(dirname $0)/kube-push.sh
+$(dirname $0)/../cluster/kube-push.sh

hack/dev-build-and-up.sh

@@ -21,4 +21,4 @@
 $(dirname $0)/../release/release.sh
 # Now bring a new cluster up with that release.
-$(dirname $0)/kube-up.sh
+$(dirname $0)/../cluster/kube-up.sh

hack/e2e-test.sh

@@ -22,23 +22,23 @@ set -e
 # Use testing config
 export KUBE_CONFIG_FILE="config-test.sh"
-source $(dirname $0)/util.sh
+source $(dirname $0)/../cluster/util.sh
 # Build a release
 $(dirname $0)/../release/release.sh
 # Now bring a test cluster up with that release.
-$(dirname $0)/kube-up.sh
+$(dirname $0)/../cluster/kube-up.sh
 # Auto shutdown cluster when we exit
 function shutdown-test-cluster () {
   echo "Shutting down test cluster in background."
-  $(dirname $0)/kube-down.sh > /dev/null &
+  $(dirname $0)/../cluster/kube-down.sh > /dev/null &
 }
 trap shutdown-test-cluster EXIT
 # Launch a container
-$(dirname $0)/cloudcfg.sh -p 8080:80 run dockerfile/nginx 2 myNginx
+$(dirname $0)/../cluster/cloudcfg.sh -p 8080:80 run dockerfile/nginx 2 myNginx
 # Get minion IP addresses
 detect-minions

hack/integration-test.sh

@@ -22,13 +22,16 @@ fi
 # Stop right away if the build fails
 set -e
-./src/scripts/build-go.sh
-etcd -name test -data-dir /tmp/foo > /tmp/etcd.log &
+$(dirname $0)/build-go.sh
+ETCD_DIR=$(mktemp -d -t kube-integration.XXXXXX)
+trap "rm -rf ${ETCD_DIR}" EXIT
+etcd -name test -data-dir ${ETCD_DIR} > /tmp/etcd.log &
+ETCD_PID=$!
 sleep 5
-./target/integration
-killall etcd
-rm -rf /tmp/foo
+$(dirname $0)/../output/go/integration
+kill $ETCD_PID
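The test now provisions and tears down its own etcd state; running it stays a one-liner, assuming etcd is installed as described in the README:
```shell
hack/integration-test.sh   # builds via build-go.sh, starts a scratch etcd, runs output/go/integration
```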


@@ -0,0 +1,26 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This is a quick script that adds AllUsers as READER to a JSON file
+# representing an ACL on a GCS object. This is a quick workaround for a bug in
+# gsutil.
+
+import json
+import sys
+acl = json.load(sys.stdin)
+acl.append({
+    "entity": "allUsers",
+    "role": "READER"
+})
+json.dump(acl, sys.stdout)
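The script is designed to sit in a pipe between reading and rewriting an object's ACL; a hypothetical invocation (the script's filename and the gsutil subcommand spellings are assumptions — see the make_public_readable call in release.sh for the real call site):
```shell
gsutil acl get gs://my-bucket/master-release.tgz \
  | python make-public-gcs-acl.py > /tmp/acl.json    # filename hypothetical
gsutil acl set /tmp/acl.json gs://my-bucket/master-release.tgz
```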

release/release.sh

@@ -25,21 +25,21 @@ set -e
 source $(dirname $0)/config.sh
-cd $(dirname $0)/../..
+cd $(dirname $0)/..
 # First build the release tar. This gets copied on to the master and installed
 # from there. It includes the go source for the necessary servers along with
 # the salt configs.
-rm -rf release/*
-MASTER_RELEASE_DIR=release/master-release
+rm -rf output/release/*
+MASTER_RELEASE_DIR=output/release/master-release
 mkdir -p $MASTER_RELEASE_DIR/bin
 mkdir -p $MASTER_RELEASE_DIR/src/scripts
 mkdir -p $MASTER_RELEASE_DIR/third_party/go
 echo "Building release tree"
-cp src/release/master-release-install.sh $MASTER_RELEASE_DIR/src/scripts/master-release-install.sh
-cp -r src/saltbase $MASTER_RELEASE_DIR/src/saltbase
+cp release/master-release-install.sh $MASTER_RELEASE_DIR/src/scripts/master-release-install.sh
+cp -r cluster/saltbase $MASTER_RELEASE_DIR/src/saltbase
 cp -r third_party $MASTER_RELEASE_DIR/third_party/go/src
 function find_go_files() {
@@ -56,41 +56,44 @@ for f in $(find_go_files); do
 done
 echo "Packaging release"
-tar cz -C release -f release/master-release.tgz master-release
+tar cz -C output/release -f output/release/master-release.tgz master-release
 echo "Building launch script"
 # Create the local install script. These are the tools to install the local
 # tools and launch a new cluster.
-LOCAL_RELEASE_DIR=release/local-release
-mkdir -p $LOCAL_RELEASE_DIR/src
-cp -r src/templates $LOCAL_RELEASE_DIR/src/templates
-cp -r src/scripts $LOCAL_RELEASE_DIR/src/scripts
-tar cz -C $LOCAL_RELEASE_DIR -f release/launch-kubernetes.tgz .
-echo "#!/bin/bash" >> release/launch-kubernetes.sh
-echo "RELEASE_TAG=$RELEASE_TAG" >> release/launch-kubernetes.sh
-echo "RELEASE_PREFIX=$RELEASE_PREFIX" >> release/launch-kubernetes.sh
-echo "RELEASE_NAME=$RELEASE_NAME" >> release/launch-kubernetes.sh
-echo "RELEASE_FULL_PATH=$RELEASE_FULL_PATH" >> release/launch-kubernetes.sh
-cat src/release/launch-kubernetes-base.sh >> release/launch-kubernetes.sh
-chmod a+x release/launch-kubernetes.sh
+LOCAL_RELEASE_DIR=output/release/local-release
+mkdir -p $LOCAL_RELEASE_DIR/src/scripts
+cp -r cluster/templates $LOCAL_RELEASE_DIR/src/templates
+cp -r cluster/*.sh $LOCAL_RELEASE_DIR/src/scripts
+tar cz -C $LOCAL_RELEASE_DIR -f output/release/launch-kubernetes.tgz .
+echo "#!/bin/bash" >> output/release/launch-kubernetes.sh
+echo "RELEASE_TAG=$RELEASE_TAG" >> output/release/launch-kubernetes.sh
+echo "RELEASE_PREFIX=$RELEASE_PREFIX" >> output/release/launch-kubernetes.sh
+echo "RELEASE_NAME=$RELEASE_NAME" >> output/release/launch-kubernetes.sh
+echo "RELEASE_FULL_PATH=$RELEASE_FULL_PATH" >> output/release/launch-kubernetes.sh
+cat release/launch-kubernetes-base.sh >> output/release/launch-kubernetes.sh
+chmod a+x output/release/launch-kubernetes.sh
 # Now copy everything up to the release structure on GS
 echo "Uploading to Google Storage"
-if ! gsutil ls $RELEASE_BUCKET > /dev/null; then
+if ! gsutil ls $RELEASE_BUCKET > /dev/null 2>&1 ; then
   echo "Creating $RELEASE_BUCKET"
   gsutil mb $RELEASE_BUCKET
 fi
 for x in master-release.tgz launch-kubernetes.tgz launch-kubernetes.sh; do
-  gsutil -q cp release/$x $RELEASE_FULL_PATH/$x
+  gsutil -q cp output/release/$x $RELEASE_FULL_PATH/$x
   make_public_readable $RELEASE_FULL_PATH/$x
 done
 set_tag $RELEASE_FULL_TAG_PATH $RELEASE_FULL_PATH
-echo "Release pushed ($RELEASE_PREFIX$RELEASE_NAME). Launch with:"
-echo
-echo "  curl -s -L ${RELEASE_FULL_PATH/gs:\/\//http://storage.googleapis.com/}/launch-kubernetes.sh | bash"
-echo
+echo "Release pushed ($RELEASE_PREFIX$RELEASE_NAME)."
+
+# This isn't quite working right now. Need to figure out packaging the cloudcfg tool.
+# echo "  Launch with:"
+# echo
+# echo "  curl -s -L ${RELEASE_FULL_PATH/gs:\/\//http://storage.googleapis.com/}/launch-kubernetes.sh | bash"
+# echo
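The commented-out launch line leans on bash pattern substitution to turn the gs:// path into a public HTTP URL; a small illustration (bucket path illustrative):
```shell
RELEASE_FULL_PATH="gs://my-bucket/devel/r2014-06-06"
echo "${RELEASE_FULL_PATH/gs:\/\//http://storage.googleapis.com/}"
# -> http://storage.googleapis.com/my-bucket/devel/r2014-06-06
```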


@ -1,12 +0,0 @@
# This is a quick script that adds AllUsers as READER to a JSON file
# representing an ACL on a GCS object. This is a quick workaround for a bug in
# gsutil.
import json
import sys
acl = json.load(sys.stdin)
acl.append({
"entity": "allUsers",
"role": "READER"
})
json.dump(acl, sys.stdout)