diff --git a/.gitignore b/.gitignore
index e2d7d627358..0ccb1dada9a 100755
--- a/.gitignore
+++ b/.gitignore
@@ -7,11 +7,8 @@
 .settings/**
 
 # This is where the result of the go build goes
-/target/**
-/target
-
-# This is where we stage releases
-/release/**
+/output/**
+/output
 
 # Emacs save files
 *~
diff --git a/README.md b/README.md
index 43c6948e52f..a21b587a497 100644
--- a/README.md
+++ b/README.md
@@ -14,32 +14,32 @@ Kubernetes is an open source reference implementation of container cluster manag
 ### Setup
 ```
 cd kubernetes
-./src/scripts/dev-build-and-up.sh
+hack/dev-build-and-up.sh
 ```
 
 ### Running a container (simple version)
 ```
 cd kubernetes
-./src/scripts/build-go.sh
-./src/scripts/cloudcfg.sh -p 8080:80 run dockerfile/nginx 2 myNginx
+hack/build-go.sh
+cluster/cloudcfg.sh -p 8080:80 run dockerfile/nginx 2 myNginx
 ```
 
 This will spin up two containers running Nginx mapping port 80 to 8080.
 
 To stop the container:
 ```
-./src/scripts/cloudcfg.sh stop myNginx
+cluster/cloudcfg.sh stop myNginx
 ```
 
 To delete the container:
 ```
-./src/scripts/cloudcfg.sh rm myNginx
+cluster/cloudcfg.sh rm myNginx
 ```
 
 ### Running a container (more complete version)
 ```
 cd kubernetes
-./src/scripts/cloudcfg.sh -c examples/task.json create /tasks
+cluster/cloudcfg.sh -c examples/task.json create /tasks
 ```
 
 Where task.json contains something like:
@@ -59,12 +59,12 @@ Where task.json contains something like:
 }
 ```
 
-Look in the ```examples/``` for more examples
+Look in the `examples/` directory for more examples.
 
 ### Tearing down the cluster
 ```
 cd kubernetes
-./src/scripts/kube-down.sh
+cluster/kube-down.sh
 ```
 
 ## Development
@@ -82,7 +82,7 @@ ln -s "../../hooks/commit-msg" .git/hooks/commit-msg
 ### Unit tests
 ```
 cd kubernetes
-./src/scripts/test-go.sh
+hack/test-go.sh
 ```
 
 ### Coverage
@@ -104,7 +104,7 @@ sudo ln -s "$REPO_ROOT/target/bin/etcd" /usr/bin/etcd
 
 ```
 cd kubernetes
-./src/scripts/integration-test.sh
+hack/integration-test.sh
 ```
 
 ### Keeping your development fork in sync
diff --git a/src/scripts/cloudcfg.sh b/cluster/cloudcfg.sh
similarity index 73%
rename from src/scripts/cloudcfg.sh
rename to cluster/cloudcfg.sh
index 93efb79a655..4f231ac8b42 100755
--- a/src/scripts/cloudcfg.sh
+++ b/cluster/cloudcfg.sh
@@ -16,9 +16,14 @@
 
 #!/bin/bash
 
-. $(dirname $0)/util.sh
+source $(dirname $0)/util.sh
+
+CLOUDCFG=$(dirname $0)/../output/go/cloudcfg
+if [ ! -x $CLOUDCFG ]; then
+  echo "Could not find cloudcfg binary. Run hack/build-go.sh to build it."
+  exit 1
+fi
 
 detect-master
-$(dirname $0)/../../target/cloudcfg -h https://${KUBE_MASTER_IP} $@
-
+$CLOUDCFG -h https://${KUBE_MASTER_IP} $@
 
diff --git a/src/scripts/config-default.sh b/cluster/config-default.sh
similarity index 100%
rename from src/scripts/config-default.sh
rename to cluster/config-default.sh
diff --git a/src/scripts/config-test.sh b/cluster/config-test.sh
similarity index 96%
rename from src/scripts/config-test.sh
rename to cluster/config-test.sh
index 76ca5b571fa..dfaf3af06a0 100755
--- a/src/scripts/config-test.sh
+++ b/cluster/config-test.sh
@@ -14,8 +14,8 @@
 # TODO(jbeda): Provide a way to override project
 ZONE=us-central1-b
-MASTER_SIZE=f1-micro
-MINION_SIZE=f1-micro
+MASTER_SIZE=g1-small
+MINION_SIZE=g1-small
 NUM_MINIONS=2
 
 # gcloud will expand this to the latest supported image.
 IMAGE=debian-7-backports
diff --git a/src/scripts/kube-down.sh b/cluster/kube-down.sh
similarity index 100%
rename from src/scripts/kube-down.sh
rename to cluster/kube-down.sh
diff --git a/src/scripts/kube-push.sh b/cluster/kube-push.sh
similarity index 96%
rename from src/scripts/kube-push.sh
rename to cluster/kube-push.sh
index b5e3b19b3b0..c2a5ae8cb2a 100755
--- a/src/scripts/kube-push.sh
+++ b/cluster/kube-push.sh
@@ -42,7 +42,7 @@ detect-master
 
 (
   echo MASTER_RELEASE_TAR=$RELEASE_NORMALIZED/master-release.tgz
-  cat $(dirname $0)/../templates/download-release.sh
+  cat $(dirname $0)/templates/download-release.sh
   echo "echo Executing configuration"
   echo "sudo salt '*' mine.update"
   echo "sudo salt --force-color '*' state.highstate"
diff --git a/src/scripts/kube-up.sh b/cluster/kube-up.sh
similarity index 94%
rename from src/scripts/kube-up.sh
rename to cluster/kube-up.sh
index c41193c4fe3..4f7e7fdb57b 100755
--- a/src/scripts/kube-up.sh
+++ b/cluster/kube-up.sh
@@ -47,7 +47,7 @@ trap "rm -rf ${KUBE_TEMP}" EXIT
 
 get-password
 echo "Generating password: $user:$passwd"
-htpasswd -b -c /tmp/htpasswd $user $passwd
+htpasswd -b -c ${KUBE_TEMP}/htpasswd $user $passwd
 cat << EOF > ~/.kubernetes_auth
 {
   "User": "$user",
@@ -55,15 +55,15 @@ cat << EOF > ~/.kubernetes_auth
 }
 EOF
 chmod 0600 ~/.kubernetes_auth
-HTPASSWD=$(cat /tmp/htpasswd)
+HTPASSWD=$(cat ${KUBE_TEMP}/htpasswd)
 
 (
   echo "#! /bin/bash"
   echo "MASTER_NAME=${MASTER_NAME}"
   echo "MASTER_RELEASE_TAR=${RELEASE_NORMALIZED}/master-release.tgz"
   echo "MASTER_HTPASSWD='${HTPASSWD}'"
-  cat $(dirname $0)/../templates/download-release.sh
-  cat $(dirname $0)/../templates/salt-master.sh
+  cat $(dirname $0)/templates/download-release.sh
+  cat $(dirname $0)/templates/salt-master.sh
 ) > ${KUBE_TEMP}/master-start.sh
 
 echo "Starting VMs and configuring firewalls"
@@ -86,7 +86,7 @@ for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
     echo "#! /bin/bash"
     echo "MASTER_NAME=${MASTER_NAME}"
     echo "MINION_IP_RANGE=${MINION_IP_RANGES[$i]}"
-    cat $(dirname $0)/../templates/salt-minion.sh
+    cat $(dirname $0)/templates/salt-minion.sh
   ) > ${KUBE_TEMP}/minion-start-${i}.sh
 
   gcloud compute instances create ${MINION_NAMES[$i]} \
@@ -132,7 +132,6 @@ done
 
 echo
 echo "Kubernetes cluster is running. Access the master at:"
-
 echo
 echo "  https://${user}:${passwd}@${KUBE_MASTER_IP}"
 echo
diff --git a/src/saltbase/pillar/mine.sls b/cluster/saltbase/pillar/mine.sls
similarity index 100%
rename from src/saltbase/pillar/mine.sls
rename to cluster/saltbase/pillar/mine.sls
diff --git a/src/saltbase/pillar/top.sls b/cluster/saltbase/pillar/top.sls
similarity index 100%
rename from src/saltbase/pillar/top.sls
rename to cluster/saltbase/pillar/top.sls
diff --git a/src/saltbase/reactor/start.sls b/cluster/saltbase/reactor/start.sls
similarity index 100%
rename from src/saltbase/reactor/start.sls
rename to cluster/saltbase/reactor/start.sls
diff --git a/src/saltbase/salt/_states/container_bridge.py b/cluster/saltbase/salt/_states/container_bridge.py
similarity index 100%
rename from src/saltbase/salt/_states/container_bridge.py
rename to cluster/saltbase/salt/_states/container_bridge.py
diff --git a/src/saltbase/salt/apiserver/default b/cluster/saltbase/salt/apiserver/default
similarity index 100%
rename from src/saltbase/salt/apiserver/default
rename to cluster/saltbase/salt/apiserver/default
diff --git a/src/saltbase/salt/apiserver/init.sls b/cluster/saltbase/salt/apiserver/init.sls
similarity index 100%
rename from src/saltbase/salt/apiserver/init.sls
rename to cluster/saltbase/salt/apiserver/init.sls
diff --git a/src/saltbase/salt/apiserver/initd b/cluster/saltbase/salt/apiserver/initd
similarity index 100%
rename from src/saltbase/salt/apiserver/initd
rename to cluster/saltbase/salt/apiserver/initd
diff --git a/src/saltbase/salt/base.sls b/cluster/saltbase/salt/base.sls
similarity index 100%
rename from src/saltbase/salt/base.sls
rename to cluster/saltbase/salt/base.sls
diff --git a/src/saltbase/salt/controller-manager/default b/cluster/saltbase/salt/controller-manager/default
similarity index 100%
rename from src/saltbase/salt/controller-manager/default
rename to cluster/saltbase/salt/controller-manager/default
diff --git a/src/saltbase/salt/controller-manager/init.sls b/cluster/saltbase/salt/controller-manager/init.sls
similarity index 100%
rename from src/saltbase/salt/controller-manager/init.sls
rename to cluster/saltbase/salt/controller-manager/init.sls
diff --git a/src/saltbase/salt/controller-manager/initd b/cluster/saltbase/salt/controller-manager/initd
similarity index 100%
rename from src/saltbase/salt/controller-manager/initd
rename to cluster/saltbase/salt/controller-manager/initd
diff --git a/src/saltbase/salt/docker/docker-defaults b/cluster/saltbase/salt/docker/docker-defaults
similarity index 100%
rename from src/saltbase/salt/docker/docker-defaults
rename to cluster/saltbase/salt/docker/docker-defaults
diff --git a/src/saltbase/salt/docker/init.sls b/cluster/saltbase/salt/docker/init.sls
similarity index 100%
rename from src/saltbase/salt/docker/init.sls
rename to cluster/saltbase/salt/docker/init.sls
diff --git a/src/saltbase/salt/etcd/etcd.conf b/cluster/saltbase/salt/etcd/etcd.conf
similarity index 100%
rename from src/saltbase/salt/etcd/etcd.conf
rename to cluster/saltbase/salt/etcd/etcd.conf
diff --git a/src/saltbase/salt/etcd/init.sls b/cluster/saltbase/salt/etcd/init.sls
similarity index 100%
rename from src/saltbase/salt/etcd/init.sls
rename to cluster/saltbase/salt/etcd/init.sls
diff --git a/src/saltbase/salt/etcd/initd b/cluster/saltbase/salt/etcd/initd
similarity index 100%
rename from src/saltbase/salt/etcd/initd
rename to cluster/saltbase/salt/etcd/initd
diff --git a/src/saltbase/salt/golang.sls b/cluster/saltbase/salt/golang.sls
similarity index 100%
rename from src/saltbase/salt/golang.sls
rename to cluster/saltbase/salt/golang.sls
diff --git a/src/saltbase/salt/kube-proxy/default b/cluster/saltbase/salt/kube-proxy/default
similarity index 100%
rename from src/saltbase/salt/kube-proxy/default
rename to cluster/saltbase/salt/kube-proxy/default
diff --git a/src/saltbase/salt/kube-proxy/init.sls b/cluster/saltbase/salt/kube-proxy/init.sls
similarity index 100%
rename from src/saltbase/salt/kube-proxy/init.sls
rename to cluster/saltbase/salt/kube-proxy/init.sls
diff --git a/src/saltbase/salt/kube-proxy/initd b/cluster/saltbase/salt/kube-proxy/initd
similarity index 100%
rename from src/saltbase/salt/kube-proxy/initd
rename to cluster/saltbase/salt/kube-proxy/initd
diff --git a/src/saltbase/salt/kubelet/default b/cluster/saltbase/salt/kubelet/default
similarity index 100%
rename from src/saltbase/salt/kubelet/default
rename to cluster/saltbase/salt/kubelet/default
diff --git a/src/saltbase/salt/kubelet/init.sls b/cluster/saltbase/salt/kubelet/init.sls
similarity index 100%
rename from src/saltbase/salt/kubelet/init.sls
rename to cluster/saltbase/salt/kubelet/init.sls
diff --git a/src/saltbase/salt/kubelet/initd b/cluster/saltbase/salt/kubelet/initd
similarity index 100%
rename from src/saltbase/salt/kubelet/initd
rename to cluster/saltbase/salt/kubelet/initd
diff --git a/src/saltbase/salt/nginx/init.sls b/cluster/saltbase/salt/nginx/init.sls
similarity index 100%
rename from src/saltbase/salt/nginx/init.sls
rename to cluster/saltbase/salt/nginx/init.sls
diff --git a/src/saltbase/salt/nginx/kubernetes-site b/cluster/saltbase/salt/nginx/kubernetes-site
similarity index 100%
rename from src/saltbase/salt/nginx/kubernetes-site
rename to cluster/saltbase/salt/nginx/kubernetes-site
diff --git a/src/saltbase/salt/nginx/make-cert.sh b/cluster/saltbase/salt/nginx/make-cert.sh
similarity index 100%
rename from src/saltbase/salt/nginx/make-cert.sh
rename to cluster/saltbase/salt/nginx/make-cert.sh
diff --git a/src/saltbase/salt/nginx/nginx.conf b/cluster/saltbase/salt/nginx/nginx.conf
similarity index 100%
rename from src/saltbase/salt/nginx/nginx.conf
rename to cluster/saltbase/salt/nginx/nginx.conf
diff --git a/src/saltbase/salt/top.sls b/cluster/saltbase/salt/top.sls
similarity index 100%
rename from src/saltbase/salt/top.sls
rename to cluster/saltbase/salt/top.sls
diff --git a/src/templates/download-release.sh b/cluster/templates/download-release.sh
similarity index 100%
rename from src/templates/download-release.sh
rename to cluster/templates/download-release.sh
diff --git a/src/templates/salt-master.sh b/cluster/templates/salt-master.sh
similarity index 100%
rename from src/templates/salt-master.sh
rename to cluster/templates/salt-master.sh
diff --git a/src/templates/salt-minion.sh b/cluster/templates/salt-minion.sh
similarity index 100%
rename from src/templates/salt-minion.sh
rename to cluster/templates/salt-minion.sh
diff --git a/src/scripts/util.sh b/cluster/util.sh
similarity index 100%
rename from src/scripts/util.sh
rename to cluster/util.sh
diff --git a/examples/guestbook/guestbook.md b/examples/guestbook/guestbook.md
index 12265de556e..9c678dc5b42 100644
--- a/examples/guestbook/guestbook.md
+++ b/examples/guestbook/guestbook.md
@@ -10,7 +10,7 @@ This example assumes that you have forked the repository and turned up a Kuberne
 
 ### Step One: Turn up the redis master.
 
-Create a file named redis-master.json, this file is describes a single task, which runs a redis key-value server in a container.
+Create a file named `redis-master.json`. This file describes a single task, which runs a redis key-value server in a container.
 
 ```javascript
 {
@@ -22,7 +22,7 @@ Create a file named redis-master.json, this file is describes a single task, whi
     "image": "dockerfile/redis",
     "ports": [{
       "containerPort": 6379,
-      "hostPort": 6379 
+      "hostPort": 6379
     }]
   }]
 }
@@ -33,16 +33,16 @@ Create a file named redis-master.json, this file is describes a single task, whi
 }
 ```
 
-Once you have that task file, you can create the redis task in your Kubernetes cluster using the cloudcfg cli:
+Once you have that task file, you can create the redis task in your Kubernetes cluster using the `cloudcfg` cli:
 
 ```shell
-./src/scripts/cloudcfg.sh -c redis-master.json create /tasks
+cluster/cloudcfg.sh -c redis-master.json create /tasks
 ```
 
 Once that's up you can list the tasks in the cluster, to verify that the master is running:
 
 ```shell
-./src/scripts/cloudcfg.sh list /tasks
+cluster/cloudcfg.sh list /tasks
 ```
 
 You should see a single redis master task. It will also display the machine that the task is running on. If you ssh to that machine, you can run
@@ -50,10 +50,10 @@ You should see a single redis master task. It will also display the machine tha
 sudo docker ps
 ```
 
-And see the actual task. (Note that initial ```docker pull``` may take a few minutes, depending on network conditions.)
+And see the actual task. (Note that the initial `docker pull` may take a few minutes, depending on network conditions.)
 
 ### Step Two: Turn up the master service.
 
-A Kubernetes 'service' is named load balancer that proxies traffic to one or more containers. The services in a Kubernetes cluster are discoverable inside other containers via environment variables. Services find the containers to load balance based on task labels. The task that you created in Step One has the label "name=redis-master", so the corresponding service is defined by that label. Create a file named redis-master-service.json that contains:
+A Kubernetes 'service' is a named load balancer that proxies traffic to one or more containers. The services in a Kubernetes cluster are discoverable inside other containers via environment variables. Services find the containers to load balance based on task labels. The task that you created in Step One has the label `name=redis-master`, so the corresponding service is defined by that label. Create a file named `redis-master-service.json` that contains:
 ```javascript
 {
@@ -65,16 +65,16 @@ A Kubernetes 'service' is named load balancer that proxies traffic to one or mor
 }
 ```
 
-Once you have that service description, you can create the service with the cloudcfg cli:
+Once you have that service description, you can create the service with the `cloudcfg` cli:
 
 ```shell
-./src/scripts/cloudcfg.sh -c redis-master-service.json create /services
+cluster/cloudcfg.sh -c redis-master-service.json create /services
 ```
 
 Once created, the service proxy on each minion is configured to set up a proxy on the specified port (in this case port 10000).
 
 ### Step Three: Turn up the replicated slave service.
 
-Although the redis master is a single task, the redis read slaves are a 'replicated' task, in Kubernetes, a replication controller is responsible for managing multiple instances of a replicated task. Create a file named redis-slave-controller.json that contains:
+Although the redis master is a single task, the redis read slaves are a 'replicated' task. In Kubernetes, a replication controller is responsible for managing multiple instances of a replicated task. Create a file named `redis-slave-controller.json` that contains:
 ```javascript
 {
@@ -100,7 +100,7 @@ Although the redis master is a single task, the redis read slaves are a 'replica
 Then you can create the service by running:
 
 ```shell
-./src/scripts/cloudcfg.sh -c redis-slave-controller.json create /replicationControllers
+cluster/cloudcfg.sh -c redis-slave-controller.json create /replicationControllers
 ```
 
 The redis slave configures itself by looking for the Kubernetes service environment variables in the container environment. In particular, the redis slave is started with the following command:
@@ -112,7 +112,7 @@ redis-server --slaveof $SERVICE_HOST $REDISMASTER_SERVICE_PORT
 Once that's up you can list the tasks in the cluster, to verify that the master and slaves are running:
 
 ```shell
-./src/scripts/cloudcfg.sh list /tasks
+cluster/cloudcfg.sh list /tasks
 ```
 
 You should see a single redis master task, and two redis slave tasks.
@@ -131,17 +131,17 @@ Just like the master, we want to have a service to proxy connections to the read
 }
 ```
 
-This time the label query for the service is 'name=redis-slave'.
+This time the label query for the service is `name=redis-slave`.
 
-Now that you have created the service specification, create it in your cluster with the cloudcfg cli:
+Now that you have created the service specification, create it in your cluster with the `cloudcfg` cli:
 
 ```shell
-./src/scripts/cloudcfg.sh -c redis-slave-service.json create /services
+cluster/cloudcfg.sh -c redis-slave-service.json create /services
 ```
 
 ### Step Five: Create the frontend service.
 
-This is a simple PHP server that is configured to talk to both the slave and master services depdending on if the request is a read or a write. It exposes a simple AJAX interface, and serves an angular based U/X. Like the redis read slaves it is a replicated service instantiated by a replication controller. Create a file named frontend-controller.json:
+This is a simple PHP server that is configured to talk to both the slave and master services, depending on whether the request is a read or a write. It exposes a simple AJAX interface, and serves an Angular-based UX. Like the redis read slaves, it is a replicated service instantiated by a replication controller. Create a file named `frontend-controller.json`:
 
 ```javascript
 {
@@ -167,13 +167,13 @@ This is a simple PHP server that is configured to talk to both the slave and mas
 With this file, you can turn up your frontend with:
 
 ```shell
-./src/scripts/cloudcfg.sh -c frontend-controller.json create /replicationControllers
+cluster/cloudcfg.sh -c frontend-controller.json create /replicationControllers
 ```
 
 Once that's up you can list the tasks in the cluster, to verify that the master, slaves and frontends are running:
 
 ```shell
-./src/scripts/cloudcfg.sh list /tasks
+cluster/cloudcfg.sh list /tasks
 ```
 
 You should see a single redis master task, two redis slave and three frontend tasks.
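Reviewer note: to sanity-check the renamed paths above, here is the guestbook walkthrough collapsed into one script. This is a minimal sketch built only from commands that appear in the updated guestbook.md; it assumes a cluster is already up (hack/dev-build-and-up.sh) and that the five JSON files from the walkthrough exist in the working directory.

```shell
#!/bin/bash
# Bring up the guestbook example end to end via the relocated wrapper script.
cluster/cloudcfg.sh -c redis-master.json create /tasks
cluster/cloudcfg.sh -c redis-master-service.json create /services
cluster/cloudcfg.sh -c redis-slave-controller.json create /replicationControllers
cluster/cloudcfg.sh -c redis-slave-service.json create /services
cluster/cloudcfg.sh -c frontend-controller.json create /replicationControllers

# Verify: expect one redis master, two redis slave, and three frontend tasks.
cluster/cloudcfg.sh list /tasks
```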
diff --git a/src/scripts/build-go.sh b/hack/build-go.sh
similarity index 100%
rename from src/scripts/build-go.sh
rename to hack/build-go.sh
diff --git a/src/scripts/config-go.sh b/hack/config-go.sh
similarity index 94%
rename from src/scripts/config-go.sh
rename to hack/config-go.sh
index 1f9d6e3c410..dc79f0ea292 100755
--- a/src/scripts/config-go.sh
+++ b/hack/config-go.sh
@@ -15,9 +15,9 @@
 # This script sets up a go workspace locally and builds all go components.
 # You can 'source' this file if you want to set up GOPATH in your local shell.
 
-pushd $(dirname "${BASH_SOURCE}")/../.. >/dev/null
+pushd $(dirname "${BASH_SOURCE}")/.. >/dev/null
 KUBE_REPO_ROOT="${PWD}"
-KUBE_TARGET="${KUBE_REPO_ROOT}/target"
+KUBE_TARGET="${KUBE_REPO_ROOT}/output/go"
 popd >/dev/null
 
 mkdir -p "${KUBE_TARGET}"
diff --git a/src/scripts/dev-build-and-push.sh b/hack/dev-build-and-push.sh
similarity index 95%
rename from src/scripts/dev-build-and-push.sh
rename to hack/dev-build-and-push.sh
index 98352b3c346..a5a83ae7ad1 100755
--- a/src/scripts/dev-build-and-push.sh
+++ b/hack/dev-build-and-push.sh
@@ -20,4 +20,4 @@
 $(dirname $0)/../release/release.sh
 
 # Now push this out to the cluster
-$(dirname $0)/kube-push.sh
+$(dirname $0)/../cluster/kube-push.sh
diff --git a/src/scripts/dev-build-and-up.sh b/hack/dev-build-and-up.sh
similarity index 95%
rename from src/scripts/dev-build-and-up.sh
rename to hack/dev-build-and-up.sh
index 12c25242916..c346a5dfe94 100755
--- a/src/scripts/dev-build-and-up.sh
+++ b/hack/dev-build-and-up.sh
@@ -21,4 +21,4 @@
 $(dirname $0)/../release/release.sh
 
 # Now bring a new cluster up with that release.
-$(dirname $0)/kube-up.sh
+$(dirname $0)/../cluster/kube-up.sh
diff --git a/src/scripts/e2e-test.sh b/hack/e2e-test.sh
similarity index 87%
rename from src/scripts/e2e-test.sh
rename to hack/e2e-test.sh
index 457b763e23f..ff59d8791d7 100755
--- a/src/scripts/e2e-test.sh
+++ b/hack/e2e-test.sh
@@ -22,23 +22,23 @@ set -e
 
 # Use testing config
 export KUBE_CONFIG_FILE="config-test.sh"
-source $(dirname $0)/util.sh
+source $(dirname $0)/../cluster/util.sh
 
 # Build a release
 $(dirname $0)/../release/release.sh
 
 # Now bring a test cluster up with that release.
-$(dirname $0)/kube-up.sh
+$(dirname $0)/../cluster/kube-up.sh
 
 # Auto shutdown cluster when we exit
 function shutdown-test-cluster () {
   echo "Shutting down test cluster in background."
-  $(dirname $0)/kube-down.sh > /dev/null &
+  $(dirname $0)/../cluster/kube-down.sh > /dev/null &
 }
 trap shutdown-test-cluster EXIT
 
 # Launch a container
-$(dirname $0)/cloudcfg.sh -p 8080:80 run dockerfile/nginx 2 myNginx
+$(dirname $0)/../cluster/cloudcfg.sh -p 8080:80 run dockerfile/nginx 2 myNginx
 
 # Get minion IP addresses
 detect-minions
diff --git a/src/scripts/integration-test.sh b/hack/integration-test.sh
similarity index 76%
rename from src/scripts/integration-test.sh
rename to hack/integration-test.sh
index 6fdd5f98113..cc0b79f8485 100755
--- a/src/scripts/integration-test.sh
+++ b/hack/integration-test.sh
@@ -22,13 +22,16 @@ fi
 
 # Stop right away if the build fails
 set -e
 
-./src/scripts/build-go.sh
+$(dirname $0)/build-go.sh
 
-etcd -name test -data-dir /tmp/foo > /tmp/etcd.log &
+ETCD_DIR=$(mktemp -d -t kube-integration.XXXXXX)
+trap "rm -rf ${ETCD_DIR}" EXIT
+
+etcd -name test -data-dir ${ETCD_DIR} > /tmp/etcd.log &
+ETCD_PID=$!
 
 sleep 5
-./target/integration
+$(dirname $0)/../output/go/integration
 
-killall etcd
-rm -rf /tmp/foo
+kill $ETCD_PID
diff --git a/src/scripts/test-go.sh b/hack/test-go.sh
similarity index 100%
rename from src/scripts/test-go.sh
rename to hack/test-go.sh
diff --git a/src/release/config.sh b/release/config.sh
similarity index 100%
rename from src/release/config.sh
rename to release/config.sh
diff --git a/src/release/launch-kubernetes-base.sh b/release/launch-kubernetes-base.sh
similarity index 100%
rename from src/release/launch-kubernetes-base.sh
rename to release/launch-kubernetes-base.sh
diff --git a/release/make-public-gcs-acl.py b/release/make-public-gcs-acl.py
new file mode 100644
index 00000000000..301616c3076
--- /dev/null
+++ b/release/make-public-gcs-acl.py
@@ -0,0 +1,26 @@
+# Copyright 2014 Google Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This is a quick script that adds AllUsers as READER to a JSON file
+# representing an ACL on a GCS object. This is a quick workaround for a bug in
+# gsutil.
+import json
+import sys
+
+acl = json.load(sys.stdin)
+acl.append({
+    "entity": "allUsers",
+    "role": "READER"
+    })
+json.dump(acl, sys.stdout)
diff --git a/src/release/master-release-install.sh b/release/master-release-install.sh
similarity index 100%
rename from src/release/master-release-install.sh
rename to release/master-release-install.sh
diff --git a/src/release/release.sh b/release/release.sh
similarity index 58%
rename from src/release/release.sh
rename to release/release.sh
index 7de75467dc8..c1272d681cb 100755
--- a/src/release/release.sh
+++ b/release/release.sh
@@ -25,21 +25,21 @@ set -e
 
 source $(dirname $0)/config.sh
 
-cd $(dirname $0)/../..
+cd $(dirname $0)/..
 
 # First build the release tar. This gets copied on to the master and installed
 # from there. It includes the go source for the necessary servers along with
 # the salt configs.
-rm -rf release/*
+rm -rf output/release/*
 
-MASTER_RELEASE_DIR=release/master-release
+MASTER_RELEASE_DIR=output/release/master-release
 mkdir -p $MASTER_RELEASE_DIR/bin
 mkdir -p $MASTER_RELEASE_DIR/src/scripts
 mkdir -p $MASTER_RELEASE_DIR/third_party/go
 
 echo "Building release tree"
-cp src/release/master-release-install.sh $MASTER_RELEASE_DIR/src/scripts/master-release-install.sh
-cp -r src/saltbase $MASTER_RELEASE_DIR/src/saltbase
+cp release/master-release-install.sh $MASTER_RELEASE_DIR/src/scripts/master-release-install.sh
+cp -r cluster/saltbase $MASTER_RELEASE_DIR/src/saltbase
 cp -r third_party $MASTER_RELEASE_DIR/third_party/go/src
 
 function find_go_files() {
@@ -56,41 +56,44 @@ for f in $(find_go_files); do
 done
 
 echo "Packaging release"
-tar cz -C release -f release/master-release.tgz master-release
+tar cz -C output/release -f output/release/master-release.tgz master-release
 
 echo "Building launch script"
 
 # Create the local install script. These are the tools to install the local
 # tools and launch a new cluster.
-LOCAL_RELEASE_DIR=release/local-release
-mkdir -p $LOCAL_RELEASE_DIR/src
+LOCAL_RELEASE_DIR=output/release/local-release
+mkdir -p $LOCAL_RELEASE_DIR/src/scripts
 
-cp -r src/templates $LOCAL_RELEASE_DIR/src/templates
-cp -r src/scripts $LOCAL_RELEASE_DIR/src/scripts
+cp -r cluster/templates $LOCAL_RELEASE_DIR/src/templates
+cp -r cluster/*.sh $LOCAL_RELEASE_DIR/src/scripts
 
-tar cz -C $LOCAL_RELEASE_DIR -f release/launch-kubernetes.tgz .
+tar cz -C $LOCAL_RELEASE_DIR -f output/release/launch-kubernetes.tgz .
 
-echo "#!/bin/bash" >> release/launch-kubernetes.sh
-echo "RELEASE_TAG=$RELEASE_TAG" >> release/launch-kubernetes.sh
-echo "RELEASE_PREFIX=$RELEASE_PREFIX" >> release/launch-kubernetes.sh
-echo "RELEASE_NAME=$RELEASE_NAME" >> release/launch-kubernetes.sh
-echo "RELEASE_FULL_PATH=$RELEASE_FULL_PATH" >> release/launch-kubernetes.sh
-cat src/release/launch-kubernetes-base.sh >> release/launch-kubernetes.sh
-chmod a+x release/launch-kubernetes.sh
+echo "#!/bin/bash" >> output/release/launch-kubernetes.sh
+echo "RELEASE_TAG=$RELEASE_TAG" >> output/release/launch-kubernetes.sh
+echo "RELEASE_PREFIX=$RELEASE_PREFIX" >> output/release/launch-kubernetes.sh
+echo "RELEASE_NAME=$RELEASE_NAME" >> output/release/launch-kubernetes.sh
+echo "RELEASE_FULL_PATH=$RELEASE_FULL_PATH" >> output/release/launch-kubernetes.sh
+cat release/launch-kubernetes-base.sh >> output/release/launch-kubernetes.sh
+chmod a+x output/release/launch-kubernetes.sh
 # Now copy everything up to the release structure on GS
 echo "Uploading to Google Storage"
-if ! gsutil ls $RELEASE_BUCKET > /dev/null; then
+if ! gsutil ls $RELEASE_BUCKET > /dev/null 2>&1 ; then
   echo "Creating $RELEASE_BUCKET"
   gsutil mb $RELEASE_BUCKET
 fi
 
 for x in master-release.tgz launch-kubernetes.tgz launch-kubernetes.sh; do
-  gsutil -q cp release/$x $RELEASE_FULL_PATH/$x
+  gsutil -q cp output/release/$x $RELEASE_FULL_PATH/$x
   make_public_readable $RELEASE_FULL_PATH/$x
 done
 
 set_tag $RELEASE_FULL_TAG_PATH $RELEASE_FULL_PATH
-echo "Release pushed ($RELEASE_PREFIX$RELEASE_NAME). Launch with:"
-echo
-echo "  curl -s -L ${RELEASE_FULL_PATH/gs:\/\//http://storage.googleapis.com/}/launch-kubernetes.sh | bash"
-echo
+echo "Release pushed ($RELEASE_PREFIX$RELEASE_NAME)."
+
+# This isn't quite working right now. Need to figure out packaging the cloudcfg tool.
+# echo "  Launch with:"
+# echo
+# echo "  curl -s -L ${RELEASE_FULL_PATH/gs:\/\//http://storage.googleapis.com/}/launch-kubernetes.sh | bash"
+# echo
diff --git a/src/release/make-public-gcs-acl.py b/src/release/make-public-gcs-acl.py
deleted file mode 100644
index de58e7dd5ca..00000000000
--- a/src/release/make-public-gcs-acl.py
+++ /dev/null
@@ -1,12 +0,0 @@
-# This is a quick script that adds AllUsers as READER to a JSON file
-# representing an ACL on a GCS object. This is a quick workaround for a bug in
-# gsutil.
-import json
-import sys
-
-acl = json.load(sys.stdin)
-acl.append({
-    "entity": "allUsers",
-    "role": "READER"
-    })
-json.dump(acl, sys.stdout)
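Reviewer note: release/make-public-gcs-acl.py only rewrites an ACL JSON document on stdin/stdout; the actual gsutil wiring lives in make_public_readable, which is defined in release/config.sh and is not part of this diff. The snippet below is a plausible sketch of that wiring, not the repo's implementation, and it assumes a gsutil version whose `acl get`/`acl set` subcommands emit and accept JSON ACLs.

```shell
#!/bin/bash
# Hypothetical wiring (an assumption, not taken from this diff): fetch the
# object's current ACL, append the allUsers READER entry via the script,
# and write the modified ACL back to the object.
ACL_TMP=$(mktemp -t public-acl.XXXXXX)
gsutil acl get $RELEASE_FULL_PATH/master-release.tgz \
  | python release/make-public-gcs-acl.py > $ACL_TMP
gsutil acl set $ACL_TMP $RELEASE_FULL_PATH/master-release.tgz
rm -f $ACL_TMP
```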