Implementing standalone etcd for openshift
Commit ea3998e981, parent 5b216d8a51 (mirror of https://github.com/k3s-io/kubernetes.git)
@@ -283,8 +283,14 @@ func TestExampleObjectSchemas(t *testing.T) {
		"pod": &api.Pod{},
	},
	"../examples/openshift-origin": {
		"openshift-controller": &api.ReplicationController{},
		"openshift-service": &api.Service{},
		"openshift-origin-namespace": &api.Namespace{},
		"openshift-controller": &api.ReplicationController{},
		"openshift-service": &api.Service{},
		"etcd-controller": &api.ReplicationController{},
		"etcd-service": &api.Service{},
		"etcd-discovery-controller": &api.ReplicationController{},
		"etcd-discovery-service": &api.Service{},
		"secret": nil,
	},
	"../examples/phabricator": {
		"authenticator-controller": &api.ReplicationController{},
examples/openshift-origin/.gitignore (vendored, 2 changes)
@@ -1,3 +1 @@
config/
secret.json
*.log
@@ -35,12 +35,11 @@ Documentation for other releases can be found at

This example shows how to run OpenShift Origin as a pod on an existing Kubernetes cluster.

OpenShift Origin runs with a rich set of role based policy rules out of the box that requires authentication from users
via certificates. When run as a pod on an existing Kubernetes cluster, it proxies access to the underlying Kubernetes services
to provide security.
OpenShift Origin runs with a rich set of role based policy rules out of the box that requires authentication from users via certificates. When run as a pod on an existing Kubernetes cluster, it proxies access to the underlying Kubernetes services to provide security.

As a result, this example is a complex end-to-end configuration that shows how to configure certificates for a service that runs
on Kubernetes, and requires a number of configuration files to be injected dynamically via a secret volume to the pod.
As a result, this example is a complex end-to-end configuration that shows how to configure certificates for a service that runs on Kubernetes, and requires a number of configuration files to be injected dynamically via a secret volume to the pod.

This example will create a pod running the OpenShift Origin master. In addition, it will run a three-pod etcd setup to hold OpenShift content. OpenShift embeds Kubernetes in the stand-alone setup, so the configuration for OpenShift when it is running against an external Kubernetes cluster is different: content specific to Kubernetes will be stored in the Kubernetes etcd repository (i.e. pods, services, replication controllers, etc.), but OpenShift specific content (builds, images, users, policies, etc.) is stored in its own etcd setup.

### Step 0: Prerequisites

@@ -59,10 +58,11 @@ $ vi cluster/saltbase/pillar/privilege.sls
allow_privileged: true
```

Now spin up a cluster using your preferred KUBERNETES_PROVIDER
Now spin up a cluster using your preferred KUBERNETES_PROVIDER. Remember that `kube-up.sh` may start other pods on your minion nodes, so ensure that you have enough resources to run the five pods for this example.

```sh
$ export KUBERNETES_PROVIDER=gce
$ export KUBERNETES_PROVIDER=${YOUR_PROVIDER}
$ cluster/kube-up.sh
```

@@ -72,6 +72,20 @@ Next, let's setup some variables, and create a local folder that will hold gener
$ export OPENSHIFT_EXAMPLE=$(pwd)/examples/openshift-origin
$ export OPENSHIFT_CONFIG=${OPENSHIFT_EXAMPLE}/config
$ mkdir ${OPENSHIFT_CONFIG}

$ export ETCD_INITIAL_CLUSTER_TOKEN=$(python -c "import string; import random; print(''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(40)))")
$ export ETCD_DISCOVERY_TOKEN=$(python -c "import string; import random; print(\"etcd-cluster-\" + ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(5)))")
$ sed -i.bak -e "s/INSERT_ETCD_INITIAL_CLUSTER_TOKEN/\"${ETCD_INITIAL_CLUSTER_TOKEN}\"/g" -e "s/INSERT_ETCD_DISCOVERY_TOKEN/\"${ETCD_DISCOVERY_TOKEN}\"/g" ${OPENSHIFT_EXAMPLE}/etcd-controller.yaml
```
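
The two tokens conform to `[a-z0-9]{40}` and `etcd-cluster-[a-z0-9]{5}` respectively (the same formats are documented in the comments of `create.sh` later in this commit). The values below are purely illustrative; yours will differ:

```sh
$ echo ${ETCD_INITIAL_CLUSTER_TOKEN}
k2v9q0x7m3p8d1c6b4n5a0z2w9e8r7t6y5u4i3o1
$ echo ${ETCD_DISCOVERY_TOKEN}
etcd-cluster-a1b2c
```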

This will have created an `etcd-controller.yaml.bak` file in your directory, which you should remember to restore when doing cleanup (or use the given `cleanup.sh`). Finally, let's start up the external etcd pods and the discovery service necessary for their initialization:

```sh
$ kubectl create -f examples/openshift-origin/openshift-origin-namespace.yaml
$ kubectl create -f examples/openshift-origin/etcd-discovery-controller.yaml --namespace="openshift-origin"
$ kubectl create -f examples/openshift-origin/etcd-discovery-service.yaml --namespace="openshift-origin"
$ kubectl create -f examples/openshift-origin/etcd-controller.yaml --namespace="openshift-origin"
$ kubectl create -f examples/openshift-origin/etcd-service.yaml --namespace="openshift-origin"
```
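
Before moving on, it is worth checking that the discovery pod and the three etcd members actually come up. A quick check such as the following (label selectors taken from the controllers added in this commit) should eventually show all pods in the `Running` state:

```sh
$ kubectl get pods --namespace="openshift-origin" -l name=etcd-discovery
$ kubectl get pods --namespace="openshift-origin" -l name=etcd
```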

### Step 1: Export your Kubernetes configuration file for use by OpenShift pod

@@ -82,45 +96,74 @@ OpenShift Origin uses a configuration file to know how to access your Kubernetes
$ cluster/kubectl.sh config view --output=yaml --flatten=true --minify=true > ${OPENSHIFT_CONFIG}/kubeconfig
```

The output from this command will contain a single file that has all the required information needed to connect to your
Kubernetes cluster that you previously provisioned. This file should be considered sensitive, so do not share this file with
untrusted parties.
The output from this command will contain a single file that has all the required information needed to connect to your Kubernetes cluster that you previously provisioned. This file should be considered sensitive, so do not share this file with untrusted parties.

We will later use this file to tell OpenShift how to bootstrap its own configuration.
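
For reference, the flattened, minified kubeconfig produced above generally has the shape sketched below; the names and base64 blobs are placeholders, not values from a real cluster:

```yaml
apiVersion: v1
kind: Config
current-context: my-cluster-context        # placeholder context name
clusters:
- name: my-cluster                         # placeholder cluster name
  cluster:
    server: https://1.2.3.4                # API server address of your cluster
    certificate-authority-data: <base64 CA bundle>
contexts:
- name: my-cluster-context
  context:
    cluster: my-cluster
    user: admin
users:
- name: admin
  user:
    client-certificate-data: <base64 client certificate>
    client-key-data: <base64 client key>
```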

### Step 2: Create an External Load Balancer to Route Traffic to OpenShift

An external load balancer is needed to route traffic to our OpenShift master service that will run as a pod on your
Kubernetes cluster.
An external load balancer is needed to route traffic to our OpenShift master service that will run as a pod on your Kubernetes cluster.

```sh
$ cluster/kubectl.sh create -f $OPENSHIFT_EXAMPLE/openshift-service.yaml
$ cluster/kubectl.sh create -f $OPENSHIFT_EXAMPLE/openshift-service.yaml --namespace="openshift-origin"
```

### Step 3: Generate configuration file for your OpenShift master pod

The OpenShift master requires a configuration file as input to know how to bootstrap the system.

In order to build this configuration file, we need to know the public IP address of our external load balancer in order to
build default certificates.
In order to build this configuration file, we need to know the public IP address of our external load balancer in order to build default certificates.

Grab the public IP address of the service we previously created: the two-line script below will attempt to do so, but make sure to check that the IP was set as a result - if it was not, try again after a couple of seconds.

Grab the public IP address of the service we previously created.

```sh
$ export PUBLIC_IP=$(cluster/kubectl.sh get services openshift --template="{{ index .status.loadBalancer.ingress 0 \"ip\" }}")
$ echo $PUBLIC_IP
$ export PUBLIC_OPENSHIFT_IP=$(kubectl get services openshift --namespace="openshift-origin" --template="{{ index .status.loadBalancer.ingress 0 \"ip\" }}")
$ echo ${PUBLIC_OPENSHIFT_IP}
```

You can automate the process with the following script, as it might take more than a minute for the IP to be set and discoverable.

```shell
$ while [ ${#PUBLIC_OPENSHIFT_IP} -lt 1 ]; do
    echo -n .
    sleep 1
    {
      export PUBLIC_OPENSHIFT_IP=$(kubectl get services openshift --namespace="openshift-origin" --template="{{ index .status.loadBalancer.ingress 0 \"ip\" }}")
    } 2> ${OPENSHIFT_EXAMPLE}/openshift-startup.log
    if [[ ! ${PUBLIC_OPENSHIFT_IP} =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
      export PUBLIC_OPENSHIFT_IP=""
    fi
  done
$ echo
$ echo "Public OpenShift IP set to: ${PUBLIC_OPENSHIFT_IP}"
```

Ensure you have a valid PUBLIC_OPENSHIFT_IP address before continuing in the example.

We now need to run a command on your host to generate a proper OpenShift configuration. To do this, we will volume mount the configuration directory that holds your Kubernetes kubeconfig file from the prior step.

```sh
docker run --privileged -v ${OPENSHIFT_CONFIG}:/config openshift/origin start master --write-config=/config --kubeconfig='/config/kubeconfig' --master='https://localhost:8443' --public-master='https://${PUBLIC_IP}:8443'
$ docker run --privileged -v ${OPENSHIFT_CONFIG}:/config openshift/origin start master --write-config=/config --kubeconfig=/config/kubeconfig --master=https://localhost:8443 --public-master=https://${PUBLIC_OPENSHIFT_IP}:8443 --etcd=http://etcd:2379
```

You should now see a number of certificates minted in your configuration directory, as well as a master-config.yaml file that tells the OpenShift master how to execute. In the next step, we will bundle this into a Kubernetes Secret that our OpenShift master pod will consume.
You should now see a number of certificates minted in your configuration directory, as well as a master-config.yaml file that tells the OpenShift master how to execute. We need to make some adjustments to this configuration directory in order to allow the OpenShift cluster to use Kubernetes serviceaccounts. First, write the Kubernetes service account key to the `${OPENSHIFT_CONFIG}` directory. The following script assumes you are using GCE. If you are not, use `scp` or `ssh` to get the key from the master node running Kubernetes. It is usually located at `/srv/kubernetes/server.key`.

```shell
$ export ZONE=$(gcloud compute instances list | grep "${KUBE_GCE_INSTANCE_PREFIX}\-master" | awk '{print $2}' | head -1)
$ echo "sudo cat /srv/kubernetes/server.key; exit;" | gcloud compute ssh ${KUBE_GCE_INSTANCE_PREFIX}-master --zone ${ZONE} | grep -Ex "(^\-.*\-$|^\S+$)" > ${OPENSHIFT_CONFIG}/serviceaccounts.private.key

```
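
If you are not on GCE, the equivalent manual step is to copy the key off the master yourself. A sketch, assuming a master reachable over SSH as `kubernetes-master` (the hostname and passwordless sudo are assumptions; adjust for your environment):

```sh
# assumes the master is reachable as "kubernetes-master" and sudo does not prompt for a password
$ ssh kubernetes-master "sudo cat /srv/kubernetes/server.key" > ${OPENSHIFT_CONFIG}/serviceaccounts.private.key
```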

Although we are retrieving the private key from the Kubernetes master, OpenShift will take care of the conversion for us so that serviceaccounts are created with the public key. Edit your `master-config.yaml` file in the `${OPENSHIFT_CONFIG}` directory to add `serviceaccounts.private.key` to the list of `publicKeyFiles`:

```shell
$ sed -i -e 's/publicKeyFiles:.*$/publicKeyFiles:/g' -e '/publicKeyFiles:/a \ \ - serviceaccounts.private.key' ${OPENSHIFT_CONFIG}/master-config.yaml
```
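
After the `sed` edit, the relevant stanza of `master-config.yaml` should look roughly like the sketch below. The parent `serviceAccountConfig` key and the omitted sibling fields are assumptions about the generated config layout; the essential result is the `publicKeyFiles` list containing the copied key:

```yaml
serviceAccountConfig:
  # ...other generated fields left untouched...
  publicKeyFiles:
  - serviceaccounts.private.key
```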

Now, the configuration files are complete. In the next step, we will bundle the resulting configuration into a Kubernetes Secret that our OpenShift master pod will consume.

### Step 4: Bundle the configuration into a Secret

@@ -137,13 +180,13 @@ $ sudo -E chown -R ${USER} ${OPENSHIFT_CONFIG}
Then run the following command to collapse them into a Kubernetes secret.

```sh
docker run -i -t --privileged -e="OPENSHIFTCONFIG=/config/admin.kubeconfig" -v ${OPENSHIFT_CONFIG}:/config openshift/origin ex bundle-secret openshift-config -f /config &> ${OPENSHIFT_EXAMPLE}/secret.json
$ docker run -it --privileged -e="KUBECONFIG=/config/admin.kubeconfig" -v ${OPENSHIFT_CONFIG}:/config openshift/origin cli secrets new openshift-config /config -o json &> examples/openshift-origin/secret.json
```

Now, let's create the secret in your Kubernetes cluster.

```sh
$ cluster/kubectl.sh create -f ${OPENSHIFT_EXAMPLE}/secret.json
$ cluster/kubectl.sh create -f examples/openshift-origin/secret.json --namespace="openshift-origin"
```

**NOTE: This secret is sensitive and should not be shared with untrusted parties.**

@@ -156,7 +199,7 @@ We will deploy a pod that runs the OpenShift master. The OpenShift master will
system to manage Kubernetes specific resources. For the sake of simplicity, the OpenShift master will run with an embedded etcd to hold OpenShift specific content. This demonstration will evolve in the future to show how to run etcd in a pod so that content is not destroyed if the OpenShift master fails.

```sh
$ cluster/kubectl.sh create -f ${OPENSHIFT_EXAMPLE}/openshift-controller.yaml
$ cluster/kubectl.sh create -f ${OPENSHIFT_EXAMPLE}/openshift-controller.yaml --namespace="openshift-origin"
```

You should now get a pod provisioned whose name begins with openshift.

@@ -172,7 +215,7 @@ Running: cluster/../cluster/gce/../../cluster/../_output/dockerized/bin/linux/am
Depending upon your cloud provider, you may need to open up an external firewall rule for tcp:8443. For GCE, you can run the following:

```sh
gcloud compute --project "your-project" firewall-rules create "origin" --allow tcp:8443 --network "your-network" --source-ranges "0.0.0.0/0"
$ gcloud compute --project "your-project" firewall-rules create "origin" --allow tcp:8443 --network "your-network" --source-ranges "0.0.0.0/0"
```

Consult your cloud provider's documentation for more information.
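
If your cluster runs on AWS instead, the analogous step is to open tcp:8443 in the security group attached to your nodes; a sketch using the AWS CLI (the security group ID is a placeholder):

```sh
# open tcp/8443 to the world on a hypothetical security group
$ aws ec2 authorize-security-group-ingress --group-id sg-0123456789abcdef0 --protocol tcp --port 8443 --cidr 0.0.0.0/0
```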

@@ -187,6 +230,14 @@ $ osc config use-context public-default
$ osc --help
```

## Cleanup

Clean up your cluster from resources created with this example:

```sh
$ ${OPENSHIFT_EXAMPLE}/cleanup.sh
```

<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[]()
@@ -15,11 +15,29 @@
# limitations under the License.

# Cleans up resources from the example, assumed to be run from Kubernetes repo root

echo
echo
export OPENSHIFT_EXAMPLE=$(pwd)/examples/openshift-origin
export OPENSHIFT_CONFIG=${OPENSHIFT_EXAMPLE}/config
rm -fr ${OPENSHIFT_CONFIG}
cluster/kubectl.sh delete secrets openshift-config
cluster/kubectl.sh stop rc openshift
cluster/kubectl.sh delete rc openshift
cluster/kubectl.sh delete services openshift

echo "===> Removing the OpenShift namespace:"
kubectl delete namespace openshift-origin
echo

echo "===> Removing local files:"
rm -rf ${OPENSHIFT_CONFIG}
rm ${OPENSHIFT_EXAMPLE}/openshift-startup.log
rm ${OPENSHIFT_EXAMPLE}/secret.json
touch ${OPENSHIFT_EXAMPLE}/secret.json
echo

echo "===> Restoring changed YAML specifications:"
if [ -f "${OPENSHIFT_EXAMPLE}/etcd-controller.yaml.bak" ]; then
  rm ${OPENSHIFT_EXAMPLE}/etcd-controller.yaml
  mv -v ${OPENSHIFT_EXAMPLE}/etcd-controller.yaml.bak ${OPENSHIFT_EXAMPLE}/etcd-controller.yaml
else
  echo "No changed specifications found."
fi
echo

echo Done.
@@ -14,18 +14,108 @@
# See the License for the specific language governing permissions and
# limitations under the License.

set -e

# Creates resources from the example, assumed to be run from Kubernetes repo root
echo
echo "===> Initializing:"
if [ ! $(which python) ]
then
  echo "Python is a prerequisite for running this script. Please install Python and try running again."
  exit 1
fi

if [ ! $(which gcloud) ]
then
  echo "gcloud is a prerequisite for running this script. Please install gcloud and try running again."
  exit 1
fi

gcloud_instances=$(gcloud compute instances list | grep "\-master")
if [ -z "$gcloud_instances" ] || [ -z "${KUBE_GCE_INSTANCE_PREFIX}" ]
then
  echo "This script is only able to supply the necessary serviceaccount key if you are running on Google"
  echo "Compute Engine using a cluster/kube-up.sh script with KUBE_GCE_INSTANCE_PREFIX set. If this is not"
  echo "the case, be ready to supply a path to the serviceaccount public key."
  if [ -z "${KUBE_GCE_INSTANCE_PREFIX}" ]
  then
    echo "Please provide your KUBE_GCE_INSTANCE_PREFIX now:"
    read KUBE_GCE_INSTANCE_PREFIX
  fi
fi

export OPENSHIFT_EXAMPLE=$(pwd)/examples/openshift-origin
echo Set OPENSHIFT_EXAMPLE=${OPENSHIFT_EXAMPLE}
export OPENSHIFT_CONFIG=${OPENSHIFT_EXAMPLE}/config
echo Set OPENSHIFT_CONFIG=${OPENSHIFT_CONFIG}
mkdir ${OPENSHIFT_CONFIG}
cluster/kubectl.sh config view --output=yaml --flatten=true --minify=true > ${OPENSHIFT_CONFIG}/kubeconfig
cluster/kubectl.sh create -f $OPENSHIFT_EXAMPLE/openshift-service.yaml
sleep 60
export PUBLIC_IP=$(cluster/kubectl.sh get services openshift --template="{{ index .status.loadBalancer.ingress 0 \"ip\" }}")
echo "PUBLIC IP: ${PUBLIC_IP}"
docker run --privileged -v ${OPENSHIFT_CONFIG}:/config openshift/origin start master --write-config=/config --kubeconfig=/config/kubeconfig --master=https://localhost:8443 --public-master=https://${PUBLIC_IP}:8443
sudo -E chown ${USER} -R ${OPENSHIFT_CONFIG}
docker run -i -t --privileged -e="OPENSHIFTCONFIG=/config/admin.kubeconfig" -v ${OPENSHIFT_CONFIG}:/config openshift/origin ex bundle-secret openshift-config -f /config &> ${OPENSHIFT_EXAMPLE}/secret.json
cluster/kubectl.sh create -f ${OPENSHIFT_EXAMPLE}/secret.json
cluster/kubectl.sh create -f ${OPENSHIFT_EXAMPLE}/openshift-controller.yaml
cluster/kubectl.sh get pods | grep openshift
echo Made dir ${OPENSHIFT_CONFIG}
echo

echo "===> Setting up OpenShift-Origin namespace:"
kubectl create -f ${OPENSHIFT_EXAMPLE}/openshift-origin-namespace.yaml
echo

echo "===> Setting up etcd-discovery:"
# A token etcd uses to generate unique cluster ID and member ID. Conforms to [a-z0-9]{40}
export ETCD_INITIAL_CLUSTER_TOKEN=$(python -c "import string; import random; print(''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(40)))")

# A unique token used by the discovery service. Conforms to etcd-cluster-[a-z0-9]{5}
export ETCD_DISCOVERY_TOKEN=$(python -c "import string; import random; print(\"etcd-cluster-\" + ''.join(random.SystemRandom().choice(string.ascii_lowercase + string.digits) for _ in range(5)))")
sed -i.bak -e "s/INSERT_ETCD_INITIAL_CLUSTER_TOKEN/\"${ETCD_INITIAL_CLUSTER_TOKEN}\"/g" -e "s/INSERT_ETCD_DISCOVERY_TOKEN/\"${ETCD_DISCOVERY_TOKEN}\"/g" ${OPENSHIFT_EXAMPLE}/etcd-controller.yaml

kubectl create -f ${OPENSHIFT_EXAMPLE}/etcd-discovery-controller.yaml --namespace='openshift-origin'
kubectl create -f ${OPENSHIFT_EXAMPLE}/etcd-discovery-service.yaml --namespace='openshift-origin'
echo

echo "===> Setting up etcd:"
kubectl create -f ${OPENSHIFT_EXAMPLE}/etcd-controller.yaml --namespace='openshift-origin'
kubectl create -f ${OPENSHIFT_EXAMPLE}/etcd-service.yaml --namespace='openshift-origin'
echo

echo "===> Setting up openshift-origin:"
kubectl config view --output=yaml --flatten=true --minify=true > ${OPENSHIFT_CONFIG}/kubeconfig
kubectl create -f ${OPENSHIFT_EXAMPLE}/openshift-service.yaml --namespace='openshift-origin'
echo

export PUBLIC_OPENSHIFT_IP=""
echo "===> Waiting for public IP to be set for the OpenShift Service."
echo "Mistakes in service setup can cause this to loop infinitely if an"
echo "external IP is never set. Ensure that the OpenShift service"
echo "is set to use an external load balancer. This process may take"
echo "a few minutes. Errors can be found in the log file found at:"
echo ${OPENSHIFT_EXAMPLE}/openshift-startup.log
echo "" > ${OPENSHIFT_EXAMPLE}/openshift-startup.log
while [ ${#PUBLIC_OPENSHIFT_IP} -lt 1 ]; do
  echo -n .
  sleep 1
  {
    export PUBLIC_OPENSHIFT_IP=$(kubectl get services openshift --namespace="openshift-origin" --template="{{ index .status.loadBalancer.ingress 0 \"ip\" }}")
  } >> ${OPENSHIFT_EXAMPLE}/openshift-startup.log 2>&1
  if [[ ! ${PUBLIC_OPENSHIFT_IP} =~ ^([0-9]{1,3}\.){3}[0-9]{1,3}$ ]]; then
    export PUBLIC_OPENSHIFT_IP=""
  fi
done
echo
echo "Public OpenShift IP set to: ${PUBLIC_OPENSHIFT_IP}"
echo

echo "===> Configuring OpenShift:"
docker run --privileged -v ${OPENSHIFT_CONFIG}:/config openshift/origin start master --write-config=/config --kubeconfig=/config/kubeconfig --master=https://localhost:8443 --public-master=https://${PUBLIC_OPENSHIFT_IP}:8443 --etcd=http://etcd:2379
sudo -E chown -R ${USER} ${OPENSHIFT_CONFIG}

# The following assumes GCE and that KUBE_GCE_INSTANCE_PREFIX is set
export ZONE=$(gcloud compute instances list | grep "${KUBE_GCE_INSTANCE_PREFIX}\-master" | awk '{print $2}' | head -1)
echo "sudo cat /srv/kubernetes/server.key; exit;" | gcloud compute ssh ${KUBE_GCE_INSTANCE_PREFIX}-master --zone ${ZONE} | grep -Ex "(^\-.*\-$|^\S+$)" > ${OPENSHIFT_CONFIG}/serviceaccounts.private.key
# The following insertion will fail if indentation changes
sed -i -e 's/publicKeyFiles:.*$/publicKeyFiles:/g' -e '/publicKeyFiles:/a \ \ - serviceaccounts.private.key' ${OPENSHIFT_CONFIG}/master-config.yaml

docker run -it --privileged -e="KUBECONFIG=/config/admin.kubeconfig" -v ${OPENSHIFT_CONFIG}:/config openshift/origin cli secrets new openshift-config /config -o json &> ${OPENSHIFT_EXAMPLE}/secret.json
kubectl create -f ${OPENSHIFT_EXAMPLE}/secret.json --namespace='openshift-origin'
echo

echo "===> Running OpenShift Master:"
kubectl create -f ${OPENSHIFT_EXAMPLE}/openshift-controller.yaml --namespace='openshift-origin'
echo

echo Done.
examples/openshift-origin/etcd-controller.yaml (new file, 57 lines)
@@ -0,0 +1,57 @@
kind: ReplicationController
apiVersion: v1
metadata:
  name: etcd
  creationTimestamp:
spec:
  strategy:
    type: Recreate
    resources: {}
  triggers:
  - type: ConfigChange
  replicas: 3
  selector:
    name: etcd
  template:
    metadata:
      creationTimestamp:
      labels:
        name: etcd
    spec:
      containers:
      - name: member
        image: openshift/etcd-20-centos7
        ports:
        - containerPort: 2379
          protocol: TCP
        - containerPort: 2380
          protocol: TCP
        env:
        # ETCD_NUM_MEMBERS is the maximum number of members to launch (has to match the number of replicas)
        - name: ETCD_NUM_MEMBERS
          value: "3"
        - name: ETCD_INITIAL_CLUSTER_STATE
          value: "new"
        # ETCD_INITIAL_CLUSTER_TOKEN is a token etcd uses to generate unique cluster ID and member ID. Conforms to [a-z0-9]{40}
        - name: ETCD_INITIAL_CLUSTER_TOKEN
          value: INSERT_ETCD_INITIAL_CLUSTER_TOKEN
        # ETCD_DISCOVERY_TOKEN is a unique token used by the discovery service. Conforms to etcd-cluster-[a-z0-9]{5}
        - name: ETCD_DISCOVERY_TOKEN
          value: INSERT_ETCD_DISCOVERY_TOKEN
        # ETCD_DISCOVERY_URL connects etcd instances together by storing a list of peer addresses,
        # metadata and the initial size of the cluster under a unique address
        - name: ETCD_DISCOVERY_URL
          value: "http://etcd-discovery:2379"
        - name: ETCDCTL_PEERS
          value: "http://etcd:2379"
        resources: {}
        terminationMessagePath: "/dev/termination-log"
        imagePullPolicy: IfNotPresent
        capabilities: {}
        securityContext:
          capabilities: {}
          privileged: false
      restartPolicy: Always
      dnsPolicy: ClusterFirst
      serviceAccount: ''
status: {}
examples/openshift-origin/etcd-discovery-controller.yaml (new file, 39 lines)
@@ -0,0 +1,39 @@
kind: ReplicationController
apiVersion: v1
metadata:
  name: etcd-discovery
  creationTimestamp:
spec:
  strategy:
    type: Recreate
    resources: {}
  triggers:
  - type: ConfigChange
  replicas: 1
  selector:
    name: etcd-discovery
  template:
    metadata:
      creationTimestamp:
      labels:
        name: etcd-discovery
    spec:
      containers:
      - name: discovery
        image: openshift/etcd-20-centos7
        args:
        - etcd-discovery.sh
        ports:
        - containerPort: 2379
          protocol: TCP
        resources: {}
        terminationMessagePath: "/dev/termination-log"
        imagePullPolicy: IfNotPresent
        capabilities: {}
        securityContext:
          capabilities: {}
          privileged: false
      restartPolicy: Always
      dnsPolicy: ClusterFirst
      serviceAccount: ''
status: {}
examples/openshift-origin/etcd-discovery-service.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
kind: Service
apiVersion: v1
metadata:
  name: etcd-discovery
  creationTimestamp:
  labels:
    name: etcd-discovery
spec:
  ports:
  - protocol: TCP
    port: 2379
    targetPort: 2379
    nodePort: 0
  selector:
    name: etcd-discovery
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
examples/openshift-origin/etcd-service.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
kind: Service
apiVersion: v1
metadata:
  name: etcd
  creationTimestamp:
  labels:
    name: etcd
spec:
  ports:
  - name: client
    protocol: TCP
    port: 2379
    targetPort: 2379
    nodePort: 0
  - name: server
    protocol: TCP
    port: 2380
    targetPort: 2380
    nodePort: 0
  selector:
    name: etcd
  sessionAffinity: None
  type: ClusterIP
status:
  loadBalancer: {}
@@ -0,0 +1,6 @@
kind: Namespace
apiVersion: v1
metadata:
  name: "openshift-origin"
  labels:
    name: "openshift-origin"
@@ -4,8 +4,8 @@ metadata:
  name: openshift
spec:
  ports:
  - port: 8443
    name: openshift
  - name: openshift
    port: 8443
    targetPort: 8443
  selector:
    name: openshift
tmp-valid-pod.json (new file, 36 lines)
@@ -0,0 +1,36 @@
{
  "kind": "Pod",
  "apiVersion": "v1",
  "metadata": {
    "name": "valid-pod",
    "namespace": "default",
    "selfLink": "/api/v1/namespaces/default/pods/valid-pod",
    "uid": "20f4f1f5-1e67-11e5-b84d-54ee753e2644",
    "resourceVersion": "474",
    "creationTimestamp": "2015-06-29T13:59:58Z",
    "labels": {
      "name": "valid-pod"
    }
  },
  "spec": {
    "containers": [
      {
        "name": "update-k8s-serve-hostname",
        "image": "nginx",
        "resources": {
          "limits": {
            "cpu": "1",
            "memory": "6Mi"
          }
        },
        "terminationMessagePath": "/dev/termination-log",
        "imagePullPolicy": "IfNotPresent"
      }
    ],
    "restartPolicy": "Always",
    "dnsPolicy": "ClusterFirst"
  },
  "status": {
    "phase": "Pending"
  }
}