From c7b447b9fc705e6db56f477dfae676ed456ef037 Mon Sep 17 00:00:00 2001 From: OHTAKE Tomohiro Date: Wed, 24 Feb 2016 20:49:56 +0900 Subject: [PATCH] Add Heat template to create a Kubernetes stack on OpenStack --- .../fragments/configure-salt.yaml | 47 +++ .../deploy-kube-auth-files-master.yaml | 43 +++ .../deploy-kube-auth-files-node.yaml | 44 +++ .../kubernetes-heat/fragments/kube-user.yaml | 10 + .../fragments/provision-network-master.sh | 67 ++++ .../fragments/provision-network-node.sh | 43 +++ .../kubernetes-heat/fragments/run-salt.sh | 48 +++ .../fragments/write-heat-params.yaml | 11 + .../kubernetes-heat/kubecluster.yaml | 326 ++++++++++++++++++ .../openstack/kubernetes-heat/kubeminion.yaml | 201 +++++++++++ 10 files changed, 840 insertions(+) create mode 100644 cluster/openstack/kubernetes-heat/fragments/configure-salt.yaml create mode 100644 cluster/openstack/kubernetes-heat/fragments/deploy-kube-auth-files-master.yaml create mode 100644 cluster/openstack/kubernetes-heat/fragments/deploy-kube-auth-files-node.yaml create mode 100644 cluster/openstack/kubernetes-heat/fragments/kube-user.yaml create mode 100644 cluster/openstack/kubernetes-heat/fragments/provision-network-master.sh create mode 100644 cluster/openstack/kubernetes-heat/fragments/provision-network-node.sh create mode 100644 cluster/openstack/kubernetes-heat/fragments/run-salt.sh create mode 100644 cluster/openstack/kubernetes-heat/fragments/write-heat-params.yaml create mode 100644 cluster/openstack/kubernetes-heat/kubecluster.yaml create mode 100644 cluster/openstack/kubernetes-heat/kubeminion.yaml diff --git a/cluster/openstack/kubernetes-heat/fragments/configure-salt.yaml b/cluster/openstack/kubernetes-heat/fragments/configure-salt.yaml new file mode 100644 index 00000000000..e35001466b7 --- /dev/null +++ b/cluster/openstack/kubernetes-heat/fragments/configure-salt.yaml @@ -0,0 +1,47 @@ +#cloud-config +merge_how: dict(recurse_array)+list(append) +bootcmd: + - mkdir -p /etc/salt/minion.d + - 
mkdir -p /srv/salt-overlay/pillar +write_files: + - path: /etc/salt/minion.d/log-level-debug.conf + content: | + log_level: warning + log_level_logfile: warning + - path: /etc/salt/minion.d/grains.conf + content: | + grains: + node_ip: $MASTER_IP + publicAddressOverride: $MASTER_IP + network_mode: openvswitch + networkInterfaceName: eth0 + api_servers: $MASTER_IP + cloud: vagrant # It's not vagrant, but required to install Docker + roles: + - $role + runtime_config: "" + docker_opts: "" + master_extra_sans: "DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local,DNS:kubernetes-master" + keep_host_etcd: true + - path: /srv/salt-overlay/pillar/cluster-params.sls + content: | + service_cluster_ip_range: 10.246.0.0/16 + cert_ip: $MASTER_IP + enable_cluster_monitoring: none + enable_cluster_logging: "false" + enable_cluster_ui: "false" + enable_node_logging: "false" + logging_destination: elasticsearch + elasticsearch_replicas: "1" + enable_cluster_dns: "true" + dns_replicas: "1" + dns_server: 10.246.0.10 + dns_domain: cluster.local + instance_prefix: kubernetes + admission_control: NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota + enable_cpu_cfs_quota: "true" + network_provider: none + opencontrail_tag: R2.20 + opencontrail_kubernetes_tag: master + opencontrail_public_subnet: 10.1.0.0/16 + e2e_storage_test_environment: "false" diff --git a/cluster/openstack/kubernetes-heat/fragments/deploy-kube-auth-files-master.yaml b/cluster/openstack/kubernetes-heat/fragments/deploy-kube-auth-files-master.yaml new file mode 100644 index 00000000000..10ba9ae8861 --- /dev/null +++ b/cluster/openstack/kubernetes-heat/fragments/deploy-kube-auth-files-master.yaml @@ -0,0 +1,43 @@ +#cloud-config +merge_how: dict(recurse_array)+list(append) +bootcmd: + - mkdir -p /srv/salt-overlay/salt/kube-apiserver + - mkdir -p /srv/salt-overlay/salt/kubelet +write_files: + - path: 
/srv/salt-overlay/salt/kube-apiserver/basic_auth.csv + permissions: "0600" + content: | + $apiserver_user,$apiserver_password,admin + - path: /srv/salt-overlay/salt/kube-apiserver/known_tokens.csv + permissions: "0600" + content: | + $token_kubelet,kubelet,kubelet + $token_kube_proxy,kube_proxy,kube_proxy + TokenSystemScheduler,system:scheduler,system:scheduler + TokenSystemControllerManager,system:controller_manager,system:controller_manager + TokenSystemLogging,system:logging,system:logging + TokenSystemMonitoring,system:monitoring,system:monitoring + TokenSystemDns,system:dns,system:dns + - path: /srv/salt-overlay/salt/kubelet/kubernetes_auth + permissions: "0600" + content: | + {"BearerToken": "$token_kubelet", "Insecure": true } + - path: /srv/salt-overlay/salt/kubelet/kubeconfig + permissions: "0600" + content: | + apiVersion: v1 + kind: Config + users: + - name: kubelet + user: + token: $token_kubelet + clusters: + - name: local + cluster: + insecure-skip-tls-verify: true + contexts: + - context: + cluster: local + user: kubelet + name: service-account-context + current-context: service-account-context diff --git a/cluster/openstack/kubernetes-heat/fragments/deploy-kube-auth-files-node.yaml b/cluster/openstack/kubernetes-heat/fragments/deploy-kube-auth-files-node.yaml new file mode 100644 index 00000000000..7f265d2b8a6 --- /dev/null +++ b/cluster/openstack/kubernetes-heat/fragments/deploy-kube-auth-files-node.yaml @@ -0,0 +1,44 @@ +#cloud-config +merge_how: dict(recurse_array)+list(append) +bootcmd: + - mkdir -p /srv/salt-overlay/salt/kubelet + - mkdir -p /srv/salt-overlay/salt/kube-proxy +write_files: + - path: /srv/salt-overlay/salt/kubelet/kubeconfig + permissions: "0600" + content: | + apiVersion: v1 + kind: Config + users: + - name: kubelet + user: + token: $token_kubelet + clusters: + - name: local + cluster: + insecure-skip-tls-verify: true + contexts: + - context: + cluster: local + user: kubelet + name: service-account-context + current-context: 
service-account-context + - path: /srv/salt-overlay/salt/kube-proxy/kubeconfig + permissions: "0600" + content: | + apiVersion: v1 + kind: Config + users: + - name: kube-proxy + user: + token: $token_kube_proxy + clusters: + - name: local + cluster: + insecure-skip-tls-verify: true + contexts: + - context: + cluster: local + user: kube-proxy + name: service-account-context + current-context: service-account-context diff --git a/cluster/openstack/kubernetes-heat/fragments/kube-user.yaml b/cluster/openstack/kubernetes-heat/fragments/kube-user.yaml new file mode 100644 index 00000000000..4e7477d6401 --- /dev/null +++ b/cluster/openstack/kubernetes-heat/fragments/kube-user.yaml @@ -0,0 +1,10 @@ +#cloud-config +system_info: + default_user: + name: minion + lock_passwd: true + gecos: Kubernetes Interactive User + groups: [wheel, adm, systemd-journal] + sudo: ["ALL=(ALL) NOPASSWD:ALL"] + shell: /bin/bash + diff --git a/cluster/openstack/kubernetes-heat/fragments/provision-network-master.sh b/cluster/openstack/kubernetes-heat/fragments/provision-network-master.sh new file mode 100644 index 00000000000..3cdf9b8b2bc --- /dev/null +++ b/cluster/openstack/kubernetes-heat/fragments/provision-network-master.sh @@ -0,0 +1,67 @@ +#!/bin/bash + +# Copyright 2015 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit +set -o nounset +set -o pipefail + +. 
/etc/sysconfig/heat-params
+
+FLANNEL_ETCD_URL="http://${MASTER_IP}:4379"
+
+# Install etcd for flannel data
+if ! which etcd > /dev/null 2>&1; then
+  yum install -y etcd
+fi
+
+cat <<EOF > /etc/etcd/etcd.conf
+ETCD_NAME=flannel
+ETCD_DATA_DIR="/var/lib/etcd/flannel.etcd"
+ETCD_LISTEN_PEER_URLS="http://${MASTER_IP}:4380"
+ETCD_LISTEN_CLIENT_URLS="http://${MASTER_IP}:4379"
+ETCD_INITIAL_ADVERTISE_PEER_URLS="http://${MASTER_IP}:4380"
+ETCD_INITIAL_CLUSTER="flannel=http://${MASTER_IP}:4380"
+ETCD_ADVERTISE_CLIENT_URLS="${FLANNEL_ETCD_URL}"
+EOF
+systemctl enable etcd
+systemctl restart etcd
+
+# Install flannel for overlay
+if ! which flanneld > /dev/null 2>&1; then
+  yum install -y flannel
+fi
+
+cat <<EOF > /etc/flannel-config.json
+{
+  "Network": "${CONTAINER_SUBNET}",
+  "SubnetLen": 24,
+  "Backend": {
+    "Type": "udp",
+    "Port": 8285
+  }
+}
+EOF
+
+etcdctl -C ${FLANNEL_ETCD_URL} set /coreos.com/network/config < /etc/flannel-config.json
+
+cat <<EOF > /etc/sysconfig/flanneld
+FLANNEL_ETCD="${FLANNEL_ETCD_URL}"
+FLANNEL_ETCD_KEY="/coreos.com/network"
+FLANNEL_OPTIONS="-iface=eth0 --ip-masq"
+EOF
+
+systemctl enable flanneld
+systemctl restart flanneld
diff --git a/cluster/openstack/kubernetes-heat/fragments/provision-network-node.sh b/cluster/openstack/kubernetes-heat/fragments/provision-network-node.sh
new file mode 100644
index 00000000000..7fd78598e20
--- /dev/null
+++ b/cluster/openstack/kubernetes-heat/fragments/provision-network-node.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+
+# Copyright 2015 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+. /etc/sysconfig/heat-params
+
+FLANNEL_ETCD_URL="http://${MASTER_IP}:4379"
+
+# Install flannel for overlay
+if ! which flanneld >/dev/null 2>&1; then
+  yum install -y flannel
+fi
+
+cat <<EOF >/etc/sysconfig/flanneld
+FLANNEL_ETCD="${FLANNEL_ETCD_URL}"
+FLANNEL_ETCD_KEY="/coreos.com/network"
+FLANNEL_OPTIONS="-iface=eth0 --ip-masq"
+EOF
+
+systemctl enable flanneld
+systemctl restart flanneld
+
+# Kubernetes node should be able to resolve its hostname.
+# In some cloud providers, myhostname is not enabled by default.
+grep '^hosts:.*myhostname' /etc/nsswitch.conf || (
+  sed -e 's/^hosts:\(.*\)/hosts:\1 myhostname/' -i /etc/nsswitch.conf
+)
diff --git a/cluster/openstack/kubernetes-heat/fragments/run-salt.sh b/cluster/openstack/kubernetes-heat/fragments/run-salt.sh
new file mode 100644
index 00000000000..97d521eac74
--- /dev/null
+++ b/cluster/openstack/kubernetes-heat/fragments/run-salt.sh
@@ -0,0 +1,48 @@
+#!/bin/bash
+
+# Copyright 2015 The Kubernetes Authors All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -o errexit
+set -o nounset
+set -o pipefail
+
+. 
/etc/sysconfig/heat-params
+
+rm -rf /kube-install
+mkdir -p /kube-install
+cd /kube-install
+
+curl "$KUBERNETES_SERVER_URL" -o kubernetes-server.tar.gz
+curl "$KUBERNETES_SALT_URL" -o kubernetes-salt.tar.gz
+
+tar xzf kubernetes-salt.tar.gz
+./kubernetes/saltbase/install.sh kubernetes-server.tar.gz
+
+if ! which salt-call >/dev/null 2>&1; then
+  # Install salt binaries
+  curl -sS -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s
+fi
+
+# Salt server runs at localhost
+echo "127.0.0.1 salt" >> /etc/hosts
+
+# Currently heat template tells a lie that the target is Vagrant. If Vagrant cloud provider is enabled, "Unable to construct api.Node object for kubelet" error will occur.
+sed -e 's/{{cloud_provider}}//' -i /srv/salt/kubelet/default
+
+# Run salt-call
+# salt-call wants to start docker daemon but is unable to.
+# See the saltstack/salt issue tracker (salt-call cannot start the docker daemon by itself).
+# Run salt-call in background and make cloud-final finished.
+salt-call --local state.highstate && $$wc_notify --data-binary '{"status": "SUCCESS"}' || $$wc_notify --data-binary '{"status": "FAILURE"}' &
diff --git a/cluster/openstack/kubernetes-heat/fragments/write-heat-params.yaml b/cluster/openstack/kubernetes-heat/fragments/write-heat-params.yaml
new file mode 100644
index 00000000000..d8ee9da0f9f
--- /dev/null
+++ b/cluster/openstack/kubernetes-heat/fragments/write-heat-params.yaml
@@ -0,0 +1,11 @@
+#cloud-config
+merge_how: dict(recurse_array)+list(append)
+write_files:
+  - path: /etc/sysconfig/heat-params
+    owner: "root:root"
+    permissions: "0644"
+    content: |
+      KUBERNETES_SERVER_URL="$KUBERNETES_SERVER_URL"
+      KUBERNETES_SALT_URL="$KUBERNETES_SALT_URL"
+      MASTER_IP=$MASTER_IP
+      CONTAINER_SUBNET=10.246.0.0/16
diff --git a/cluster/openstack/kubernetes-heat/kubecluster.yaml b/cluster/openstack/kubernetes-heat/kubecluster.yaml
new file mode 100644
index 00000000000..3832f4c4fda
--- /dev/null
+++ b/cluster/openstack/kubernetes-heat/kubecluster.yaml
@@ -0,0 +1,326 @@
+heat_template_version: 2014-10-16
+
+description: > + Kubernetes cluster with one master and one or more worker nodes + (as specified by the number_of_minions parameter, which defaults to 2). + +parameters: + plugin_version: + type: string + description: version of CLC plugin. It is used when you show cluster list view. + default: 1.0 + constraints: + - allowed_values: + - 1.0 + + ssh_key_name: + type: string + description: name of ssh key to be provisioned on our server + + external_network: + type: string + description: uuid/name of a network to use for floating ip addresses + default: public + + server_image: + type: string + description: glance image used to boot the server + + master_flavor: + type: string + default: m1.small + description: flavor to use when booting the server + + minion_flavor: + type: string + default: m1.small + description: flavor to use when booting the server + + dns_nameserver: + type: string + description: address of a dns nameserver reachable in your environment + default: 8.8.8.8 + + number_of_minions: + type: number + description: how many kubernetes minions to spawn initially + default: 1 + + max_number_of_minions: + type: number + description: maximum number of kubernetes minions to spawn + default: 10 + + fixed_network_cidr: + type: string + description: network range for fixed ip network + default: 10.0.0.0/24 + + kubernetes_server_url: + type: string + description: URL of kubernetes server binary. Must be tar.gz. + + kubernetes_salt_url: + type: string + description: URL of kubernetes salt scripts. Must be tar.gz. 
+ + apiserver_user: + type: string + description: User name used for api-server + default: user + + apiserver_password: + type: string + description: Password used for api-server + default: password + + token_kubelet: + type: string + description: Token used by kubelet + default: TokenKubelet + + token_kube_proxy: + type: string + description: Token used by kube-proxy + default: TokenKubeproxy + + wait_condition_timeout: + type: number + description : > + timeout for the Wait Conditions + default: 6000 + +resources: + + master_wait_handle: + type: OS::Heat::WaitConditionHandle + + master_wait_condition: + type: OS::Heat::WaitCondition + depends_on: kube_master + properties: + handle: {get_resource: master_wait_handle} + timeout: {get_param: wait_condition_timeout} + + ###################################################################### + # + # network resources. allocate a network and router for our server. + # + + fixed_network: + type: OS::Neutron::Net + + fixed_subnet: + type: OS::Neutron::Subnet + properties: + cidr: {get_param: fixed_network_cidr} + network: {get_resource: fixed_network} + dns_nameservers: + - {get_param: dns_nameserver} + + extrouter: + type: OS::Neutron::Router + properties: + external_gateway_info: + network: {get_param: external_network} + + extrouter_inside: + type: OS::Neutron::RouterInterface + properties: + router_id: {get_resource: extrouter} + subnet: {get_resource: fixed_subnet} + + ###################################################################### + # + # security groups. we need to permit network traffic of various + # sorts. 
+ # + + secgroup_base: + type: OS::Neutron::SecurityGroup + properties: + rules: + - protocol: icmp + - protocol: tcp + port_range_min: 22 + port_range_max: 22 + + secgroup_kubernetes: + type: OS::Neutron::SecurityGroup + properties: + rules: + - protocol: tcp # etcd for flanneld + port_range_min: 4379 + port_range_max: 4380 + - protocol: udp # flannel backend + port_range_min: 8285 + port_range_max: 8285 + - protocol: tcp # api-server + port_range_min: 443 + port_range_max: 443 + + + ###################################################################### + # + # software configs. these are components that are combined into + # a multipart MIME user-data archive. + # + + write_heat_params: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: + str_replace: + template: {get_file: fragments/write-heat-params.yaml} + params: + "$KUBERNETES_SERVER_URL": {get_param: kubernetes_server_url} + "$KUBERNETES_SALT_URL": {get_param: kubernetes_salt_url} + "$MASTER_IP": {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} + + kube_user: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/kube-user.yaml} + + provision_network_master: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/provision-network-master.sh} + + deploy_kube_auth_files_master: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: + str_replace: + template: {get_file: fragments/deploy-kube-auth-files-master.yaml} + params: + "$apiserver_user": {get_param: apiserver_user} + "$apiserver_password": {get_param: apiserver_password} + "$token_kubelet": {get_param: token_kubelet} + "$token_kube_proxy": {get_param: token_kube_proxy} + + configure_salt_master: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: + str_replace: + template: {get_file: fragments/configure-salt.yaml} + params: + "$MASTER_IP": {get_attr: [kube_master_eth0, fixed_ips, 0, 
ip_address]} + "$role": "kubernetes-master" + + run_salt: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: + str_replace: + template: {get_file: fragments/run-salt.sh} + params: + "$$wc_notify": {get_attr: [master_wait_handle, curl_cli]} + + kube_master_init: + type: OS::Heat::MultipartMime + properties: + parts: + - config: {get_resource: write_heat_params} + - config: {get_resource: kube_user} + - config: {get_resource: provision_network_master} + - config: {get_resource: deploy_kube_auth_files_master} + - config: {get_resource: configure_salt_master} + - config: {get_resource: run_salt} + + ###################################################################### + # + # kubernetes master server. + # + + kube_master: + type: OS::Nova::Server + depends_on: + - extrouter_inside + properties: + image: {get_param: server_image} + flavor: {get_param: master_flavor} + key_name: {get_param: ssh_key_name} + user_data_format: RAW + user_data: {get_resource: kube_master_init} + networks: + - port: {get_resource: kube_master_eth0} + name: + list_join: [-, [{get_param: "OS::stack_name"}, master]] + + kube_master_eth0: + type: OS::Neutron::Port + properties: + network: {get_resource: fixed_network} + security_groups: + - {get_resource: secgroup_base} + - {get_resource: secgroup_kubernetes} + fixed_ips: + - subnet: {get_resource: fixed_subnet} + replacement_policy: AUTO + + kube_master_floating: + type: OS::Neutron::FloatingIP + properties: + floating_network: {get_param: external_network} + port_id: {get_resource: kube_master_eth0} + + ###################################################################### + # + # kubernetes minions. This is an autoscaling group that will initially + # create minions, and will scale up to + # based on CPU utilization. 
+ # + + kube_minions: + type: OS::Heat::AutoScalingGroup + depends_on: + - extrouter_inside + - master_wait_condition + properties: + resource: + type: kubeminion.yaml + properties: + kubernetes_server_url: {get_param: kubernetes_server_url} + kubernetes_salt_url: {get_param: kubernetes_salt_url} + ssh_key_name: {get_param: ssh_key_name} + server_image: {get_param: server_image} + minion_flavor: {get_param: minion_flavor} + token_kubelet: {get_param: token_kubelet} + token_kube_proxy: {get_param: token_kube_proxy} + fixed_network: {get_resource: fixed_network} + fixed_subnet: {get_resource: fixed_subnet} + kube_master_ip: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]} + external_network: {get_param: external_network} + wait_condition_timeout: {get_param: wait_condition_timeout} + metadata: {"metering.stack": {get_param: "OS::stack_id"}} + cluster_name: {get_param: "OS::stack_name"} + min_size: {get_param: number_of_minions} + desired_capacity: {get_param: number_of_minions} + max_size: {get_param: max_number_of_minions} + +outputs: + + kube_master: + value: {get_attr: [kube_master_floating, floating_ip_address]} + description: > + This is the "public" IP address of the Kubernetes master node. Use this IP address + to log in to the Kubernetes master via ssh or to access the Kubernetes API + from outside the cluster. + + kube_minions: + value: {get_attr: [kube_minions, outputs_list, kube_minion_ip]} + description: > + Here is the list of the "private" addresses of all Kubernetes worker nodes. + + kube_minions_external: + value: {get_attr: [kube_minions, outputs_list, kube_minion_external_ip]} + description: > + Here is the list of the "public" addresses of all Kubernetes worker nodes. 
diff --git a/cluster/openstack/kubernetes-heat/kubeminion.yaml b/cluster/openstack/kubernetes-heat/kubeminion.yaml new file mode 100644 index 00000000000..76075c76079 --- /dev/null +++ b/cluster/openstack/kubernetes-heat/kubeminion.yaml @@ -0,0 +1,201 @@ +heat_template_version: 2014-10-16 + +description: > + This is a nested stack that defines a single Kubernetes minion, This stack is + included by an AutoScalingGroup resource in the parent template + (kubecluster.yaml). + +parameters: + + server_image: + type: string + description: glance image used to boot the server + + minion_flavor: + type: string + default: m1.small + description: flavor to use when booting the server + + ssh_key_name: + type: string + description: name of ssh key to be provisioned on our server + default: lars + + external_network: + type: string + description: uuid/name of a network to use for floating ip addresses + + kubernetes_server_url: + type: string + description: URL of kubernetes server binary. Must be tar.gz. + + kubernetes_salt_url: + type: string + description: URL of kubernetes salt scripts. Must be tar.gz. + + token_kubelet: + type: string + description: Token used by kubelet + + token_kube_proxy: + type: string + description: Token used by kube-proxy + + # The following are all generated in the parent template. + kube_master_ip: + type: string + description: IP address of the Kubernetes master server. + fixed_network: + type: string + description: Network from which to allocate fixed addresses. + fixed_subnet: + type: string + description: Subnet from which to allocate fixed addresses. 
+ wait_condition_timeout: + type: number + description : > + timeout for the Wait Conditions + metadata: + type: json + description: metadata for ceilometer query + cluster_name: + type: string + +resources: + + minion_wait_handle: + type: OS::Heat::WaitConditionHandle + + minion_wait_condition: + type: OS::Heat::WaitCondition + depends_on: kube_minion + properties: + handle: {get_resource: minion_wait_handle} + timeout: {get_param: wait_condition_timeout} + + secgroup_all_open: + type: OS::Neutron::SecurityGroup + properties: + rules: + - protocol: icmp + - protocol: tcp + - protocol: udp + + ###################################################################### + # + # software configs. these are components that are combined into + # a multipart MIME user-data archive. + # + + write_heat_params: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: + str_replace: + template: {get_file: fragments/write-heat-params.yaml} + params: + "$KUBERNETES_SERVER_URL": {get_param: kubernetes_server_url} + "$KUBERNETES_SALT_URL": {get_param: kubernetes_salt_url} + "$MASTER_IP": {get_param: kube_master_ip} + + kube_user: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/kube-user.yaml} + + provision_network_node: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/provision-network-node.sh} + + deploy_kube_auth_files_node: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: + str_replace: + template: {get_file: fragments/deploy-kube-auth-files-node.yaml} + params: + "$token_kubelet": {get_param: token_kubelet} + "$token_kube_proxy": {get_param: token_kube_proxy} + + configure_salt_node: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: + str_replace: + template: {get_file: fragments/configure-salt.yaml} + params: + "$MASTER_IP": {get_param: kube_master_ip} + "$role": "kubernetes-pool" + + run_salt: + type: 
OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: + str_replace: + template: {get_file: fragments/run-salt.sh} + params: + "$$wc_notify": {get_attr: [minion_wait_handle, curl_cli]} + + kube_minion_init: + type: OS::Heat::MultipartMime + properties: + parts: + - config: {get_resource: write_heat_params} + - config: {get_resource: kube_user} + - config: {get_resource: provision_network_node} + - config: {get_resource: deploy_kube_auth_files_node} + - config: {get_resource: configure_salt_node} + - config: {get_resource: run_salt} + + ###################################################################### + # + # a single kubernetes minion. + # + server_name_post_fix: + type: OS::Heat::RandomString + properties: + length: 8 + + kube_minion: + type: OS::Nova::Server + properties: + image: {get_param: server_image} + flavor: {get_param: minion_flavor} + key_name: {get_param: ssh_key_name} + metadata: {get_param: metadata} + user_data_format: RAW + user_data: {get_resource: kube_minion_init} + networks: + - port: {get_resource: kube_minion_eth0} + name: + list_join: [-, [{get_param: cluster_name}, node, {get_resource: server_name_post_fix}]] + + kube_minion_eth0: + type: OS::Neutron::Port + properties: + network: {get_param: fixed_network} + security_groups: + - get_resource: secgroup_all_open + fixed_ips: + - subnet: {get_param: fixed_subnet} + replacement_policy: AUTO + + kube_minion_floating: + type: OS::Neutron::FloatingIP + properties: + floating_network: {get_param: external_network} + port_id: {get_resource: kube_minion_eth0} + +outputs: + + kube_minion_ip: + value: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]} + kube_minion_external_ip: + value: {get_attr: [kube_minion_floating, floating_ip_address]}