Merge pull request #21737 from FujitsuEnablingSoftwareTechnologyGmbH/openstack-provider

Automatic merge from submit-queue

OpenStack provider

Our pull request delivers a solution for creating a Kubernetes cluster on top of OpenStack. The OpenStack Heat orchestration engine describes the infrastructure for the Kubernetes cluster, and CentOS images are used for the Kubernetes host machines.

We tested our solution with DevStack and the CityCloud provider.

We believe that our solution fills a gap in the market.
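
As a sketch of the intended workflow (values illustrative), a cluster is brought up from the repository root with:

export KUBERNETES_PROVIDER=openstack-heat
# optionally override defaults from cluster/openstack-heat/config-default.sh first
./cluster/kube-up.sh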

k8s-merge-robot 2016-05-10 23:56:47 -07:00
commit 17c3f19c64
28 changed files with 1632 additions and 17 deletions

View File

@ -38,6 +38,8 @@
# * export KUBERNETES_PROVIDER=photon-controller; wget -q -O - https://get.k8s.io | bash
# Rackspace
# * export KUBERNETES_PROVIDER=rackspace; wget -q -O - https://get.k8s.io | bash
# OpenStack-Heat
# * export KUBERNETES_PROVIDER=openstack-heat; wget -q -O - https://get.k8s.io | bash
#
# Set KUBERNETES_SKIP_DOWNLOAD to non-empty to skip downloading a release.
# Set KUBERNETES_SKIP_CONFIRM to skip the installation confirmation prompt.

View File

@ -64,6 +64,15 @@ elif [[ "${validate_result}" == "2" ]]; then
echo "...ignoring non-fatal errors in validate-cluster" >&2
fi
if [[ "${ENABLE_PROXY:-}" == "true" ]]; then
. /tmp/kube-proxy-env
echo ""
echo "*** Please run the following to add the kube-apiserver endpoint to your proxy white-list ***"
cat /tmp/kube-proxy-env
echo "*** ***"
echo ""
fi
echo -e "Done, listing cluster services:\n" >&2
"${KUBE_ROOT}/cluster/kubectl.sh" cluster-info
echo
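
For reference, /tmp/kube-proxy-env is written by configure-kubectl in cluster/openstack-heat/util.sh (shown further below); with a hypothetical master IP of 172.24.4.10 it would contain roughly:

export NO_PROXY=$NO_PROXY,172.24.4.10
export no_proxy=$NO_PROXY,172.24.4.10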

View File

@ -0,0 +1,69 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Contains configuration values for the OpenStack cluster
# Stack name
STACK_NAME=${STACK_NAME:-KubernetesStack}
# Keypair for kubernetes stack
KUBERNETES_KEYPAIR_NAME=${KUBERNETES_KEYPAIR_NAME:-kubernetes_keypair}
# Kubernetes release tar file
KUBERNETES_RELEASE_TAR=${KUBERNETES_RELEASE_TAR:-kubernetes-server-linux-amd64.tar.gz}
NUMBER_OF_MINIONS=${NUMBER_OF_MINIONS-3}
MAX_NUMBER_OF_MINIONS=${MAX_NUMBER_OF_MINIONS:-3}
MASTER_FLAVOR=${MASTER_FLAVOR:-m1.medium}
MINION_FLAVOR=${MINION_FLAVOR:-m1.medium}
EXTERNAL_NETWORK=${EXTERNAL_NETWORK:-public}
SWIFT_SERVER_URL=${SWIFT_SERVER_URL:-}
# Flag indicates if new image must be created. If 'false' then image with IMAGE_ID will be used.
# If 'true' then new image will be created from file config-image.sh
CREATE_IMAGE=${CREATE_IMAGE:-true} # use "true" for devstack
# Flag indicates if image should be downloaded
DOWNLOAD_IMAGE=${DOWNLOAD_IMAGE:-true}
# Image id which will be used for kubernetes stack
IMAGE_ID=${IMAGE_ID:-f0f394b1-5546-4b68-b2bc-8abe8a7e6b8b}
# DNS server address
DNS_SERVER=${DNS_SERVER:-8.8.8.8}
# Public RSA key path
CLIENT_PUBLIC_KEY_PATH=${CLIENT_PUBLIC_KEY_PATH:-~/.ssh/id_rsa.pub}
# Max time period for stack provisioning. Time in minutes.
STACK_CREATE_TIMEOUT=${STACK_CREATE_TIMEOUT:-60}
# Enable proxy: if true, kube-up will apply your current proxy settings (defined by the *_PROXY environment variables) to the deployment.
ENABLE_PROXY=${ENABLE_PROXY:-false}
# Per-protocol proxy settings.
FTP_PROXY=${FTP_PROXY:-}
HTTP_PROXY=${HTTP_PROXY:-}
HTTPS_PROXY=${HTTPS_PROXY:-}
SOCKS_PROXY=${SOCKS_PROXY:-}
# IPs and Domains that bypass the proxy.
NO_PROXY=${NO_PROXY:-}
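
The values above all fall back to defaults only when unset, so they can be overridden from the environment before invoking kube-up; a minimal sketch with illustrative values:

export NUMBER_OF_MINIONS=2
export MASTER_FLAVOR=m1.large
export EXTERNAL_NETWORK=ext-net   # name of the external network in your cloud
./cluster/kube-up.sh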

View File

@ -0,0 +1,35 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Contains configuration values for the new image. It is skipped when CREATE_IMAGE=false.
# Image name which will be displayed in OpenStack
OPENSTACK_IMAGE_NAME=${OPENSTACK_IMAGE_NAME:-CentOS7}
# Downloaded image file name for the OpenStack project
IMAGE_FILE=${IMAGE_FILE:-CentOS-7-x86_64-GenericCloud-1510.qcow2}
# Absolute path where image file is stored.
IMAGE_PATH=${IMAGE_PATH:-~/Downloads/openstack}
# The URL basepath for downloading the image
IMAGE_URL_PATH=${IMAGE_URL_PATH:-http://cloud.centos.org/centos/7/images}
# The disk format of the image. Acceptable formats are ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso.
IMAGE_FORMAT=${IMAGE_FORMAT:-qcow2}
# The container format of the image. Acceptable formats are ami, ari, aki, bare, docker, and ovf.
CONTAINER_FORMAT=${CONTAINER_FORMAT:-bare}
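
If a suitable image already exists in Glance, creation and download can be skipped and IMAGE_ID pointed at it; a sketch (the ID shown is just the placeholder default from config-default.sh):

export CREATE_IMAGE=false
export DOWNLOAD_IMAGE=false
export IMAGE_ID=f0f394b1-5546-4b68-b2bc-8abe8a7e6b8b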

View File

@ -0,0 +1,19 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Contains configuration values for interacting with the OpenStack-Heat cluster in test mode
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/openstack-heat/config-default.sh"

View File

@ -0,0 +1,70 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# The contents of these variables are substituted in by Heat from the proxy settings passed in by kube-up.sh
export ETC_ENVIRONMENT='FTP_PROXY=$FTP_PROXY
HTTP_PROXY=$HTTP_PROXY
HTTPS_PROXY=$HTTPS_PROXY
SOCKS_PROXY=$SOCKS_PROXY
NO_PROXY=$NO_PROXY
ftp_proxy=$FTP_PROXY
http_proxy=$HTTP_PROXY
https_proxy=$HTTPS_PROXY
socks_proxy=$SOCKS_PROXY
no_proxy=$NO_PROXY
'
export ETC_PROFILE_D='export FTP_PROXY=$FTP_PROXY
export HTTP_PROXY=$HTTP_PROXY
export HTTPS_PROXY=$HTTPS_PROXY
export SOCKS_PROXY=$SOCKS_PROXY
export NO_PROXY=$NO_PROXY
export ftp_proxy=$FTP_PROXY
export http_proxy=$HTTP_PROXY
export https_proxy=$HTTPS_PROXY
export socks_proxy=$SOCKS_PROXY
export no_proxy=$NO_PROXY
'
export DOCKER_PROXY='[Service]
Environment="HTTP_PROXY=$HTTP_PROXY"
Environment="HTTPS_PROXY=$HTTPS_PROXY"
Environment="SOCKS_PROXY=$SOCKS_PROXY"
Environment="NO_PROXY=$NO_PROXY"
Environment="ftp_proxy=$FTP_PROXY"
Environment="http_proxy=$HTTP_PROXY"
Environment="https_proxy=$HTTPS_PROXY"
Environment="socks_proxy=$SOCKS_PROXY"
Environment="no_proxy=$NO_PROXY"
'
# This again is set by heat
ENABLE_PROXY='$ENABLE_PROXY'
# Heat itself doesn't have conditionals, so this is how we set up our proxy without breaking non-proxy setups.
if [[ "${ENABLE_PROXY}" == "true" ]]; then
mkdir -p /etc/systemd/system/docker.service.d/
echo "${ETC_ENVIRONMENT}" >> /etc/environment
echo "${ETC_PROFILE_D}" > /etc/profile.d/proxy_config.sh
echo "${DOCKER_PROXY}" > etc/systemd/system/docker.service.d/http-proxy.conf
echo "proxy=$HTTP_PROXY" >> /etc/yum.conf
fi
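
To confirm on a booted node that the proxy settings took effect, something like the following can be run (a sketch, not part of this change):

cat /etc/environment
systemctl show docker --property=Environment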

View File

@ -0,0 +1,56 @@
#cloud-config
merge_how: dict(recurse_array)+list(append)
bootcmd:
- mkdir -p /etc/salt/minion.d
- mkdir -p /srv/salt-overlay/pillar
write_files:
- path: /etc/salt/minion.d/log-level-debug.conf
content: |
log_level: warning
log_level_logfile: warning
- path: /etc/salt/minion.d/grains.conf
content: |
grains:
node_ip: $MASTER_IP
publicAddressOverride: $MASTER_IP
network_mode: openvswitch
networkInterfaceName: eth0
api_servers: $MASTER_IP
cloud: openstack
cloud_config: /srv/kubernetes/openstack.conf
roles:
- $role
runtime_config: ""
docker_opts: ""
master_extra_sans: "DNS:kubernetes,DNS:kubernetes.default,DNS:kubernetes.default.svc,DNS:kubernetes.default.svc.cluster.local,DNS:kubernetes-master"
keep_host_etcd: true
- path: /srv/kubernetes/openstack.conf
content: |
[Global]
auth-url=$OS_AUTH_URL
username=$OS_USERNAME
password=$OS_PASSWORD
region=$OS_REGION_NAME
tenant-id=$OS_TENANT_ID
- path: /srv/salt-overlay/pillar/cluster-params.sls
content: |
service_cluster_ip_range: 10.246.0.0/16
cert_ip: 10.246.0.1
enable_cluster_monitoring: influxdb
enable_cluster_logging: "true"
enable_cluster_ui: "true"
enable_node_logging: "true"
logging_destination: elasticsearch
elasticsearch_replicas: "1"
enable_cluster_dns: "true"
dns_replicas: "1"
dns_server: 10.246.0.10
dns_domain: cluster.local
instance_prefix: kubernetes
admission_control: NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,ResourceQuota
enable_cpu_cfs_quota: "true"
network_provider: none
opencontrail_tag: R2.20
opencontrail_kubernetes_tag: master
opencontrail_public_subnet: 10.1.0.0/16
e2e_storage_test_environment: "false"

View File

@ -0,0 +1,43 @@
#cloud-config
merge_how: dict(recurse_array)+list(append)
bootcmd:
- mkdir -p /srv/salt-overlay/salt/kube-apiserver
- mkdir -p /srv/salt-overlay/salt/kubelet
write_files:
- path: /srv/salt-overlay/salt/kube-apiserver/basic_auth.csv
permissions: "0600"
content: |
$apiserver_password,$apiserver_user,admin
- path: /srv/salt-overlay/salt/kube-apiserver/known_tokens.csv
permissions: "0600"
content: |
$token_kubelet,kubelet,kubelet
$token_kube_proxy,kube_proxy,kube_proxy
TokenSystemScheduler,system:scheduler,system:scheduler
TokenSystemControllerManager,system:controller_manager,system:controller_manager
TokenSystemLogging,system:logging,system:logging
TokenSystemMonitoring,system:monitoring,system:monitoring
TokenSystemDns,system:dns,system:dns
- path: /srv/salt-overlay/salt/kubelet/kubernetes_auth
permissions: "0600"
content: |
{"BearerToken": "$token_kubelet", "Insecure": true }
- path: /srv/salt-overlay/salt/kubelet/kubeconfig
permissions: "0600"
content: |
apiVersion: v1
kind: Config
users:
- name: kubelet
user:
token: $token_kubelet
clusters:
- name: local
cluster:
insecure-skip-tls-verify: true
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context

View File

@ -0,0 +1,44 @@
#cloud-config
merge_how: dict(recurse_array)+list(append)
bootcmd:
- mkdir -p /srv/salt-overlay/salt/kubelet
- mkdir -p /srv/salt-overlay/salt/kube-proxy
write_files:
- path: /srv/salt-overlay/salt/kubelet/kubeconfig
permissions: "0600"
content: |
apiVersion: v1
kind: Config
users:
- name: kubelet
user:
token: $token_kubelet
clusters:
- name: local
cluster:
insecure-skip-tls-verify: true
contexts:
- context:
cluster: local
user: kubelet
name: service-account-context
current-context: service-account-context
- path: /srv/salt-overlay/salt/kube-proxy/kubeconfig
permissions: "0600"
content: |
apiVersion: v1
kind: Config
users:
- name: kube-proxy
user:
token: $token_kube_proxy
clusters:
- name: local
cluster:
insecure-skip-tls-verify: true
contexts:
- context:
cluster: local
user: kube-proxy
name: service-account-context
current-context: service-account-context

View File

@ -0,0 +1,23 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
# Workaround for a cloud-init bug that has been fixed upstream but not yet widely distributed: https://bugs.launchpad.net/cloud-init/+bug/1246485
# See also http://blog.oddbit.com/2014/12/10/cloudinit-and-the-case-of-the-changing-hostname/
hostname > /etc/hostname

View File

@ -0,0 +1,9 @@
#cloud-config
merge_how: dict(recurse_array)+list(append)
write_files:
- path: /etc/cloud/cloud.cfg.d/99_hostname.cfg
owner: "root:root"
permissions: "0644"
content: |
preserve_hostname: true

View File

@ -0,0 +1,10 @@
#cloud-config
system_info:
default_user:
name: minion
lock_passwd: true
gecos: Kubernetes Interactive User
groups: [wheel, adm, systemd-journal]
sudo: ["ALL=(ALL) NOPASSWD:ALL"]
shell: /bin/bash

View File

@ -0,0 +1,66 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
. /etc/sysconfig/heat-params
FLANNEL_ETCD_URL="http://${MASTER_IP}:4379"
# Install etcd for flannel data
if ! which etcd > /dev/null 2>&1; then
yum install -y etcd
fi
cat <<EOF > /etc/etcd/etcd.conf
ETCD_NAME=flannel
ETCD_DATA_DIR="/var/lib/etcd/flannel.etcd"
ETCD_LISTEN_PEER_URLS="http://${MASTER_IP}:4380"
ETCD_LISTEN_CLIENT_URLS="http://${MASTER_IP}:4379"
ETCD_INITIAL_ADVERTISE_PEER_URLS="http://${MASTER_IP}:4380"
ETCD_INITIAL_CLUSTER="flannel=http://${MASTER_IP}:4380"
ETCD_ADVERTISE_CLIENT_URLS="${FLANNEL_ETCD_URL}"
EOF
systemctl enable etcd
systemctl restart etcd
# Install flannel for overlay
if ! which flanneld > /dev/null 2>&1; then
yum install -y flannel
fi
cat <<EOF > /etc/flannel-config.json
{
"Network": "${CONTAINER_SUBNET}",
"SubnetLen": 24,
"Backend": {
"Type": "host-gw"
}
}
EOF
etcdctl -C ${FLANNEL_ETCD_URL} set /coreos.com/network/config < /etc/flannel-config.json
cat <<EOF > /etc/sysconfig/flanneld
FLANNEL_ETCD="${FLANNEL_ETCD_URL}"
FLANNEL_ETCD_KEY="/coreos.com/network"
FLANNEL_OPTIONS="-iface=eth0 --ip-masq"
EOF
systemctl enable flanneld
systemctl restart flanneld
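
To verify that the overlay configuration reached etcd, the same endpoint can be queried on the master (a sketch; MASTER_IP as written to /etc/sysconfig/heat-params):

etcdctl -C "http://${MASTER_IP}:4379" get /coreos.com/network/config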

View File

@ -0,0 +1,43 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
. /etc/sysconfig/heat-params
FLANNEL_ETCD_URL="http://${MASTER_IP}:4379"
# Install flannel for overlay
if ! which flanneld >/dev/null 2>&1; then
yum install -y flannel
fi
cat <<EOF >/etc/sysconfig/flanneld
FLANNEL_ETCD="${FLANNEL_ETCD_URL}"
FLANNEL_ETCD_KEY="/coreos.com/network"
FLANNEL_OPTIONS="-iface=eth0 --ip-masq"
EOF
systemctl enable flanneld
systemctl restart flanneld
# A Kubernetes node should be able to resolve its own hostname.
# In some cloud providers, myhostname is not enabled by default.
grep '^hosts:.*myhostname' /etc/nsswitch.conf || (
sed -e 's/^hosts:\(.*\)/hosts:\1 myhostname/' -i /etc/nsswitch.conf
)
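
A quick check that the node now resolves its own hostname (a sketch):

getent hosts "$(hostname)"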

View File

@ -0,0 +1,51 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail
. /etc/sysconfig/heat-params
# Read in the profile; nounset must be relaxed for some OSes.
set +o nounset
. /etc/profile
set -o nounset
rm -rf /kube-install
mkdir -p /kube-install
cd /kube-install
curl "${KUBERNETES_SERVER_URL}" -o kubernetes-server.tar.gz
curl "${KUBERNETES_SALT_URL}" -o kubernetes-salt.tar.gz
tar xzf kubernetes-salt.tar.gz
./kubernetes/saltbase/install.sh kubernetes-server.tar.gz
if ! which salt-call >/dev/null 2>&1; then
# Install salt binaries
curl -sS -L --connect-timeout 20 --retry 6 --retry-delay 10 https://bootstrap.saltstack.com | sh -s
fi
# The Salt server runs at localhost
echo "127.0.0.1 salt" >> /etc/hosts
# Run salt-call
# salt-call wants to start the docker daemon but is unable to.
# See <https://github.com/projectatomic/docker-storage-setup/issues/77>.
# Run salt-call in the background so that cloud-final can finish.
# salt-call can be unstable in some environments, so execute it twice.
salt-call --local state.highstate && salt-call --local state.highstate && $$wc_notify --data-binary '{"status": "SUCCESS"}' || $$wc_notify --data-binary '{"status": "FAILURE"}' &
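
If provisioning stalls, the cloud-init and Salt logs on the instance are the first places to look (a sketch; these are the standard log locations, not created by this script):

tail -n 50 /var/log/cloud-init-output.log
tail -n 50 /var/log/salt/minion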

View File

@ -0,0 +1,11 @@
#cloud-config
merge_how: dict(recurse_array)+list(append)
write_files:
- path: /etc/sysconfig/heat-params
owner: "root:root"
permissions: "0644"
content: |
KUBERNETES_SERVER_URL="$KUBERNETES_SERVER_URL"
KUBERNETES_SALT_URL="$KUBERNETES_SALT_URL"
MASTER_IP=$MASTER_IP
CONTAINER_SUBNET=10.246.0.0/16

View File

@ -0,0 +1,425 @@
heat_template_version: 2014-10-16
description: >
Kubernetes cluster with one master and one or more worker nodes
(as specified by the number_of_minions parameter, which defaults to 3).
parameters:
ssh_key_name:
type: string
description: name of ssh key to be provisioned on our server
external_network:
type: string
description: uuid/name of a network to use for floating ip addresses
default: public
server_image:
type: string
description: glance image used to boot the server
master_flavor:
type: string
default: m1.small
description: flavor to use when booting the server
minion_flavor:
type: string
default: m1.small
description: flavor to use when booting the server
dns_nameserver:
type: string
description: address of a dns nameserver reachable in your environment
default: 8.8.8.8
number_of_minions:
type: number
description: how many kubernetes minions to spawn initially
default: 3
max_number_of_minions:
type: number
description: maximum number of kubernetes minions to spawn
default: 10
fixed_network_cidr:
type: string
description: network range for fixed ip network
default: 10.0.0.0/24
kubernetes_server_url:
type: string
description: URL of kubernetes server binary. Must be tar.gz.
kubernetes_salt_url:
type: string
description: URL of kubernetes salt scripts. Must be tar.gz.
apiserver_user:
type: string
description: User name used for api-server
default: user
apiserver_password:
type: string
description: Password used for api-server
default: password
token_kubelet:
type: string
description: Token used by kubelet
default: TokenKubelet
token_kube_proxy:
type: string
description: Token used by kube-proxy
default: TokenKubeproxy
wait_condition_timeout:
type: number
description : >
timeout for the Wait Conditions
default: 6000
os_auth_url:
type: string
description: OpenStack Auth URL
default: false
os_username:
type: string
description: OpenStack Username
default: false
os_password:
type: string
description: OpenStack Password
default: false
os_region_name:
type: string
description: OpenStack Region Name
default: false
os_tenant_id:
type: string
description: OpenStack Tenant ID
default: false
enable_proxy:
type: string
description: Whether or not to enable proxy settings
default: false
ftp_proxy:
type: string
description: FTP Proxy URL
default: localhost
http_proxy:
type: string
description: HTTP Proxy URL
default: localhost
https_proxy:
type: string
description: HTTPS Proxy URL
default: localhost
socks_proxy:
type: string
description: SOCKS Proxy URL
default: localhost
no_proxy:
type: string
description: Comma-separated list of domains/addresses that bypass the proxy.
default: localhost
resources:
master_wait_handle:
type: OS::Heat::WaitConditionHandle
master_wait_condition:
type: OS::Heat::WaitCondition
depends_on: kube_master
properties:
handle: {get_resource: master_wait_handle}
timeout: {get_param: wait_condition_timeout}
######################################################################
#
# network resources. allocate a network and router for our server.
#
fixed_network:
type: OS::Neutron::Net
fixed_subnet:
type: OS::Neutron::Subnet
properties:
cidr: {get_param: fixed_network_cidr}
network: {get_resource: fixed_network}
dns_nameservers:
- {get_param: dns_nameserver}
extrouter:
type: OS::Neutron::Router
properties:
external_gateway_info:
network: {get_param: external_network}
extrouter_inside:
type: OS::Neutron::RouterInterface
properties:
router_id: {get_resource: extrouter}
subnet: {get_resource: fixed_subnet}
######################################################################
#
# security groups. we need to permit network traffic of various
# sorts.
#
secgroup_base:
type: OS::Neutron::SecurityGroup
properties:
rules:
- protocol: icmp
- protocol: tcp
port_range_min: 22
port_range_max: 22
- remote_mode: remote_group_id
secgroup_master:
type: OS::Neutron::SecurityGroup
properties:
rules:
- protocol: tcp # api-server
port_range_min: 443
port_range_max: 443
secgroup_node:
type: OS::Neutron::SecurityGroup
properties:
rules:
- protocol: icmp
- protocol: tcp
- protocol: udp
######################################################################
#
# software configs. these are components that are combined into
# a multipart MIME user-data archive.
#
write_heat_params:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config:
str_replace:
template: {get_file: fragments/write-heat-params.yaml}
params:
"$KUBERNETES_SERVER_URL": {get_param: kubernetes_server_url}
"$KUBERNETES_SALT_URL": {get_param: kubernetes_salt_url}
"$MASTER_IP": {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]}
proxy_config:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config:
str_replace:
template: {get_file: fragments/configure-proxy.sh}
params:
"$ENABLE_PROXY": {get_param: enable_proxy }
"$FTP_PROXY": {get_param: ftp_proxy }
"$HTTP_PROXY": {get_param: http_proxy }
"$HTTPS_PROXY": {get_param: https_proxy }
"$SOCKS_PROXY": {get_param: socks_proxy }
"$NO_PROXY": {get_param: no_proxy }
hostname_hack:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config: {get_file: fragments/hostname-hack.yaml}
hostname_hack_script:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config: {get_file: fragments/hostname-hack.sh}
kube_user:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config: {get_file: fragments/kube-user.yaml}
provision_network_master:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config: {get_file: fragments/provision-network-master.sh}
deploy_kube_auth_files_master:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config:
str_replace:
template: {get_file: fragments/deploy-kube-auth-files-master.yaml}
params:
"$apiserver_user": {get_param: apiserver_user}
"$apiserver_password": {get_param: apiserver_password}
"$token_kubelet": {get_param: token_kubelet}
"$token_kube_proxy": {get_param: token_kube_proxy}
configure_salt_master:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config:
str_replace:
template: {get_file: fragments/configure-salt.yaml}
params:
"$MASTER_IP": {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]}
"$OS_AUTH_URL": {get_param: os_auth_url}
"$OS_USERNAME": {get_param: os_username}
"$OS_PASSWORD": {get_param: os_password}
"$OS_REGION_NAME": {get_param: os_region_name}
"$OS_TENANT_ID": {get_param: os_tenant_id}
"$role": "kubernetes-master"
run_salt:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config:
str_replace:
template: {get_file: fragments/run-salt.sh}
params:
"$$wc_notify": {get_attr: [master_wait_handle, curl_cli]}
kube_master_init:
type: OS::Heat::MultipartMime
properties:
parts:
- config: {get_resource: write_heat_params}
- config: {get_resource: proxy_config}
- config: {get_resource: hostname_hack}
- config: {get_resource: hostname_hack_script}
- config: {get_resource: kube_user}
- config: {get_resource: provision_network_master}
- config: {get_resource: deploy_kube_auth_files_master}
- config: {get_resource: configure_salt_master}
- config: {get_resource: run_salt}
######################################################################
#
# kubernetes master server.
#
kube_master:
type: OS::Nova::Server
depends_on:
- extrouter_inside
properties:
image: {get_param: server_image}
flavor: {get_param: master_flavor}
key_name: {get_param: ssh_key_name}
user_data_format: RAW
user_data: {get_resource: kube_master_init}
networks:
- port: {get_resource: kube_master_eth0}
name:
list_join: [-, [{get_param: "OS::stack_name"}, master]]
kube_master_eth0:
type: OS::Neutron::Port
properties:
network: {get_resource: fixed_network}
security_groups:
- {get_resource: secgroup_base}
- {get_resource: secgroup_master}
fixed_ips:
- subnet: {get_resource: fixed_subnet}
allowed_address_pairs:
- ip_address: 10.246.0.0/16
replacement_policy: AUTO
kube_master_floating:
type: OS::Neutron::FloatingIP
properties:
floating_network: {get_param: external_network}
port_id: {get_resource: kube_master_eth0}
######################################################################
#
# kubernetes minions. This is an autoscaling group that will initially
# create <number_of_minions> minions, and will scale up to
# <max_number_of_minions> based on CPU utilization.
#
kube_minions:
type: OS::Heat::AutoScalingGroup
depends_on:
- extrouter_inside
- master_wait_condition
properties:
resource:
type: kubeminion.yaml
properties:
kubernetes_server_url: {get_param: kubernetes_server_url}
kubernetes_salt_url: {get_param: kubernetes_salt_url}
ssh_key_name: {get_param: ssh_key_name}
server_image: {get_param: server_image}
minion_flavor: {get_param: minion_flavor}
token_kubelet: {get_param: token_kubelet}
token_kube_proxy: {get_param: token_kube_proxy}
fixed_network: {get_resource: fixed_network}
fixed_subnet: {get_resource: fixed_subnet}
kube_master_ip: {get_attr: [kube_master_eth0, fixed_ips, 0, ip_address]}
external_network: {get_param: external_network}
wait_condition_timeout: {get_param: wait_condition_timeout}
metadata: {"metering.stack": {get_param: "OS::stack_id"}}
cluster_name: {get_param: "OS::stack_name"}
secgroup_base: {get_resource: secgroup_base}
secgroup_node: {get_resource: secgroup_node}
os_auth_url: {get_param: os_auth_url}
os_username: {get_param: os_username}
os_password: {get_param: os_password}
os_region_name: {get_param: os_region_name}
os_tenant_id: {get_param: os_tenant_id}
enable_proxy: {get_param: enable_proxy }
ftp_proxy: {get_param: ftp_proxy }
http_proxy: {get_param: http_proxy }
https_proxy: {get_param: https_proxy }
socks_proxy: {get_param: socks_proxy }
no_proxy: {get_param: no_proxy }
min_size: {get_param: number_of_minions}
desired_capacity: {get_param: number_of_minions}
max_size: {get_param: max_number_of_minions}
outputs:
kube_master:
value: {get_attr: [kube_master_floating, floating_ip_address]}
description: >
This is the "public" IP address of the Kubernetes master node. Use this IP address
to log in to the Kubernetes master via ssh or to access the Kubernetes API
from outside the cluster.
kube_minions:
value: {get_attr: [kube_minions, outputs_list, kube_minion_ip]}
description: >
Here is the list of the "private" addresses of all Kubernetes worker nodes.
kube_minions_external:
value: {get_attr: [kube_minions, outputs_list, kube_minion_external_ip]}
description: >
Here is the list of the "public" addresses of all Kubernetes worker nodes.
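
Once the stack reaches CREATE_COMPLETE, these outputs can be used to reach the master; a sketch (the login user "minion" comes from fragments/kube-user.yaml, and the address placeholder is illustrative):

openstack stack show "${STACK_NAME}"      # inspect the stack
ssh minion@<kube_master floating IP>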

View File

@ -0,0 +1,290 @@
heat_template_version: 2014-10-16
description: >
This is a nested stack that defines a single Kubernetes minion. It is
included by an AutoScalingGroup resource in the parent template
(kubecluster.yaml).
parameters:
server_image:
type: string
description: glance image used to boot the server
minion_flavor:
type: string
default: m1.small
description: flavor to use when booting the server
ssh_key_name:
type: string
description: name of ssh key to be provisioned on our server
default: lars
external_network:
type: string
description: uuid/name of a network to use for floating ip addresses
kubernetes_server_url:
type: string
description: URL of kubernetes server binary. Must be tar.gz.
kubernetes_salt_url:
type: string
description: URL of kubernetes salt scripts. Must be tar.gz.
token_kubelet:
type: string
description: Token used by kubelet
token_kube_proxy:
type: string
description: Token used by kube-proxy
os_auth_url:
type: string
description: OpenStack Auth URL
default: false
os_username:
type: string
description: OpenStack Username
default: false
os_password:
type: string
description: OpenStack Password
default: false
os_region_name:
type: string
description: OpenStack Region Name
default: false
os_tenant_id:
type: string
description: OpenStack Tenant ID
default: false
enable_proxy:
type: string
description: Whether or not to enable proxy settings
default: false
ftp_proxy:
type: string
description: FTP Proxy URL
default: localhost
http_proxy:
type: string
description: HTTP Proxy URL
default: localhost
https_proxy:
type: string
description: HTTPS Proxy URL
default: localhost
socks_proxy:
type: string
description: SOCKS Proxy URL
default: localhost
no_proxy:
type: string
description: Comma-separated list of domains/addresses that bypass the proxy.
default: localhost
# The following are all generated in the parent template.
kube_master_ip:
type: string
description: IP address of the Kubernetes master server.
fixed_network:
type: string
description: Network from which to allocate fixed addresses.
fixed_subnet:
type: string
description: Subnet from which to allocate fixed addresses.
wait_condition_timeout:
type: number
description : >
timeout for the Wait Conditions
metadata:
type: json
description: metadata for ceilometer query
cluster_name:
type: string
secgroup_base:
type: string
secgroup_node:
type: string
resources:
minion_wait_handle:
type: OS::Heat::WaitConditionHandle
minion_wait_condition:
type: OS::Heat::WaitCondition
depends_on: kube_minion
properties:
handle: {get_resource: minion_wait_handle}
timeout: {get_param: wait_condition_timeout}
######################################################################
#
# software configs. these are components that are combined into
# a multipart MIME user-data archive.
#
write_heat_params:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config:
str_replace:
template: {get_file: fragments/write-heat-params.yaml}
params:
"$KUBERNETES_SERVER_URL": {get_param: kubernetes_server_url}
"$KUBERNETES_SALT_URL": {get_param: kubernetes_salt_url}
"$MASTER_IP": {get_param: kube_master_ip}
proxy_config:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config:
str_replace:
template: {get_file: fragments/configure-proxy.sh}
params:
"$ENABLE_PROXY": {get_param: enable_proxy }
"$FTP_PROXY": {get_param: ftp_proxy }
"$HTTP_PROXY": {get_param: http_proxy }
"$HTTPS_PROXY": {get_param: https_proxy }
"$SOCKS_PROXY": {get_param: socks_proxy }
"$NO_PROXY": {get_param: no_proxy }
hostname_hack:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config: {get_file: fragments/hostname-hack.yaml}
hostname_hack_script:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config: {get_file: fragments/hostname-hack.sh}
kube_user:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config: {get_file: fragments/kube-user.yaml}
provision_network_node:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config: {get_file: fragments/provision-network-node.sh}
deploy_kube_auth_files_node:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config:
str_replace:
template: {get_file: fragments/deploy-kube-auth-files-node.yaml}
params:
"$token_kubelet": {get_param: token_kubelet}
"$token_kube_proxy": {get_param: token_kube_proxy}
configure_salt_node:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config:
str_replace:
template: {get_file: fragments/configure-salt.yaml}
params:
"$MASTER_IP": {get_param: kube_master_ip}
"$OS_AUTH_URL": {get_param: os_auth_url}
"$OS_USERNAME": {get_param: os_username}
"$OS_PASSWORD": {get_param: os_password}
"$OS_REGION_NAME": {get_param: os_region_name}
"$OS_TENANT_ID": {get_param: os_tenant_id}
"$role": "kubernetes-pool"
run_salt:
type: OS::Heat::SoftwareConfig
properties:
group: ungrouped
config:
str_replace:
template: {get_file: fragments/run-salt.sh}
params:
"$$wc_notify": {get_attr: [minion_wait_handle, curl_cli]}
kube_minion_init:
type: OS::Heat::MultipartMime
properties:
parts:
- config: {get_resource: write_heat_params}
- config: {get_resource: proxy_config}
- config: {get_resource: hostname_hack}
- config: {get_resource: hostname_hack_script}
- config: {get_resource: kube_user}
- config: {get_resource: provision_network_node}
- config: {get_resource: deploy_kube_auth_files_node}
- config: {get_resource: configure_salt_node}
- config: {get_resource: run_salt}
######################################################################
#
# a single kubernetes minion.
#
server_name_post_fix:
type: OS::Heat::RandomString
properties:
length: 8
kube_minion:
type: OS::Nova::Server
properties:
image: {get_param: server_image}
flavor: {get_param: minion_flavor}
key_name: {get_param: ssh_key_name}
metadata: {get_param: metadata}
user_data_format: RAW
user_data: {get_resource: kube_minion_init}
networks:
- port: {get_resource: kube_minion_eth0}
name:
list_join: [-, [{get_param: cluster_name}, node, {get_resource: server_name_post_fix}]]
kube_minion_eth0:
type: OS::Neutron::Port
properties:
network: {get_param: fixed_network}
security_groups:
- {get_param: secgroup_base}
- {get_param: secgroup_node}
fixed_ips:
- subnet: {get_param: fixed_subnet}
allowed_address_pairs:
- ip_address: 10.246.0.0/16
replacement_policy: AUTO
kube_minion_floating:
type: OS::Neutron::FloatingIP
properties:
floating_network: {get_param: external_network}
port_id: {get_resource: kube_minion_eth0}
outputs:
kube_minion_ip:
value: {get_attr: [kube_minion_eth0, fixed_ips, 0, ip_address]}
kube_minion_external_ip:
value: {get_attr: [kube_minion_floating, floating_ip_address]}

View File

@ -0,0 +1,26 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Environment variables for the OpenStack command-line client
## Values set via an openrc will override these defaults.
export OS_IDENTITY_API_VERSION=${OS_IDENTITY_API_VERSION:-2.0}
export OS_USERNAME=${OS_USERNAME:-admin}
export OS_PASSWORD=${OS_PASSWORD:-secretsecret}
export OS_AUTH_URL=${OS_AUTH_URL:-http://192.168.123.100:5000/v2.0}
export OS_TENANT_NAME=${OS_TENANT_NAME:-admin}
export OS_TENANT_ID=${OS_TENANT_ID:-ed51b98b40944d89a449592eb67431eb}
export OS_REGION_NAME=${OS_REGION_NAME:-RegionOne}
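
In practice these placeholders are overridden by sourcing the openrc file downloaded from the OpenStack dashboard before running kube-up; a sketch with an illustrative path:

source ~/Downloads/my-project-openrc.sh
./cluster/kube-up.sh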

View File

@ -0,0 +1,27 @@
#!/usr/bin/env bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
## Environment variables for the OpenStack Swift command-line client. These are required for the
## CityCloud provider, where Swift uses different credentials. When Swift is part of your OpenStack
## deployment, do not modify these settings.
export OS_IDENTITY_API_VERSION=${OS_IDENTITY_API_VERSION:-2.0}
export OS_USERNAME=${OS_USERNAME:-admin}
export OS_PASSWORD=${OS_PASSWORD:-secretsecret}
export OS_AUTH_URL=${OS_AUTH_URL:-http://192.168.123.100:5000/v2.0}
export OS_TENANT_NAME=${OS_TENANT_NAME:-admin}
export OS_TENANT_ID=${OS_TENANT_ID:-ed51b98b40944d89a449592eb67431eb}
export OS_REGION_NAME=${OS_REGION_NAME:-RegionOne}

View File

@ -0,0 +1,273 @@
#!/bin/bash
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# A library of helper functions that each provider hosting Kubernetes must implement to use cluster/kube-*.sh scripts.
# exit on any error
set -e
# Use the config file specified in $KUBE_CONFIG_FILE, or default to
# config-default.sh.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
readonly ROOT=$(dirname "${BASH_SOURCE}")
source "${ROOT}/${KUBE_CONFIG_FILE:-"config-default.sh"}"
source "${KUBE_ROOT}/cluster/common.sh"
if [ $CREATE_IMAGE = true ]; then
source "${ROOT}/config-image.sh"
fi
# Verify prereqs on host machine
function verify-prereqs() {
# Check the OpenStack command-line clients
for client in swift glance nova openstack;
do
if which $client >/dev/null 2>&1; then
echo "${client} client installed"
else
echo "${client} client does not exist"
echo "Please install ${client} client, and retry."
exit 1
fi
done
}
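# The clients checked above are all available on PyPI; one way to install them
# (a sketch, not run by this script):
#   pip install python-openstackclient python-novaclient python-glanceclient python-swiftclient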
# Instantiate a kubernetes cluster
#
# Assumed vars:
# KUBERNETES_PROVIDER
function kube-up() {
echo "kube-up for provider ${KUBERNETES_PROVIDER}"
create-stack
}
# Periodically checks if cluster is created
#
# Assumed vars:
# STACK_CREATE_TIMEOUT
# STACK_NAME
function validate-cluster() {
while (( --STACK_CREATE_TIMEOUT >= 0 )); do
local status=$(openstack stack show "${STACK_NAME}" | awk '$2=="stack_status" {print $4}')
if [[ $status ]]; then
echo "Cluster status ${status}"
if [ $status = "CREATE_COMPLETE" ]; then
configure-kubectl
break
elif [ $status = "CREATE_FAILED" ]; then
echo "Cluster not created. Please check stack logs to find the problem"
break
fi
else
echo "Cluster not created. Please verify if process started correctly"
break
fi
sleep 60
done
}
# Create stack
#
# Assumed vars:
# OPENSTACK
# OPENSTACK_TEMP
# DNS_SERVER
# OPENSTACK_IP
# OPENRC_FILE
function create-stack() {
echo "[INFO] Execute commands to create Kubernetes cluster"
# This is required for some cloud providers, like CityCloud, where the Swift client has different credentials
source "${ROOT}/openrc-swift.sh"
upload-resources
source "${ROOT}/openrc-default.sh"
create-glance-image
add-keypair
run-heat-script
}
# Upload kubernetes release tars and heat templates.
#
# Assumed vars:
# ROOT
# KUBERNETES_RELEASE_TAR
function upload-resources() {
swift post kubernetes --read-acl '.r:*,.rlistings'
echo "[INFO] Upload ${KUBERNETES_RELEASE_TAR}"
swift upload kubernetes ${ROOT}/../../_output/release-tars/${KUBERNETES_RELEASE_TAR} \
--object-name kubernetes-server.tar.gz
echo "[INFO] Upload kubernetes-salt.tar.gz"
swift upload kubernetes ${ROOT}/../../_output/release-tars/kubernetes-salt.tar.gz \
--object-name kubernetes-salt.tar.gz
}
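# To verify the upload, the container contents can be listed afterwards, e.g.:
#   swift list kubernetes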
# Create a new key pair for use with servers.
#
# Assumed vars:
# KUBERNETES_KEYPAIR_NAME
# CLIENT_PUBLIC_KEY_PATH
function add-keypair() {
local status=$(nova keypair-show ${KUBERNETES_KEYPAIR_NAME})
if [[ ! $status ]]; then
nova keypair-add ${KUBERNETES_KEYPAIR_NAME} --pub-key ${CLIENT_PUBLIC_KEY_PATH}
echo "[INFO] Key pair created"
else
echo "[INFO] Key pair already exists"
fi
}
# Create a new glance image.
#
# Assumed vars:
# IMAGE_FILE
# IMAGE_PATH
# OPENSTACK_IMAGE_NAME
function create-glance-image() {
if [[ ${CREATE_IMAGE} == "true" ]]; then
local image_status=$(openstack image show ${OPENSTACK_IMAGE_NAME} | awk '$2=="id" {print $4}')
if [[ ! $image_status ]]; then
if [[ "${DOWNLOAD_IMAGE}" == "true" ]]; then
mkdir -p ${IMAGE_PATH}
curl -L ${IMAGE_URL_PATH}/${IMAGE_FILE} -o ${IMAGE_PATH}/${IMAGE_FILE} -z ${IMAGE_PATH}/${IMAGE_FILE}
fi
echo "[INFO] Create image ${OPENSTACK_IMAGE_NAME}"
glance image-create --name ${OPENSTACK_IMAGE_NAME} --disk-format ${IMAGE_FORMAT} \
--container-format ${CONTAINER_FORMAT} --file ${IMAGE_PATH}/${IMAGE_FILE}
else
echo "[INFO] Image ${OPENSTACK_IMAGE_NAME} already exists"
fi
fi
}
# Create a new kubernetes stack.
#
# Assumed vars:
# STACK_NAME
# KUBERNETES_KEYPAIR_NAME
# DNS_SERVER
# SWIFT_SERVER_URL
# OPENSTACK_IMAGE_NAME
# EXTERNAL_NETWORK
# IMAGE_ID
# MASTER_FLAVOR
# MINION_FLAVOR
# NUMBER_OF_MINIONS
# MAX_NUMBER_OF_MINIONS
# DNS_SERVER
# STACK_NAME
function run-heat-script() {
local stack_status=$(openstack stack show ${STACK_NAME})
# Automatically detect swift url if it wasn't specified
if [[ -z $SWIFT_SERVER_URL ]]; then
SWIFT_SERVER_URL=$(openstack catalog show object-store --format value | egrep -o "publicURL: (.+)$" | cut -d" " -f2)
fi
local swift_repo_url="${SWIFT_SERVER_URL}/kubernetes"
if [ $CREATE_IMAGE = true ]; then
echo "[INFO] Retrieve new image ID"
IMAGE_ID=$(openstack image show ${OPENSTACK_IMAGE_NAME} | awk '$2=="id" {print $4}')
echo "[INFO] Image Id ${IMAGE_ID}"
fi
if [[ ! $stack_status ]]; then
echo "[INFO] Create stack ${STACK_NAME}"
(
cd ${ROOT}/kubernetes-heat
openstack stack create --timeout 60 \
--parameter external_network=${EXTERNAL_NETWORK} \
--parameter ssh_key_name=${KUBERNETES_KEYPAIR_NAME} \
--parameter server_image=${IMAGE_ID} \
--parameter master_flavor=${MASTER_FLAVOR} \
--parameter minion_flavor=${MINION_FLAVOR} \
--parameter number_of_minions=${NUMBER_OF_MINIONS} \
--parameter max_number_of_minions=${MAX_NUMBER_OF_MINIONS} \
--parameter dns_nameserver=${DNS_SERVER} \
--parameter kubernetes_salt_url=${swift_repo_url}/kubernetes-salt.tar.gz \
--parameter kubernetes_server_url=${swift_repo_url}/kubernetes-server.tar.gz \
--parameter os_auth_url=${OS_AUTH_URL} \
--parameter os_username=${OS_USERNAME} \
--parameter os_password=${OS_PASSWORD} \
--parameter os_region_name=${OS_REGION_NAME} \
--parameter os_tenant_id=${OS_TENANT_ID} \
--parameter enable_proxy=${ENABLE_PROXY} \
--parameter ftp_proxy="${FTP_PROXY}" \
--parameter http_proxy="${HTTP_PROXY}" \
--parameter https_proxy="${HTTPS_PROXY}" \
--parameter socks_proxy="${SOCKS_PROXY}" \
--parameter no_proxy="${NO_PROXY}" \
--template kubecluster.yaml \
${STACK_NAME}
)
else
echo "[INFO] Stack ${STACK_NAME} already exists"
openstack stack show ${STACK_NAME}
fi
}
# Configure kubectl.
#
# Assumed vars:
# STACK_NAME
function configure-kubectl() {
export KUBE_MASTER_IP=$(nova show "${STACK_NAME}"-master | awk '$3=="network" {print $6}')
export CONTEXT="openstack-${STACK_NAME}"
export KUBE_BEARER_TOKEN="TokenKubelet"
if [[ "${ENABLE_PROXY:-}" == "true" ]]; then
echo 'export NO_PROXY=$NO_PROXY,'"${KUBE_MASTER_IP}" > /tmp/kube-proxy-env
echo 'export no_proxy=$NO_PROXY,'"${KUBE_MASTER_IP}" >> /tmp/kube-proxy-env
. /tmp/kube-proxy-env
fi
create-kubeconfig
}
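# After create-kubeconfig, a quick smoke test (a sketch; the context name matches CONTEXT above):
#   kubectl --context="openstack-${STACK_NAME}" get nodes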
# Delete a kubernetes cluster
#
# Assumed vars:
# STACK_NAME
function kube-down {
source "${ROOT}/openrc-default.sh"
openstack stack delete ${STACK_NAME}
}
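# Typical teardown invocation from the repository root (a sketch):
#   KUBERNETES_PROVIDER=openstack-heat ./cluster/kube-down.sh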
# Perform preparations required to run e2e tests
function prepare-e2e {
echo "TODO: prepare-e2e" 1>&2
}
function test-build-release {
echo "test-build-release() " 1>&2
}
# Must ensure that the following ENV vars are set
function detect-master {
source "${ROOT}/${KUBE_CONFIG_FILE:-"config-default.sh"}"
source "${ROOT}/openrc-default.sh"
export KUBE_MASTER_IP=$(nova show "${STACK_NAME}"-master | awk '$3=="network" {print $6}')
echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}" 1>&2
}

View File

@ -5,6 +5,7 @@ pkg-core:
{% if grains['os_family'] == 'RedHat' %}
- python
- git
- socat
{% else %}
- apt-transport-https
- python-apt

View File

@ -1,4 +1,4 @@
{% if grains['cloud'] is defined and grains.cloud in ['aws', 'gce', 'vagrant', 'vsphere'] %}
{% if grains['cloud'] is defined and grains.cloud in ['aws', 'gce', 'vagrant', 'vsphere', 'openstack'] %}
# TODO: generate and distribute tokens on other cloud providers.
/srv/kubernetes/known_tokens.csv:
file.managed:

View File

@ -18,6 +18,10 @@
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% endif -%}
{% if grains.cloud in [ 'openstack' ] and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% endif -%}
{% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%}
@ -58,7 +62,7 @@
{% set client_ca_file = "" -%}
{% set secure_port = "6443" -%}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller' ] %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack'] %}
{% set secure_port = "443" -%}
{% set client_ca_file = "--client-ca-file=/srv/kubernetes/ca.crt" -%}
{% endif -%}
@ -72,7 +76,7 @@
{% set basic_auth_file = "" -%}
{% set authz_mode = "" -%}
{% set abac_policy_file = "" -%}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere'] %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'openstack'] %}
{% set token_auth_file = " --token-auth-file=/srv/kubernetes/known_tokens.csv" -%}
{% set basic_auth_file = " --basic-auth-file=/srv/kubernetes/basic_auth.csv" -%}
{% set authz_mode = " --authorization-mode=ABAC" -%}
@ -164,8 +168,8 @@
{ "name": "etcopenssl",
"mountPath": "/etc/openssl",
"readOnly": true},
{ "name": "etcpkitls",
"mountPath": "/etc/pki/tls",
{ "name": "etcpki",
"mountPath": "/etc/pki",
"readOnly": true},
{ "name": "srvsshproxy",
"mountPath": "{{srv_sshproxy_path}}",
@ -196,9 +200,9 @@
"hostPath": {
"path": "/etc/openssl"}
},
{ "name": "etcpkitls",
{ "name": "etcpki",
"hostPath": {
"path": "/etc/pki/tls"}
"path": "/etc/pki"}
},
{ "name": "srvsshproxy",
"hostPath": {

View File

@ -37,6 +37,10 @@
{% endif -%}
{% set service_account_key = "--service-account-private-key-file=/srv/kubernetes/server.key" -%}
{% if grains.cloud in [ 'openstack' ] and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% endif -%}
{% if grains.cloud in [ 'aws', 'gce' ] and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% set cloud_config_mount = "{\"name\": \"cloudconfigmount\",\"mountPath\": \"" + grains.cloud_config + "\", \"readOnly\": true}," -%}
@ -46,7 +50,7 @@
{% set root_ca_file = "" -%}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller' ] %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack'] %}
{% set root_ca_file = "--root-ca-file=/srv/kubernetes/ca.crt" -%}
{% endif -%}
@ -117,8 +121,8 @@
{ "name": "etcopenssl",
"mountPath": "/etc/openssl",
"readOnly": true},
{ "name": "etcpkitls",
"mountPath": "/etc/pki/tls",
{ "name": "etcpki",
"mountPath": "/etc/pki",
"readOnly": true}
]
}
@ -146,9 +150,9 @@
"hostPath": {
"path": "/etc/openssl"}
},
{ "name": "etcpkitls",
{ "name": "etcpki",
"hostPath": {
"path": "/etc/pki/tls"}
"path": "/etc/pki"}
}
]
}}

View File

@ -5,7 +5,7 @@
{% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() -%}
{% set api_servers = "--master=https://" + ips[0][0] -%}
{% endif -%}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller' ] %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack' ] %}
{% set api_servers_with_port = api_servers -%}
{% else -%}
{% set api_servers_with_port = api_servers + ":6443" -%}

View File

@ -16,7 +16,7 @@
{% endif -%}
# TODO: remove nginx for other cloud providers.
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller' ] %}
{% if grains['cloud'] is defined and grains.cloud in [ 'aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack'] %}
{% set api_servers_with_port = api_servers -%}
{% else -%}
{% set api_servers_with_port = api_servers + ":6443" -%}
@ -28,7 +28,7 @@
{% set reconcile_cidr_args = "" -%}
{% if grains['roles'][0] == 'kubernetes-master' -%}
{% if grains.cloud in ['aws', 'gce', 'vagrant', 'vsphere', 'photon-controller'] -%}
{% if grains.cloud in ['aws', 'gce', 'vagrant', 'vsphere', 'photon-controller', 'openstack'] -%}
# Unless given a specific directive, disable registration for the kubelet
# running on the master.
@ -52,6 +52,11 @@
{% set cloud_provider = "--cloud-provider=" + grains.cloud -%}
{% endif -%}
{% set cloud_config = "" -%}
{% if grains.cloud in [ 'openstack' ] and grains.cloud_config is defined -%}
{% set cloud_config = "--cloud-config=" + grains.cloud_config -%}
{% endif -%}
{% set config = "--config=/etc/kubernetes/manifests" -%}
{% set manifest_url = "" -%}
@ -192,4 +197,4 @@
{% endif -%}
# test_args has to be kept at the end, so they'll overwrite any prior configuration
DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{experimental_flannel_overlay}} {{ reconcile_cidr_args }} {{ hairpin_mode }} {{enable_custom_metrics}} {{runtime_container}} {{kubelet_container}} {{node_labels}} {{babysit_daemons}} {{test_args}}"
DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{debugging_handlers}} {{hostname_override}} {{cloud_provider}} {{cloud_config}} {{config}} {{manifest_url}} --allow-privileged={{pillar['allow_privileged']}} {{log_level}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{kubelet_root}} {{configure_cbr0}} {{non_masquerade_cidr}} {{cgroup_root}} {{system_container}} {{pod_cidr}} {{ master_kubelet_args }} {{cpu_cfs_quota}} {{network_plugin}} {{kubelet_port}} {{experimental_flannel_overlay}} {{ reconcile_cidr_args }} {{ hairpin_mode }} {{enable_custom_metrics}} {{runtime_container}} {{kubelet_container}} {{node_labels}} {{babysit_daemons}} {{test_args}}"

View File

@ -72,7 +72,7 @@ base:
- logrotate
{% endif %}
- kube-addons
{% if grains['cloud'] is defined and grains['cloud'] in [ 'vagrant', 'gce', 'aws', 'vsphere', 'photon-controller' ] %}
{% if grains['cloud'] is defined and grains['cloud'] in [ 'vagrant', 'gce', 'aws', 'vsphere', 'photon-controller', 'openstack'] %}
- docker
- kubelet
{% endif %}