Add support for AWS charm

Support AWS integration via proxy charm.
This commit is contained in:
Cory Johns 2018-04-10 15:25:30 -04:00
parent 31d22870b2
commit bebad08dd9
6 changed files with 114 additions and 1 deletion

View File

@ -1,6 +1,7 @@
repo: https://github.com/kubernetes/kubernetes.git repo: https://github.com/kubernetes/kubernetes.git
includes: includes:
- 'layer:basic' - 'layer:basic'
- 'layer:status'
- 'layer:snap' - 'layer:snap'
- 'layer:tls-client' - 'layer:tls-client'
- 'layer:leadership' - 'layer:leadership'
@ -15,6 +16,7 @@ includes:
- 'interface:kube-dns' - 'interface:kube-dns'
- 'interface:kube-control' - 'interface:kube-control'
- 'interface:public-address' - 'interface:public-address'
- 'interface:aws'
options: options:
basic: basic:
packages: packages:

View File

@ -40,6 +40,8 @@ requires:
interface: public-address interface: public-address
ceph-storage: ceph-storage:
interface: ceph-admin interface: ceph-admin
aws:
interface: aws
resources: resources:
kubectl: kubectl:
type: file type: file

View File

@ -39,6 +39,7 @@ from charms.reactive import hook
from charms.reactive import remove_state from charms.reactive import remove_state
from charms.reactive import set_state from charms.reactive import set_state
from charms.reactive import is_state from charms.reactive import is_state
from charms.reactive import endpoint_from_flag
from charms.reactive import when, when_any, when_not from charms.reactive import when, when_any, when_not
from charms.reactive.helpers import data_changed, any_file_changed from charms.reactive.helpers import data_changed, any_file_changed
from charms.kubernetes.common import get_version from charms.kubernetes.common import get_version
@ -1219,6 +1220,9 @@ def configure_apiserver(etcd_connection_string, leader_etcd_version):
api_opts['enable-aggregator-routing'] = 'true' api_opts['enable-aggregator-routing'] = 'true'
api_opts['client-ca-file'] = ca_cert_path api_opts['client-ca-file'] = ca_cert_path
if is_state('endpoint.aws.ready'):
api_opts['cloud-provider'] = 'aws'
configure_kubernetes_service('kube-apiserver', api_opts, 'api-extra-args') configure_kubernetes_service('kube-apiserver', api_opts, 'api-extra-args')
restart_apiserver() restart_apiserver()
@ -1240,6 +1244,9 @@ def configure_controller_manager():
controller_opts['service-account-private-key-file'] = \ controller_opts['service-account-private-key-file'] = \
'/root/cdk/serviceaccount.key' '/root/cdk/serviceaccount.key'
if is_state('endpoint.aws.ready'):
controller_opts['cloud-provider'] = 'aws'
configure_kubernetes_service('kube-controller-manager', controller_opts, configure_kubernetes_service('kube-controller-manager', controller_opts,
'controller-manager-extra-args') 'controller-manager-extra-args')
restart_controller_manager() restart_controller_manager()
@ -1372,3 +1379,61 @@ def getStorageBackend():
if storage_backend == 'auto': if storage_backend == 'auto':
storage_backend = leader_get('auto_storage_backend') storage_backend = leader_get('auto_storage_backend')
return storage_backend return storage_backend
@when('leadership.is_leader')
@when_not('leadership.set.cluster_tag')
def create_cluster_tag():
    """Mint a unique tag for this cluster and persist it in leadership data.

    Runs only on the leader; once 'leadership.set.cluster_tag' is set this
    handler never fires again, so the tag stays stable for the cluster.
    """
    tag = 'kubernetes-{}'.format(token_generator())
    leader_set(cluster_tag=tag)
@when('leadership.set.cluster_tag',
      'kube-control.connected')
@when_not('kubernetes-master.cluster-tag-sent')
def send_cluster_tag():
    """Publish the cluster tag to workers over the kube-control relation."""
    kube_control = endpoint_from_flag('kube-control.connected')
    kube_control.set_cluster_tag(leader_get('cluster_tag'))
    # Remember that we sent it so we only do this once per connection.
    set_state('kubernetes-master.cluster-tag-sent')
@when_not('kube-control.connected')
def clear_cluster_tag_sent():
    """Re-arm send_cluster_tag if the kube-control relation goes away."""
    remove_state('kubernetes-master.cluster-tag-sent')
@when('endpoint.aws.joined',
      'leadership.set.cluster_tag')
@when_not('kubernetes-master.aws-request-sent')
def request_integration():
    """Request AWS integration for the master over the aws endpoint.

    Tags this instance and its security group with the cluster tag, then
    asks the integrator for the cloud capabilities the master relies on.
    Fires once; the 'aws-request-sent' flag suppresses re-entry.
    """
    hookenv.status_set('maintenance', 'requesting aws integration')
    cluster_tag = leader_get('cluster_tag')
    aws = endpoint_from_flag('endpoint.aws.joined')
    # Identify this instance as a master belonging to this cluster.
    master_tags = {
        'KubernetesCluster': cluster_tag,
        'k8s.io/role/master': 'true',
    }
    aws.tag_instance(master_tags)
    aws.tag_instance_security_group({'KubernetesCluster': cluster_tag})
    # Cloud permissions requested from the integrator charm.
    aws.enable_instance_inspection()
    aws.enable_network_management()
    aws.enable_dns_management()
    aws.enable_load_balancer_management()
    aws.enable_block_storage_management()
    aws.enable_object_storage_management(['kubernetes-*'])
    set_state('kubernetes-master.aws-request-sent')
    hookenv.status_set('waiting', 'waiting for aws integration')
@when_not('endpoint.aws.joined')
def clear_requested_integration():
    """Re-arm the AWS request if the aws relation is removed."""
    remove_state('kubernetes-master.aws-request-sent')
@when('endpoint.aws.ready')
@when_not('kubernetes-master.restarted-for-aws')
def restart_for_aws():
    """Trigger a one-time component restart once AWS integration is ready."""
    set_state('kubernetes-master.restarted-for-aws')
    # Clearing components.started forces the startup handlers to run again.
    remove_state('kubernetes-master.components.started')

View File

@ -1,6 +1,7 @@
repo: https://github.com/kubernetes/kubernetes.git repo: https://github.com/kubernetes/kubernetes.git
includes: includes:
- 'layer:basic' - 'layer:basic'
- 'layer:status'
- 'layer:debug' - 'layer:debug'
- 'layer:snap' - 'layer:snap'
- 'layer:docker' - 'layer:docker'
@ -12,6 +13,7 @@ includes:
- 'interface:kubernetes-cni' - 'interface:kubernetes-cni'
- 'interface:kube-dns' - 'interface:kube-dns'
- 'interface:kube-control' - 'interface:kube-control'
- 'interface:aws'
config: config:
deletes: deletes:
- install_from_upstream - install_from_upstream

View File

@ -28,6 +28,8 @@ requires:
interface: kube-dns interface: kube-dns
kube-control: kube-control:
interface: kube-control interface: kube-control
aws:
interface: aws
provides: provides:
cni: cni:
interface: kubernetes-cni interface: kubernetes-cni

View File

@ -29,6 +29,7 @@ from socket import gethostname, getfqdn
from charms import layer from charms import layer
from charms.layer import snap from charms.layer import snap
from charms.reactive import hook from charms.reactive import hook
from charms.reactive import endpoint_from_flag
from charms.reactive import set_state, remove_state, is_state from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not from charms.reactive import when, when_any, when_not
@ -638,6 +639,9 @@ def configure_kubelet(dns, ingress_ip):
'to kubelet') 'to kubelet')
kubelet_opts['feature-gates'] = 'DevicePlugins=true' kubelet_opts['feature-gates'] = 'DevicePlugins=true'
if is_state('endpoint.aws.ready'):
kubelet_opts['cloud-provider'] = 'aws'
configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args') configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
@ -1082,3 +1086,39 @@ def remove_label(label):
retry = 'Failed to remove label {0}. Will retry.'.format(label) retry = 'Failed to remove label {0}. Will retry.'.format(label)
if not persistent_call(cmd, retry): if not persistent_call(cmd, retry):
raise ApplyNodeLabelFailed(retry) raise ApplyNodeLabelFailed(retry)
@when('endpoint.aws.joined',
      'kube-control.cluster_tag.available')
@when_not('kubernetes-worker.aws-request-sent')
def request_integration():
    """Request AWS integration for the worker over the aws endpoint.

    Tags the instance, its security group, and its subnet with the cluster
    tag received from the master, then asks the integrator for the cloud
    capabilities the worker relies on. Fires once; the 'aws-request-sent'
    flag suppresses re-entry.
    """
    hookenv.status_set('maintenance', 'requesting aws integration')
    kube_control = endpoint_from_flag('kube-control.cluster_tag.available')
    cluster_tag = kube_control.get_cluster_tag()
    aws = endpoint_from_flag('endpoint.aws.joined')
    # Associate all of this worker's cloud resources with the cluster.
    aws.tag_instance({'KubernetesCluster': cluster_tag})
    aws.tag_instance_security_group({'KubernetesCluster': cluster_tag})
    aws.tag_instance_subnet({'KubernetesCluster': cluster_tag})
    # Cloud permissions requested from the integrator charm.
    aws.enable_instance_inspection()
    aws.enable_dns_management()
    aws.enable_object_storage_management(['kubernetes-*'])
    set_state('kubernetes-worker.aws-request-sent')
    hookenv.status_set('waiting', 'waiting for aws integration')
@when_not('endpoint.aws.joined')
def clear_requested_integration():
    """Re-arm the AWS request if the aws relation is removed."""
    remove_state('kubernetes-worker.aws-request-sent')
@when('endpoint.aws.ready')
@when_not('kubernetes-worker.restarted-for-aws')
def restart_for_aws():
    """Flag a one-time worker restart once AWS integration is ready."""
    set_state('kubernetes-worker.restarted-for-aws')
    # The restart-needed flag is picked up by the restart handler elsewhere.
    set_state('kubernetes-worker.restart-needed')