diff --git a/cluster/juju/layers/kubernetes-master/layer.yaml b/cluster/juju/layers/kubernetes-master/layer.yaml
index ed496bc31af..9a53451dce5 100644
--- a/cluster/juju/layers/kubernetes-master/layer.yaml
+++ b/cluster/juju/layers/kubernetes-master/layer.yaml
@@ -1,6 +1,7 @@
 repo: https://github.com/kubernetes/kubernetes.git
 includes:
   - 'layer:basic'
+  - 'layer:status'
   - 'layer:snap'
   - 'layer:tls-client'
   - 'layer:leadership'
@@ -15,6 +16,7 @@ includes:
   - 'interface:kube-dns'
   - 'interface:kube-control'
   - 'interface:public-address'
+  - 'interface:aws'
 options:
   basic:
     packages:
diff --git a/cluster/juju/layers/kubernetes-master/metadata.yaml b/cluster/juju/layers/kubernetes-master/metadata.yaml
index fa1f62b74c4..dfba03c99aa 100644
--- a/cluster/juju/layers/kubernetes-master/metadata.yaml
+++ b/cluster/juju/layers/kubernetes-master/metadata.yaml
@@ -40,6 +40,8 @@ requires:
     interface: public-address
   ceph-storage:
     interface: ceph-admin
+  aws:
+    interface: aws
 resources:
   kubectl:
     type: file
diff --git a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py
index 18aa3564b67..36207665d1a 100644
--- a/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py
+++ b/cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py
@@ -39,6 +39,7 @@ from charms.reactive import hook
 from charms.reactive import remove_state
 from charms.reactive import set_state
 from charms.reactive import is_state
+from charms.reactive import endpoint_from_flag
 from charms.reactive import when, when_any, when_not
 from charms.reactive.helpers import data_changed, any_file_changed
 from charms.kubernetes.common import get_version
@@ -1224,6 +1225,9 @@ def configure_apiserver(etcd_connection_string, leader_etcd_version):
         api_opts['enable-aggregator-routing'] = 'true'
 
     api_opts['client-ca-file'] = ca_cert_path
 
+    if is_state('endpoint.aws.ready'):
+        api_opts['cloud-provider'] = 'aws'
+
     configure_kubernetes_service('kube-apiserver', api_opts, 'api-extra-args')
     restart_apiserver()
@@ -1245,6 +1249,9 @@ def configure_controller_manager():
     controller_opts['service-account-private-key-file'] = \
         '/root/cdk/serviceaccount.key'
 
+    if is_state('endpoint.aws.ready'):
+        controller_opts['cloud-provider'] = 'aws'
+
     configure_kubernetes_service('kube-controller-manager', controller_opts,
                                  'controller-manager-extra-args')
     restart_controller_manager()
@@ -1377,3 +1384,61 @@ def getStorageBackend():
     if storage_backend == 'auto':
         storage_backend = leader_get('auto_storage_backend')
     return storage_backend
+
+
+@when('leadership.is_leader')
+@when_not('leadership.set.cluster_tag')
+def create_cluster_tag():
+    cluster_tag = 'kubernetes-{}'.format(token_generator())
+    leader_set(cluster_tag=cluster_tag)
+
+
+@when('leadership.set.cluster_tag',
+      'kube-control.connected')
+@when_not('kubernetes-master.cluster-tag-sent')
+def send_cluster_tag():
+    cluster_tag = leader_get('cluster_tag')
+    kube_control = endpoint_from_flag('kube-control.connected')
+    kube_control.set_cluster_tag(cluster_tag)
+    set_state('kubernetes-master.cluster-tag-sent')
+
+
+@when_not('kube-control.connected')
+def clear_cluster_tag_sent():
+    remove_state('kubernetes-master.cluster-tag-sent')
+
+
+@when('endpoint.aws.joined',
+      'leadership.set.cluster_tag')
+@when_not('kubernetes-master.aws-request-sent')
+def request_integration():
+    hookenv.status_set('maintenance', 'requesting aws integration')
+    aws = endpoint_from_flag('endpoint.aws.joined')
+    cluster_tag = leader_get('cluster_tag')
+    aws.tag_instance({
+        'KubernetesCluster': cluster_tag,
+        'k8s.io/role/master': 'true',
+    })
+    aws.tag_instance_security_group({
+        'KubernetesCluster': cluster_tag,
+    })
+    aws.enable_instance_inspection()
+    aws.enable_network_management()
+    aws.enable_dns_management()
+    aws.enable_load_balancer_management()
+    aws.enable_block_storage_management()
+    aws.enable_object_storage_management(['kubernetes-*'])
+    set_state('kubernetes-master.aws-request-sent')
+    hookenv.status_set('waiting', 'waiting for aws integration')
+
+
+@when_not('endpoint.aws.joined')
+def clear_requested_integration():
+    remove_state('kubernetes-master.aws-request-sent')
+
+
+@when('endpoint.aws.ready')
+@when_not('kubernetes-master.restarted-for-aws')
+def restart_for_aws():
+    set_state('kubernetes-master.restarted-for-aws')
+    remove_state('kubernetes-master.components.started')  # force restart
diff --git a/cluster/juju/layers/kubernetes-worker/layer.yaml b/cluster/juju/layers/kubernetes-worker/layer.yaml
index b9f3768a4e5..26aaa4d40ae 100644
--- a/cluster/juju/layers/kubernetes-worker/layer.yaml
+++ b/cluster/juju/layers/kubernetes-worker/layer.yaml
@@ -1,6 +1,7 @@
 repo: https://github.com/kubernetes/kubernetes.git
 includes:
   - 'layer:basic'
+  - 'layer:status'
   - 'layer:debug'
   - 'layer:snap'
   - 'layer:docker'
@@ -12,6 +13,7 @@ includes:
   - 'interface:kubernetes-cni'
   - 'interface:kube-dns'
   - 'interface:kube-control'
+  - 'interface:aws'
 config:
   deletes:
     - install_from_upstream
diff --git a/cluster/juju/layers/kubernetes-worker/metadata.yaml b/cluster/juju/layers/kubernetes-worker/metadata.yaml
index 48715a4d988..cc09efb8a4d 100644
--- a/cluster/juju/layers/kubernetes-worker/metadata.yaml
+++ b/cluster/juju/layers/kubernetes-worker/metadata.yaml
@@ -28,6 +28,8 @@ requires:
     interface: kube-dns
   kube-control:
     interface: kube-control
+  aws:
+    interface: aws
 provides:
   cni:
     interface: kubernetes-cni
diff --git a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
index 13d0e4a5f2b..6540b6250bc 100644
--- a/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
+++ b/cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py
@@ -29,6 +29,7 @@ from socket import gethostname, getfqdn
 
 from charms import layer
 from charms.layer import snap
 from charms.reactive import hook
+from charms.reactive import endpoint_from_flag
 from charms.reactive import set_state, remove_state, is_state
 from charms.reactive import when, when_any, when_not
@@ -623,6 +624,9 @@ def configure_kubelet(dns, ingress_ip):
                     'to kubelet')
         kubelet_opts['feature-gates'] = 'DevicePlugins=true'
 
+    if is_state('endpoint.aws.ready'):
+        kubelet_opts['cloud-provider'] = 'aws'
+
     configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
 
 
@@ -738,7 +742,7 @@ def launch_default_ingress_controller():
             "k8s.gcr.io/nginx-ingress-controller-arm64:0.9.0-beta.15"
     else:
         context['ingress_image'] = \
-            "k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.15" # noqa
+            "k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.15"  # noqa
     if get_version('kubelet') < (1, 9):
         context['daemonset_api_version'] = 'extensions/v1beta1'
     else:
@@ -1067,3 +1071,39 @@ def remove_label(label):
     retry = 'Failed to remove label {0}. Will retry.'.format(label)
     if not persistent_call(cmd, retry):
         raise ApplyNodeLabelFailed(retry)
+
+
+@when('endpoint.aws.joined',
+      'kube-control.cluster_tag.available')
+@when_not('kubernetes-worker.aws-request-sent')
+def request_integration():
+    kube_control = endpoint_from_flag('kube-control.cluster_tag.available')
+    hookenv.status_set('maintenance', 'requesting aws integration')
+    aws = endpoint_from_flag('endpoint.aws.joined')
+    cluster_tag = kube_control.get_cluster_tag()
+    aws.tag_instance({
+        'KubernetesCluster': cluster_tag,
+    })
+    aws.tag_instance_security_group({
+        'KubernetesCluster': cluster_tag,
+    })
+    aws.tag_instance_subnet({
+        'KubernetesCluster': cluster_tag,
+    })
+    aws.enable_instance_inspection()
+    aws.enable_dns_management()
+    aws.enable_object_storage_management(['kubernetes-*'])
+    set_state('kubernetes-worker.aws-request-sent')
+    hookenv.status_set('waiting', 'waiting for aws integration')
+
+
+@when_not('endpoint.aws.joined')
+def clear_requested_integration():
+    remove_state('kubernetes-worker.aws-request-sent')
+
+
+@when('endpoint.aws.ready')
+@when_not('kubernetes-worker.restarted-for-aws')
+def restart_for_aws():
+    set_state('kubernetes-worker.restarted-for-aws')
+    set_state('kubernetes-worker.restart-needed')