Merge pull request #62354 from johnsca/feature/aws-charm

Automatic merge from submit-queue (batch tested with PRs 62354, 62934, 63502). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add support for AWS charm

Support AWS integration via a proxy charm.



**What this PR does / why we need it**: Adds support for the Kubernetes Juju charms (kubernetes-master and kubernetes-worker) to connect to an AWS integration (proxy) charm, which automatically manages instance tags and IAM roles and sets the `cloud-provider` option for AWS.
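For context on how the new `aws` endpoints are consumed at deploy time (not part of this PR), a minimal bundle overlay might look like the hypothetical sketch below. The proxy charm name and store location (`cs:~containers/aws-integrator`) are assumptions for illustration only, and the proxy charm additionally needs access to AWS credentials (for example via `juju trust`).

```yaml
# Hypothetical overlay, not part of this PR; the charm name/source is an assumption.
applications:
  aws-integrator:
    charm: cs:~containers/aws-integrator
    num_units: 1
relations:
# The 'aws' endpoints on the right-hand side are the ones added by this PR.
- ["aws-integrator", "kubernetes-master:aws"]
- ["aws-integrator", "kubernetes-worker:aws"]
```

Once the relations are in place, the reactive flags introduced below (`endpoint.aws.joined`, `endpoint.aws.ready`) drive the instance and security-group tagging, the IAM permission requests, and the switch to `cloud-provider=aws`.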

**Release note**:

```release-note
NONE
```
Kubernetes Submit Queue 2018-05-09 09:30:08 -07:00 committed by GitHub
commit 002078dc97
6 changed files with 114 additions and 1 deletion

View File: kubernetes-master layer.yaml

@@ -1,6 +1,7 @@
repo: https://github.com/kubernetes/kubernetes.git
includes:
- 'layer:basic'
- 'layer:status'
- 'layer:snap'
- 'layer:tls-client'
- 'layer:leadership'
@@ -15,6 +16,7 @@ includes:
- 'interface:kube-dns'
- 'interface:kube-control'
- 'interface:public-address'
- 'interface:aws'
options:
  basic:
    packages:

View File: kubernetes-master metadata.yaml

@@ -40,6 +40,8 @@ requires:
    interface: public-address
  ceph-storage:
    interface: ceph-admin
  aws:
    interface: aws
resources:
  kubectl:
    type: file

View File: kubernetes-master reactive/kubernetes_master.py

@@ -39,6 +39,7 @@ from charms.reactive import hook
from charms.reactive import remove_state
from charms.reactive import set_state
from charms.reactive import is_state
from charms.reactive import endpoint_from_flag
from charms.reactive import when, when_any, when_not
from charms.reactive.helpers import data_changed, any_file_changed
from charms.kubernetes.common import get_version
@@ -1224,6 +1225,9 @@ def configure_apiserver(etcd_connection_string, leader_etcd_version):
    api_opts['enable-aggregator-routing'] = 'true'
    api_opts['client-ca-file'] = ca_cert_path

    if is_state('endpoint.aws.ready'):
        api_opts['cloud-provider'] = 'aws'

    configure_kubernetes_service('kube-apiserver', api_opts, 'api-extra-args')
    restart_apiserver()
@@ -1245,6 +1249,9 @@ def configure_controller_manager():
    controller_opts['service-account-private-key-file'] = \
        '/root/cdk/serviceaccount.key'

    if is_state('endpoint.aws.ready'):
        controller_opts['cloud-provider'] = 'aws'

    configure_kubernetes_service('kube-controller-manager', controller_opts,
                                 'controller-manager-extra-args')
    restart_controller_manager()
@@ -1377,3 +1384,61 @@ def getStorageBackend():
    if storage_backend == 'auto':
        storage_backend = leader_get('auto_storage_backend')
    return storage_backend
@when('leadership.is_leader')
@when_not('leadership.set.cluster_tag')
def create_cluster_tag():
    cluster_tag = 'kubernetes-{}'.format(token_generator())
    leader_set(cluster_tag=cluster_tag)


@when('leadership.set.cluster_tag',
      'kube-control.connected')
@when_not('kubernetes-master.cluster-tag-sent')
def send_cluster_tag():
    cluster_tag = leader_get('cluster_tag')
    kube_control = endpoint_from_flag('kube-control.connected')
    kube_control.set_cluster_tag(cluster_tag)
    set_state('kubernetes-master.cluster-tag-sent')


@when_not('kube-control.connected')
def clear_cluster_tag_sent():
    remove_state('kubernetes-master.cluster-tag-sent')


@when('endpoint.aws.joined',
      'leadership.set.cluster_tag')
@when_not('kubernetes-master.aws-request-sent')
def request_integration():
    hookenv.status_set('maintenance', 'requesting aws integration')
    aws = endpoint_from_flag('endpoint.aws.joined')
    cluster_tag = leader_get('cluster_tag')
    aws.tag_instance({
        'KubernetesCluster': cluster_tag,
        'k8s.io/role/master': 'true',
    })
    aws.tag_instance_security_group({
        'KubernetesCluster': cluster_tag,
    })
    aws.enable_instance_inspection()
    aws.enable_network_management()
    aws.enable_dns_management()
    aws.enable_load_balancer_management()
    aws.enable_block_storage_management()
    aws.enable_object_storage_management(['kubernetes-*'])
    set_state('kubernetes-master.aws-request-sent')
    hookenv.status_set('waiting', 'waiting for aws integration')


@when_not('endpoint.aws.joined')
def clear_requested_integration():
    remove_state('kubernetes-master.aws-request-sent')


@when('endpoint.aws.ready')
@when_not('kubernetes-master.restarted-for-aws')
def restart_for_aws():
    set_state('kubernetes-master.restarted-for-aws')
    remove_state('kubernetes-master.components.started')  # force restart

View File: kubernetes-worker layer.yaml

@@ -1,6 +1,7 @@
repo: https://github.com/kubernetes/kubernetes.git
includes:
- 'layer:basic'
- 'layer:status'
- 'layer:debug'
- 'layer:snap'
- 'layer:docker'
@@ -12,6 +13,7 @@ includes:
- 'interface:kubernetes-cni'
- 'interface:kube-dns'
- 'interface:kube-control'
- 'interface:aws'
config:
  deletes:
    - install_from_upstream

View File: kubernetes-worker metadata.yaml

@@ -28,6 +28,8 @@ requires:
    interface: kube-dns
  kube-control:
    interface: kube-control
  aws:
    interface: aws
provides:
  cni:
    interface: kubernetes-cni

View File: kubernetes-worker reactive/kubernetes_worker.py

@@ -29,6 +29,7 @@ from socket import gethostname, getfqdn
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import endpoint_from_flag
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not
@@ -623,6 +624,9 @@ def configure_kubelet(dns, ingress_ip):
                    'to kubelet')
        kubelet_opts['feature-gates'] = 'DevicePlugins=true'

    if is_state('endpoint.aws.ready'):
        kubelet_opts['cloud-provider'] = 'aws'

    configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
@@ -738,7 +742,7 @@ def launch_default_ingress_controller():
"k8s.gcr.io/nginx-ingress-controller-arm64:0.9.0-beta.15"
else:
context['ingress_image'] = \
"k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.15" # noqa
"k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.15" # noqa
if get_version('kubelet') < (1, 9):
context['daemonset_api_version'] = 'extensions/v1beta1'
else:
@@ -1067,3 +1071,39 @@ def remove_label(label):
    retry = 'Failed to remove label {0}. Will retry.'.format(label)
    if not persistent_call(cmd, retry):
        raise ApplyNodeLabelFailed(retry)


@when('endpoint.aws.joined',
      'kube-control.cluster_tag.available')
@when_not('kubernetes-worker.aws-request-sent')
def request_integration():
    kube_control = endpoint_from_flag('kube-control.cluster_tag.available')
    hookenv.status_set('maintenance', 'requesting aws integration')
    aws = endpoint_from_flag('endpoint.aws.joined')
    cluster_tag = kube_control.get_cluster_tag()
    aws.tag_instance({
        'KubernetesCluster': cluster_tag,
    })
    aws.tag_instance_security_group({
        'KubernetesCluster': cluster_tag,
    })
    aws.tag_instance_subnet({
        'KubernetesCluster': cluster_tag,
    })
    aws.enable_instance_inspection()
    aws.enable_dns_management()
    aws.enable_object_storage_management(['kubernetes-*'])
    set_state('kubernetes-worker.aws-request-sent')
    hookenv.status_set('waiting', 'waiting for aws integration')


@when_not('endpoint.aws.joined')
def clear_requested_integration():
    remove_state('kubernetes-worker.aws-request-sent')


@when('endpoint.aws.ready')
@when_not('kubernetes-worker.restarted-for-aws')
def restart_for_aws():
    set_state('kubernetes-worker.restarted-for-aws')
    set_state('kubernetes-worker.restart-needed')