Change the ingress controller from a manually scaled replication controller (rc) to a daemon set.

This commit is contained in:
Mike Wilson 2017-12-21 18:22:43 -05:00
parent d7e5bd194a
commit 1b814c43ad
2 changed files with 83 additions and 42 deletions

View File

@ -20,6 +20,7 @@ import random
import shutil import shutil
import subprocess import subprocess
import time import time
import json
from shlex import split from shlex import split
from subprocess import check_call, check_output from subprocess import check_call, check_output
@ -63,6 +64,10 @@ def upgrade_charm():
cleanup_pre_snap_services() cleanup_pre_snap_services()
check_resources_for_upgrade_needed() check_resources_for_upgrade_needed()
# Remove the RC for nginx ingress if it exists
if hookenv.config().get('ingress'):
kubectl_success('delete', 'rc', 'nginx-ingress-controller')
# Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags, # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
# since they can differ between k8s versions # since they can differ between k8s versions
remove_state('kubernetes-worker.gpu.enabled') remove_state('kubernetes-worker.gpu.enabled')
@ -373,7 +378,7 @@ def sdn_changed():
@when('kubernetes-worker.config.created') @when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available') @when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress(): def render_and_launch_ingress():
''' If configuration has ingress RC enabled, launch the ingress load ''' If configuration has ingress daemon set enabled, launch the ingress load
balancer and default http backend. Otherwise attempt deletion. ''' balancer and default http backend. Otherwise attempt deletion. '''
config = hookenv.config() config = hookenv.config()
# If ingress is enabled, launch the ingress controller # If ingress is enabled, launch the ingress controller
@ -384,23 +389,11 @@ def render_and_launch_ingress():
kubectl_manifest('delete', kubectl_manifest('delete',
'/root/cdk/addons/default-http-backend.yaml') '/root/cdk/addons/default-http-backend.yaml')
kubectl_manifest('delete', kubectl_manifest('delete',
'/root/cdk/addons/ingress-replication-controller.yaml') # noqa '/root/cdk/addons/ingress-daemon-set.yaml') # noqa
hookenv.close_port(80) hookenv.close_port(80)
hookenv.close_port(443) hookenv.close_port(443)
@when('kubernetes-worker.ingress.available')
def scale_ingress_controller():
    '''Match the ingress controller replica count to the cluster node count.

    Best-effort: a failed kubectl call is logged and retried on the next
    update cycle rather than raised.
    '''
    try:
        # One line per node from `kubectl get nodes -o name`.
        node_list = kubectl('get', 'nodes', '-o', 'name')
        node_count = len(node_list.splitlines())
        replicas_arg = '--replicas=%d' % node_count
        kubectl('scale', replicas_arg, 'rc/nginx-ingress-controller')
    except CalledProcessError:
        hookenv.log('Failed to scale ingress controllers. Will attempt again next update.')  # noqa
@when('config.changed.labels', 'kubernetes-worker.config.created') @when('config.changed.labels', 'kubernetes-worker.config.created')
def apply_node_labels(): def apply_node_labels():
''' Parse the labels configuration option and apply the labels to the node. ''' Parse the labels configuration option and apply the labels to the node.
@ -429,6 +422,10 @@ def apply_node_labels():
for label in user_labels: for label in user_labels:
_apply_node_label(label, overwrite=True) _apply_node_label(label, overwrite=True)
# Set label for application name
_apply_node_label('juju-application={}'.format(hookenv.service_name()),
overwrite=True)
@when_any('config.changed.kubelet-extra-args', @when_any('config.changed.kubelet-extra-args',
'config.changed.proxy-extra-args') 'config.changed.proxy-extra-args')
@ -653,15 +650,16 @@ def launch_default_ingress_controller():
hookenv.close_port(443) hookenv.close_port(443)
return return
# Render the ingress replication controller manifest # Render the ingress daemon set controller manifest
context['ingress_image'] = \ context['ingress_image'] = \
"k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.13" "k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.13"
if arch() == 's390x': if arch() == 's390x':
context['ingress_image'] = \ context['ingress_image'] = \
"docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13" "docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
manifest = addon_path.format('ingress-replication-controller.yaml') context['juju_application'] = hookenv.service_name()
render('ingress-replication-controller.yaml', manifest, context) manifest = addon_path.format('ingress-daemon-set.yaml')
hookenv.log('Creating the ingress replication controller.') render('ingress-daemon-set.yaml', manifest, context)
hookenv.log('Creating the ingress daemon set.')
try: try:
kubectl('apply', '-f', manifest) kubectl('apply', '-f', manifest)
except CalledProcessError as e: except CalledProcessError as e:
@ -934,24 +932,66 @@ def _systemctl_is_active(application):
return False return False
class GetNodeNameFailed(Exception):
    '''Raised when the list of cluster nodes cannot be fetched, so this
    host's Kubernetes node name cannot be resolved.'''
    pass
def get_node_name():
    '''Return the Kubernetes node name for this host.

    Queries the API server (via kubectl, retrying for up to 60 seconds)
    for all nodes, then matches this machine's hostname against each
    node's Hostname address.

    Returns:
        str: The matching node's metadata name, or "" when no node's
        Hostname address matches this host.

    Raises:
        GetNodeNameFailed: if the node list cannot be fetched before the
        deadline expires.
    '''
    # Get all the nodes in the cluster as JSON.
    cmd = 'kubectl --kubeconfig={} get no -o=json'.format(kubeconfig_path)
    cmd = cmd.split()
    deadline = time.time() + 60
    while time.time() < deadline:
        try:
            raw = check_output(cmd)
            break
        except CalledProcessError:
            hookenv.log('Failed to get node name for node %s.'
                        ' Will retry.' % (gethostname()))
            time.sleep(1)
    else:
        # Loop exhausted without a successful fetch.
        msg = 'Failed to get node name for node %s' % gethostname()
        raise GetNodeNameFailed(msg)

    # k8s lowercases hostnames when deriving node names, so compare
    # case-insensitively (matches the previous gethostname().lower() logic).
    hostname = gethostname().lower()
    result = json.loads(raw.decode('utf-8'))
    for node in result.get('items', []):
        addresses = node.get('status', {}).get('addresses', [])
        for address in addresses:
            if address['type'] == 'Hostname':
                if address['address'].lower() == hostname:
                    return node['metadata']['name']
                # First Hostname entry didn't match; bail to the next node.
                break
    return ""
class ApplyNodeLabelFailed(Exception): class ApplyNodeLabelFailed(Exception):
pass pass
def _apply_node_label(label, delete=False, overwrite=False): def _apply_node_label(label, delete=False, overwrite=False):
''' Invoke kubectl to apply node label changes ''' ''' Invoke kubectl to apply node label changes '''
nodename = get_node_name()
if nodename == "":
msg = 'Unable to get node name for node {}'.format(gethostname())
raise ApplyNodeLabelFailed(msg)
# k8s lowercases hostnames and uses them as node names
hostname = gethostname().lower()
# TODO: Make this part of the kubectl calls instead of a special string # TODO: Make this part of the kubectl calls instead of a special string
cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}' cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}'
if delete is True: if delete is True:
label_key = label.split('=')[0] label_key = label.split('=')[0]
cmd = cmd_base.format(kubeconfig_path, hostname, label_key) cmd = cmd_base.format(kubeconfig_path, nodename, label_key)
cmd = cmd + '-' cmd = cmd + '-'
else: else:
cmd = cmd_base.format(kubeconfig_path, hostname, label) cmd = cmd_base.format(kubeconfig_path, nodename, label)
if overwrite: if overwrite:
cmd = '{} --overwrite'.format(cmd) cmd = '{} --overwrite'.format(cmd)
cmd = cmd.split() cmd = cmd.split()

View File

@ -1,12 +1,12 @@
apiVersion: v1 apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:
name: nginx-ingress-serviceaccount name: nginx-ingress-{{ juju_application }}-serviceaccount
--- ---
apiVersion: rbac.authorization.k8s.io/v1beta1 apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole kind: ClusterRole
metadata: metadata:
name: nginx-ingress-clusterrole name: nginx-ingress-{{ juju_application }}-clusterrole
rules: rules:
- apiGroups: - apiGroups:
- "" - ""
@ -58,7 +58,7 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1beta1 apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role kind: Role
metadata: metadata:
name: nginx-ingress-role name: nginx-ingress-{{ juju_application }}-role
rules: rules:
- apiGroups: - apiGroups:
- "" - ""
@ -100,57 +100,58 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1beta1 apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding kind: RoleBinding
metadata: metadata:
name: nginx-ingress-role-nisa-binding name: nginx-ingress-role-nisa-{{ juju_application }}-binding
roleRef: roleRef:
apiGroup: rbac.authorization.k8s.io apiGroup: rbac.authorization.k8s.io
kind: Role kind: Role
name: nginx-ingress-role name: nginx-ingress-{{ juju_application }}-role
subjects: subjects:
- kind: ServiceAccount - kind: ServiceAccount
name: nginx-ingress-serviceaccount name: nginx-ingress-{{ juju_application }}-serviceaccount
--- ---
apiVersion: rbac.authorization.k8s.io/v1beta1 apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding kind: ClusterRoleBinding
metadata: metadata:
name: nginx-ingress-clusterrole-nisa-binding name: nginx-ingress-clusterrole-nisa-{{ juju_application }}-binding
roleRef: roleRef:
apiGroup: rbac.authorization.k8s.io apiGroup: rbac.authorization.k8s.io
kind: ClusterRole kind: ClusterRole
name: nginx-ingress-clusterrole name: nginx-ingress-{{ juju_application }}-clusterrole
subjects: subjects:
- kind: ServiceAccount - kind: ServiceAccount
name: nginx-ingress-serviceaccount name: nginx-ingress-{{ juju_application }}-serviceaccount
namespace: default namespace: default
--- ---
apiVersion: v1 apiVersion: v1
kind: ConfigMap kind: ConfigMap
metadata: metadata:
name: nginx-load-balancer-conf name: nginx-load-balancer-{{ juju_application }}-conf
--- ---
apiVersion: v1 apiVersion: apps/v1beta2
kind: ReplicationController kind: DaemonSet
metadata: metadata:
name: nginx-ingress-controller name: nginx-ingress-{{ juju_application }}-controller
labels: labels:
k8s-app: nginx-ingress-lb juju-application: nginx-ingress-{{ juju_application }}
spec: spec:
replicas: 1
selector: selector:
k8s-app: nginx-ingress-lb matchLabels:
name: nginx-ingress-{{ juju_application }}
template: template:
metadata: metadata:
labels: labels:
k8s-app: nginx-ingress-lb name: nginx-ingress-{{ juju_application }}
name: nginx-ingress-lb
spec: spec:
nodeSelector:
juju-application: {{ juju_application }}
terminationGracePeriodSeconds: 60 terminationGracePeriodSeconds: 60
# hostPort doesn't work with CNI, so we have to use hostNetwork instead # hostPort doesn't work with CNI, so we have to use hostNetwork instead
# see https://github.com/kubernetes/kubernetes/issues/23920 # see https://github.com/kubernetes/kubernetes/issues/23920
hostNetwork: true hostNetwork: true
serviceAccountName: nginx-ingress-serviceaccount serviceAccountName: nginx-ingress-{{ juju_application }}-serviceaccount
containers: containers:
- image: {{ ingress_image }} - image: {{ ingress_image }}
name: nginx-ingress-lb name: nginx-ingress-{{ juju_application }}
imagePullPolicy: Always imagePullPolicy: Always
livenessProbe: livenessProbe:
httpGet: httpGet: