Change ingress from a manually scaled ReplicationController to a DaemonSet.

Previously a handler re-scaled rc/nginx-ingress-controller to the current node count on every update. A DaemonSet lets Kubernetes keep one ingress controller pod per labeled node instead: workers are labeled with their Juju application name, the DaemonSet selects on that label, and every manifest resource name is parameterized with {{ juju_application }} so multiple worker applications can run side by side.

Mike Wilson 2017-12-21 18:22:43 -05:00
parent d7e5bd194a
commit 1b814c43ad
2 changed files with 83 additions and 42 deletions

@@ -20,6 +20,7 @@ import random
 import shutil
 import subprocess
 import time
+import json
 from shlex import split
 from subprocess import check_call, check_output
@@ -63,6 +64,10 @@ def upgrade_charm():
     cleanup_pre_snap_services()
     check_resources_for_upgrade_needed()
 
+    # Remove the RC for nginx ingress if it exists
+    if hookenv.config().get('ingress'):
+        kubectl_success('delete', 'rc', 'nginx-ingress-controller')
+
     # Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
     # since they can differ between k8s versions
     remove_state('kubernetes-worker.gpu.enabled')
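This hunk leans on the charm's existing kubectl_success helper to make the RC deletion idempotent during upgrade. A minimal sketch of such a helper, assuming it simply shells out and returns a boolean instead of raising (the kubeconfig path here is illustrative, not necessarily the charm's actual one):

from subprocess import CalledProcessError, check_call

kubeconfig_path = '/root/.kube/config'  # hypothetical path, for illustration only


def kubectl_success(*args):
    ''' Run kubectl with the given args; True on success, False on failure. '''
    cmd = ['kubectl', '--kubeconfig={}'.format(kubeconfig_path)] + list(args)
    try:
        check_call(cmd)
        return True
    except CalledProcessError:
        return False

Because the failure is swallowed rather than raised, the upgrade proceeds cleanly even on workers where the old RC was never created.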
@@ -373,7 +378,7 @@ def sdn_changed():
 @when('kubernetes-worker.config.created')
 @when_not('kubernetes-worker.ingress.available')
 def render_and_launch_ingress():
-    ''' If configuration has ingress RC enabled, launch the ingress load
+    ''' If configuration has ingress daemon set enabled, launch the ingress load
     balancer and default http backend. Otherwise attempt deletion. '''
     config = hookenv.config()
     # If ingress is enabled, launch the ingress controller
@@ -384,23 +389,11 @@ def render_and_launch_ingress():
         kubectl_manifest('delete',
                          '/root/cdk/addons/default-http-backend.yaml')
         kubectl_manifest('delete',
-                         '/root/cdk/addons/ingress-replication-controller.yaml')  # noqa
+                         '/root/cdk/addons/ingress-daemon-set.yaml')  # noqa
         hookenv.close_port(80)
         hookenv.close_port(443)
 
 
-@when('kubernetes-worker.ingress.available')
-def scale_ingress_controller():
-    ''' Scale the number of ingress controller replicas to match the number of
-    nodes. '''
-    try:
-        output = kubectl('get', 'nodes', '-o', 'name')
-        count = len(output.splitlines())
-        kubectl('scale', '--replicas=%d' % count, 'rc/nginx-ingress-controller')  # noqa
-    except CalledProcessError:
-        hookenv.log('Failed to scale ingress controllers. Will attempt again next update.')  # noqa
-
-
 @when('config.changed.labels', 'kubernetes-worker.config.created')
 def apply_node_labels():
     ''' Parse the labels configuration option and apply the labels to the node.
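The deleted scale_ingress_controller handler existed only to keep the RC's replica count in step with the node count, and could lag node additions until the next update. Under a DaemonSet, Kubernetes itself reconciles one controller pod per matching node, so no charm-side loop is needed. A hedged convergence check, with the DaemonSet name derived from the template later in this commit (the application name is hypothetical):

import json
from subprocess import check_output


def ingress_converged(app='kubernetes-worker'):
    ''' True once the ingress DaemonSet has a ready pod on every node
    it is scheduled to; names here are illustrative only. '''
    raw = check_output(['kubectl', 'get', 'ds',
                        'nginx-ingress-{}-controller'.format(app),
                        '-o', 'json'])
    status = json.loads(raw.decode('utf-8'))['status']
    return status.get('numberReady') == status.get('desiredNumberScheduled')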
@@ -429,6 +422,10 @@ def apply_node_labels():
     for label in user_labels:
         _apply_node_label(label, overwrite=True)
 
+    # Set label for application name
+    _apply_node_label('juju-application={}'.format(hookenv.service_name()),
+                      overwrite=True)
+
 
 @when_any('config.changed.kubelet-extra-args',
           'config.changed.proxy-extra-args')
@@ -653,15 +650,16 @@ def launch_default_ingress_controller():
         hookenv.close_port(443)
         return
 
-    # Render the ingress replication controller manifest
+    # Render the ingress daemon set controller manifest
     context['ingress_image'] = \
         "k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.13"
     if arch() == 's390x':
         context['ingress_image'] = \
             "docker.io/cdkbot/nginx-ingress-controller-s390x:0.9.0-beta.13"
-    manifest = addon_path.format('ingress-replication-controller.yaml')
-    render('ingress-replication-controller.yaml', manifest, context)
-    hookenv.log('Creating the ingress replication controller.')
+    context['juju_application'] = hookenv.service_name()
+    manifest = addon_path.format('ingress-daemon-set.yaml')
+    render('ingress-daemon-set.yaml', manifest, context)
+    hookenv.log('Creating the ingress daemon set.')
     try:
         kubectl('apply', '-f', manifest)
     except CalledProcessError as e:
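The render() call here appears to be the Jinja2-backed charmhelpers templating helper, so the new juju_application key becomes available to the {{ juju_application }} placeholders in the manifest template. An equivalent plain-Jinja2 sketch, with the template path and application name as illustrative assumptions:

from jinja2 import Template

context = {
    'ingress_image': 'k8s.gcr.io/nginx-ingress-controller:0.9.0-beta.13',
    'juju_application': 'kubernetes-worker',  # hookenv.service_name() at runtime
}
with open('templates/ingress-daemon-set.yaml') as f:
    manifest_text = Template(f.read()).render(context)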
@@ -934,24 +932,66 @@ def _systemctl_is_active(application):
         return False
 
 
+class GetNodeNameFailed(Exception):
+    pass
+
+
+def get_node_name():
+    # Get all the nodes in the cluster
+    cmd = 'kubectl --kubeconfig={} get no -o=json'.format(kubeconfig_path)
+    cmd = cmd.split()
+    deadline = time.time() + 60
+    while time.time() < deadline:
+        try:
+            raw = check_output(cmd)
+            break
+        except CalledProcessError:
+            hookenv.log('Failed to get node name for node %s.'
+                        ' Will retry.' % (gethostname()))
+            time.sleep(1)
+    else:
+        msg = 'Failed to get node name for node %s' % gethostname()
+        raise GetNodeNameFailed(msg)
+
+    result = json.loads(raw.decode('utf-8'))
+    if 'items' in result:
+        for node in result['items']:
+            if 'status' not in node:
+                continue
+            if 'addresses' not in node['status']:
+                continue
+            # find the hostname
+            for address in node['status']['addresses']:
+                if address['type'] == 'Hostname':
+                    if address['address'] == gethostname():
+                        return node['metadata']['name']
+                    # if we didn't match, just bail to the next node
+                    break
+    return ""
+
+
+class ApplyNodeLabelFailed(Exception):
+    pass
+
+
 def _apply_node_label(label, delete=False, overwrite=False):
     ''' Invoke kubectl to apply node label changes '''
+    nodename = get_node_name()
+    if nodename == "":
+        msg = 'Unable to get node name for node {}'.format(gethostname())
+        raise ApplyNodeLabelFailed(msg)
 
-    # k8s lowercases hostnames and uses them as node names
-    hostname = gethostname().lower()
     # TODO: Make this part of the kubectl calls instead of a special string
     cmd_base = 'kubectl --kubeconfig={0} label node {1} {2}'
 
     if delete is True:
         label_key = label.split('=')[0]
-        cmd = cmd_base.format(kubeconfig_path, hostname, label_key)
+        cmd = cmd_base.format(kubeconfig_path, nodename, label_key)
         cmd = cmd + '-'
     else:
-        cmd = cmd_base.format(kubeconfig_path, hostname, label)
+        cmd = cmd_base.format(kubeconfig_path, nodename, label)
     if overwrite:
         cmd = '{} --overwrite'.format(cmd)
     cmd = cmd.split()
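The hostname-matching walk in get_node_name is easy to sanity-check offline against a faked 'kubectl get no -o=json' payload; the node and host names below are made up for illustration:

import json

fake_nodes = {'items': [{
    'metadata': {'name': 'juju-abc123-0'},
    'status': {'addresses': [
        {'type': 'Hostname', 'address': 'juju-abc123-0'},
        {'type': 'InternalIP', 'address': '10.0.0.5'},
    ]},
}]}


def match_node(result, hostname):
    ''' Same traversal as get_node_name, minus the kubectl call. '''
    for node in result.get('items', []):
        for address in node.get('status', {}).get('addresses', []):
            if address['type'] == 'Hostname':
                if address['address'] == hostname:
                    return node['metadata']['name']
                break  # wrong hostname; move on to the next node
    return ""

assert match_node(fake_nodes, 'juju-abc123-0') == 'juju-abc123-0'
assert match_node(fake_nodes, 'some-other-host') == ""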

@@ -1,12 +1,12 @@
 apiVersion: v1
 kind: ServiceAccount
 metadata:
-  name: nginx-ingress-serviceaccount
+  name: nginx-ingress-{{ juju_application }}-serviceaccount
 ---
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRole
 metadata:
-  name: nginx-ingress-clusterrole
+  name: nginx-ingress-{{ juju_application }}-clusterrole
 rules:
 - apiGroups:
   - ""
@@ -58,7 +58,7 @@ rules:
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: Role
 metadata:
-  name: nginx-ingress-role
+  name: nginx-ingress-{{ juju_application }}-role
 rules:
 - apiGroups:
   - ""
@@ -100,57 +100,58 @@ rules:
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: RoleBinding
 metadata:
-  name: nginx-ingress-role-nisa-binding
+  name: nginx-ingress-role-nisa-{{ juju_application }}-binding
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
-  name: nginx-ingress-role
+  name: nginx-ingress-{{ juju_application }}-role
 subjects:
 - kind: ServiceAccount
-  name: nginx-ingress-serviceaccount
+  name: nginx-ingress-{{ juju_application }}-serviceaccount
 ---
 apiVersion: rbac.authorization.k8s.io/v1beta1
 kind: ClusterRoleBinding
 metadata:
-  name: nginx-ingress-clusterrole-nisa-binding
+  name: nginx-ingress-clusterrole-nisa-{{ juju_application }}-binding
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
-  name: nginx-ingress-clusterrole
+  name: nginx-ingress-{{ juju_application }}-clusterrole
 subjects:
 - kind: ServiceAccount
-  name: nginx-ingress-serviceaccount
+  name: nginx-ingress-{{ juju_application }}-serviceaccount
   namespace: default
 ---
 apiVersion: v1
 kind: ConfigMap
 metadata:
-  name: nginx-load-balancer-conf
+  name: nginx-load-balancer-{{ juju_application }}-conf
 ---
-apiVersion: v1
-kind: ReplicationController
+apiVersion: apps/v1beta2
+kind: DaemonSet
 metadata:
-  name: nginx-ingress-controller
+  name: nginx-ingress-{{ juju_application }}-controller
   labels:
-    k8s-app: nginx-ingress-lb
+    juju-application: nginx-ingress-{{ juju_application }}
 spec:
-  replicas: 1
   selector:
-    k8s-app: nginx-ingress-lb
+    matchLabels:
+      name: nginx-ingress-{{ juju_application }}
   template:
     metadata:
       labels:
-        k8s-app: nginx-ingress-lb
-        name: nginx-ingress-lb
+        name: nginx-ingress-{{ juju_application }}
     spec:
+      nodeSelector:
+        juju-application: {{ juju_application }}
       terminationGracePeriodSeconds: 60
       # hostPort doesn't work with CNI, so we have to use hostNetwork instead
      # see https://github.com/kubernetes/kubernetes/issues/23920
       hostNetwork: true
-      serviceAccountName: nginx-ingress-serviceaccount
+      serviceAccountName: nginx-ingress-{{ juju_application }}-serviceaccount
       containers:
       - image: {{ ingress_image }}
-        name: nginx-ingress-lb
+        name: nginx-ingress-{{ juju_application }}
         imagePullPolicy: Always
         livenessProbe:
           httpGet:
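The nodeSelector above is the other half of the handshake with apply_node_labels() in the charm code: the charm stamps each of its nodes with the Juju application name, and the DaemonSet schedules only onto nodes carrying that label. A sketch of the pairing (application name hypothetical):

app = 'kubernetes-worker'
node_label = 'juju-application={}'.format(app)  # applied by apply_node_labels()
node_selector = {'juju-application': app}       # consumed by the DaemonSet
assert node_label.split('=')[1] == node_selector['juju-application']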