New master takes over

This commit is contained in:
Konstantinos Tsakalozos 2017-11-03 20:47:43 +02:00
parent db17709552
commit b134ce7621
3 changed files with 37 additions and 27 deletions

View File

@ -24,14 +24,14 @@ from charms.reactive import when
from charms.reactive import when_not from charms.reactive import when_not
from charms.reactive.helpers import data_changed from charms.reactive.helpers import data_changed
from charmhelpers.core import hookenv from charmhelpers.core import hookenv, unitdata
from shlex import split from shlex import split
from subprocess import check_call from subprocess import check_call
from subprocess import check_output from subprocess import check_output
db = unitdata.kv()
USER = 'system:e2e' USER = 'system:e2e'
@ -91,16 +91,15 @@ def install_snaps():
@when('tls_client.ca.saved', 'tls_client.client.certificate.saved', @when('tls_client.ca.saved', 'tls_client.client.certificate.saved',
'tls_client.client.key.saved', 'kubernetes-master.available', 'tls_client.client.key.saved', 'kubernetes-master.available',
'kubernetes-e2e.installed', 'e2e.auth.bootstrapped', 'kubernetes-e2e.installed', 'e2e.auth.bootstrapped')
'kube-control.auth.available')
@when_not('kubeconfig.ready') @when_not('kubeconfig.ready')
def prepare_kubeconfig_certificates(master, kube_control): def prepare_kubeconfig_certificates(master):
''' Prepare the data to feed to create the kubeconfig file. ''' ''' Prepare the data to feed to create the kubeconfig file. '''
layer_options = layer.options('tls-client') layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig. # Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path') ca = layer_options.get('ca_certificate_path')
creds = kube_control.get_auth_credentials(USER) creds = db.get('credentials')
data_changed('kube-control.creds', creds) data_changed('kube-control.creds', creds)
servers = get_kube_api_servers(master) servers = get_kube_api_servers(master)
@ -135,6 +134,10 @@ def catch_change_in_creds(kube_control):
if creds \ if creds \
and data_changed('kube-control.creds', creds) \ and data_changed('kube-control.creds', creds) \
and creds['user'] == USER: and creds['user'] == USER:
# We need to cache the credentials here because if the
# master changes (master leader dies and is replaced by a new one)
# the new master will have no recollection of our certs.
db.set('credentials', creds)
set_state('e2e.auth.bootstrapped') set_state('e2e.auth.bootstrapped')

View File

@ -478,24 +478,27 @@ def flush_auth_for_departed(kube_control):
''' Unit has left the cluster and needs to have its authentication ''' Unit has left the cluster and needs to have its authentication
tokens removed from the token registry ''' tokens removed from the token registry '''
token_auth_file = '/root/cdk/known_tokens.csv' token_auth_file = '/root/cdk/known_tokens.csv'
departing_unit = kube_control.flush_departed() departing_units = kube_control.flush_departed()
userid = "kubelet-{}".format(departing_unit.split('/')[1]) if departing_units:
known_tokens = open(token_auth_file, 'r').readlines() userids = []
for line in known_tokens[:]: for departing_unit in departing_units:
haystack = line.split(',') userids.append("kubelet-{}".format(departing_unit.split('/')[1]))
# skip the entry if we dont have token,user,id,groups format known_tokens = open(token_auth_file, 'r').readlines()
if len(haystack) < 4: for line in known_tokens[:]:
continue haystack = line.split(',')
if haystack[2] == userid: # skip the entry if we dont have token,user,id,groups format
hookenv.log('Found unit {} in token auth. Removing auth' if len(haystack) < 4:
' token.'.format(userid)) continue
known_tokens.remove(line) if haystack[2] in userids:
# atomically rewrite the file minus any scrubbed units hookenv.log('Found unit {} in token auth. Removing auth'
hookenv.log('Rewriting token auth file: {}'.format(token_auth_file)) ' token.'.format(haystack[2]))
with open(token_auth_file, 'w') as fp: known_tokens.remove(line)
fp.writelines(known_tokens) # atomically rewrite the file minus any scrubbed units
# Trigger rebroadcast of auth files for followers hookenv.log('Rewriting token auth file: {}'.format(token_auth_file))
remove_state('authentication.setup') with open(token_auth_file, 'w') as fp:
fp.writelines(known_tokens)
# Trigger rebroadcast of auth files for followers
remove_state('authentication.setup')
@when_not('kube-control.connected') @when_not('kube-control.connected')

View File

@ -37,7 +37,7 @@ from charms.kubernetes.flagmanager import FlagManager
from charms.reactive.helpers import data_changed, any_file_changed from charms.reactive.helpers import data_changed, any_file_changed
from charms.templating.jinja2 import render from charms.templating.jinja2 import render
from charmhelpers.core import hookenv from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe from charmhelpers.contrib.charmsupport import nrpe
@ -51,6 +51,7 @@ kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config' kubeclientconfig_path = '/root/.kube/config'
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin') os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
db = unitdata.kv()
@hook('upgrade-charm') @hook('upgrade-charm')
@ -336,8 +337,7 @@ def start_worker(kube_api, kube_control, auth_control, cni):
hookenv.log('Waiting for cluster cidr.') hookenv.log('Waiting for cluster cidr.')
return return
nodeuser = 'system:node:{}'.format(gethostname()) creds = db.get('credentials')
creds = kube_control.get_auth_credentials(nodeuser)
data_changed('kube-control.creds', creds) data_changed('kube-control.creds', creds)
# set --allow-privileged flag for kubelet # set --allow-privileged flag for kubelet
@ -829,6 +829,10 @@ def catch_change_in_creds(kube_control):
if creds \ if creds \
and data_changed('kube-control.creds', creds) \ and data_changed('kube-control.creds', creds) \
and creds['user'] == nodeuser: and creds['user'] == nodeuser:
# We need to cache the credentials here because if the
# master changes (master leader dies and is replaced by a new one)
# the new master will have no recollection of our certs.
db.set('credentials', creds)
set_state('worker.auth.bootstrapped') set_state('worker.auth.bootstrapped')
set_state('kubernetes-worker.restart-needed') set_state('kubernetes-worker.restart-needed')