Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-24 04:06:03 +00:00)

Commit b134ce7621: New master takes over
Parent: db17709552
@@ -24,14 +24,14 @@ from charms.reactive import when
 from charms.reactive import when_not
 from charms.reactive.helpers import data_changed

-from charmhelpers.core import hookenv
+from charmhelpers.core import hookenv, unitdata

 from shlex import split

 from subprocess import check_call
 from subprocess import check_output

-
+db = unitdata.kv()
 USER = 'system:e2e'

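For context: unitdata.kv() comes from charmhelpers.core and returns a unit-local, sqlite-backed key/value store that persists across hook invocations. Below is a minimal sketch of how the cache introduced above can be used, assuming the store's standard get/set/flush API; the helper names are illustrative, not part of the commit.

from charmhelpers.core import unitdata

db = unitdata.kv()


def cache_credentials(creds):
    # Persist the last credentials handed out by the master so a later
    # hook run can still find them after the master leader is replaced.
    db.set('credentials', creds)
    db.flush()  # the reactive framework normally flushes at hook exit


def cached_credentials():
    # Returns None when nothing has been cached yet.
    return db.get('credentials')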
@@ -91,16 +91,15 @@ def install_snaps():

 @when('tls_client.ca.saved', 'tls_client.client.certificate.saved',
       'tls_client.client.key.saved', 'kubernetes-master.available',
-      'kubernetes-e2e.installed', 'e2e.auth.bootstrapped',
-      'kube-control.auth.available')
+      'kubernetes-e2e.installed', 'e2e.auth.bootstrapped')
 @when_not('kubeconfig.ready')
-def prepare_kubeconfig_certificates(master, kube_control):
+def prepare_kubeconfig_certificates(master):
     ''' Prepare the data to feed to create the kubeconfig file. '''

     layer_options = layer.options('tls-client')
     # Get all the paths to the tls information required for kubeconfig.
     ca = layer_options.get('ca_certificate_path')
-    creds = kube_control.get_auth_credentials(USER)
+    creds = db.get('credentials')
     data_changed('kube-control.creds', creds)

     servers = get_kube_api_servers(master)
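The data_changed() helper from charms.reactive.helpers records a hash of the value under the given key and returns True only when the value differs from what was seen on the previous call. A rough usage sketch follows; the credentials dict is an invented placeholder, not taken from the commit.

from charms.reactive.helpers import data_changed

creds = db.get('credentials')  # e.g. {'user': 'system:e2e', 'client_token': '<token>'}
if creds and data_changed('kube-control.creds', creds):
    # Credentials differ from the previous run, so the kubeconfig should
    # be regenerated before the e2e tests are launched.
    pass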
@@ -135,6 +134,10 @@ def catch_change_in_creds(kube_control):
     if creds \
             and data_changed('kube-control.creds', creds) \
             and creds['user'] == USER:
+        # We need to cache the credentials here because if the
+        # master changes (master leader dies and replaced by a new one)
+        # the new master will have no recollection of our certs.
+        db.set('credentials', creds)
         set_state('e2e.auth.bootstrapped')

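The handler above is gated by charms.reactive flags: set_state('e2e.auth.bootstrapped') is what satisfies the 'e2e.auth.bootstrapped' precondition on prepare_kubeconfig_certificates() in the earlier hunk. A stripped-down sketch of that pattern, assuming the @when/@when_not/set_state API and with the handler bodies reduced to placeholders:

from charms.reactive import when, when_not, set_state


@when('kube-control.auth.available')
def catch_change_in_creds(kube_control):
    # Cache whatever the master sent, then raise the flag other
    # handlers wait on.
    set_state('e2e.auth.bootstrapped')


@when('e2e.auth.bootstrapped')
@when_not('kubeconfig.ready')
def prepare_kubeconfig_certificates(master):
    # Build the kubeconfig from the cached credentials, then mark it done.
    set_state('kubeconfig.ready')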
@@ -478,24 +478,27 @@ def flush_auth_for_departed(kube_control):
     ''' Unit has left the cluster and needs to have its authentication
     tokens removed from the token registry '''
     token_auth_file = '/root/cdk/known_tokens.csv'
-    departing_unit = kube_control.flush_departed()
-    userid = "kubelet-{}".format(departing_unit.split('/')[1])
-    known_tokens = open(token_auth_file, 'r').readlines()
-    for line in known_tokens[:]:
-        haystack = line.split(',')
-        # skip the entry if we dont have token,user,id,groups format
-        if len(haystack) < 4:
-            continue
-        if haystack[2] == userid:
-            hookenv.log('Found unit {} in token auth. Removing auth'
-                        ' token.'.format(userid))
-            known_tokens.remove(line)
-    # atomically rewrite the file minus any scrubbed units
-    hookenv.log('Rewriting token auth file: {}'.format(token_auth_file))
-    with open(token_auth_file, 'w') as fp:
-        fp.writelines(known_tokens)
-    # Trigger rebroadcast of auth files for followers
-    remove_state('authentication.setup')
+    departing_units = kube_control.flush_departed()
+    if departing_units:
+        userids = []
+        for departing_unit in departing_units:
+            userids.append("kubelet-{}".format(departing_unit.split('/')[1]))
+        known_tokens = open(token_auth_file, 'r').readlines()
+        for line in known_tokens[:]:
+            haystack = line.split(',')
+            # skip the entry if we dont have token,user,id,groups format
+            if len(haystack) < 4:
+                continue
+            if haystack[2] in userids:
+                hookenv.log('Found unit {} in token auth. Removing auth'
+                            ' token.'.format(haystack[2]))
+                known_tokens.remove(line)
+        # atomically rewrite the file minus any scrubbed units
+        hookenv.log('Rewriting token auth file: {}'.format(token_auth_file))
+        with open(token_auth_file, 'w') as fp:
+            fp.writelines(known_tokens)
+        # Trigger rebroadcast of auth files for followers
+        remove_state('authentication.setup')


 @when_not('kube-control.connected')
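The rewritten handler treats kube_control.flush_departed() as returning a list of departing units rather than a single unit, and scrubs every matching id from known_tokens.csv in one pass. The scrubbing itself is plain CSV filtering; here is a self-contained sketch of that core step (the file path and unit names are examples, and the function is not part of the charm):

def scrub_departed_tokens(token_auth_file, departing_units):
    # Drop every known_tokens.csv entry (token,user,id,groups) whose id
    # belongs to a departing kubelet, then rewrite the file.
    userids = {'kubelet-{}'.format(u.split('/')[1]) for u in departing_units}
    with open(token_auth_file, 'r') as fp:
        known_tokens = fp.readlines()
    kept = []
    for line in known_tokens:
        fields = line.split(',')
        # keep malformed lines untouched rather than guessing at them
        if len(fields) >= 4 and fields[2] in userids:
            continue
        kept.append(line)
    with open(token_auth_file, 'w') as fp:
        fp.writelines(kept)


# Example: remove tokens for kubernetes-worker/2 and kubernetes-worker/5.
scrub_departed_tokens('/root/cdk/known_tokens.csv',
                      ['kubernetes-worker/2', 'kubernetes-worker/5'])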
@@ -37,7 +37,7 @@ from charms.kubernetes.flagmanager import FlagManager
 from charms.reactive.helpers import data_changed, any_file_changed
 from charms.templating.jinja2 import render

-from charmhelpers.core import hookenv
+from charmhelpers.core import hookenv, unitdata
 from charmhelpers.core.host import service_stop, service_restart
 from charmhelpers.contrib.charmsupport import nrpe

@@ -51,6 +51,7 @@ kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
 kubeclientconfig_path = '/root/.kube/config'

 os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
+db = unitdata.kv()


 @hook('upgrade-charm')
@@ -336,8 +337,7 @@ def start_worker(kube_api, kube_control, auth_control, cni):
         hookenv.log('Waiting for cluster cidr.')
         return

-    nodeuser = 'system:node:{}'.format(gethostname())
-    creds = kube_control.get_auth_credentials(nodeuser)
+    creds = db.get('credentials')
     data_changed('kube-control.creds', creds)

     # set --allow-privileged flag for kubelet
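On the worker the same split applies: the relation handler keeps the cache fresh, and start_worker() only reads from it, so a freshly elected master that has not yet re-sent credentials does not stall an already-bootstrapped worker. A condensed sketch of that flow follows; the function names are stand-ins, not the charm's handlers.

from charmhelpers.core import unitdata

db = unitdata.kv()


def on_creds_from_master(creds, nodeuser):
    # Stand-in for catch_change_in_creds(): accept only credentials issued
    # for this node's user, then cache them for later hook runs.
    if creds and creds.get('user') == nodeuser:
        db.set('credentials', creds)


def creds_for_kubelet():
    # Stand-in for the lookup in start_worker(): read whatever was cached,
    # whether it came from the old master or the new one.
    return db.get('credentials')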
@@ -829,6 +829,10 @@ def catch_change_in_creds(kube_control):
     if creds \
             and data_changed('kube-control.creds', creds) \
             and creds['user'] == nodeuser:
+        # We need to cache the credentials here because if the
+        # master changes (master leader dies and replaced by a new one)
+        # the new master will have no recollection of our certs.
+        db.set('credentials', creds)
         set_state('worker.auth.bootstrapped')
         set_state('kubernetes-worker.restart-needed')
