Merge pull request #55081 from juju-solutions/bug/departing

Automatic merge from submit-queue (batch tested with PRs 54535, 54950, 55081). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Bug/departing

**What this PR does / why we need it**: This PR fixes the bugs we hit with a multi-master setup in Juju: the kubernetes-e2e and kubernetes-worker charms now cache the credentials handed out by the master, so replacing the master leader no longer breaks their authentication.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes https://github.com/juju-solutions/bundle-canonical-kubernetes/issues/422

**Special notes for your reviewer**: Related PR: https://github.com/juju-solutions/interface-kube-control/pull/15

**Release note**:

```NONE

```
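
For reviewers who have not used charmhelpers' `unitdata` before, here is a minimal, self-contained sketch of the caching pattern the diffs below introduce; the helper names are illustrative and not part of this PR:

```python
# Sketch of the credential-caching pattern added to the e2e and worker charms
# below. unitdata.kv() is charmhelpers' persistent, per-unit key/value store,
# so values written here survive later hook invocations -- including the case
# where the master leader that issued the credentials has since been replaced.
from charmhelpers.core import unitdata

db = unitdata.kv()


def remember_credentials(creds):
    # Called when the kube-control relation hands the unit fresh credentials.
    if creds:
        db.set('credentials', creds)


def recall_credentials():
    # Called later (e.g. while rendering a kubeconfig); returns None if this
    # unit has never been given credentials.
    return db.get('credentials')
```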
Kubernetes Submit Queue, 2017-11-06 07:30:48 -08:00, committed by GitHub
commit 4882789147
3 changed files with 16 additions and 35 deletions

Changed file (1 of 3): kubernetes-e2e charm (reactive layer)

@@ -24,14 +24,14 @@ from charms.reactive import when
 from charms.reactive import when_not
 from charms.reactive.helpers import data_changed
-from charmhelpers.core import hookenv
+from charmhelpers.core import hookenv, unitdata
 from shlex import split
 from subprocess import check_call
 from subprocess import check_output
+db = unitdata.kv()
 USER = 'system:e2e'
@@ -91,16 +91,15 @@ def install_snaps():
 @when('tls_client.ca.saved', 'tls_client.client.certificate.saved',
       'tls_client.client.key.saved', 'kubernetes-master.available',
-      'kubernetes-e2e.installed', 'e2e.auth.bootstrapped',
-      'kube-control.auth.available')
+      'kubernetes-e2e.installed', 'e2e.auth.bootstrapped')
 @when_not('kubeconfig.ready')
-def prepare_kubeconfig_certificates(master, kube_control):
+def prepare_kubeconfig_certificates(master):
     ''' Prepare the data to feed to create the kubeconfig file. '''
     layer_options = layer.options('tls-client')
     # Get all the paths to the tls information required for kubeconfig.
     ca = layer_options.get('ca_certificate_path')
-    creds = kube_control.get_auth_credentials(USER)
+    creds = db.get('credentials')
     data_changed('kube-control.creds', creds)
     servers = get_kube_api_servers(master)
@@ -135,6 +134,10 @@ def catch_change_in_creds(kube_control):
     if creds \
             and data_changed('kube-control.creds', creds) \
             and creds['user'] == USER:
+        # We need to cache the credentials here because if the
+        # master changes (master leader dies and replaced by a new one)
+        # the new master will have no recollection of our certs.
+        db.set('credentials', creds)
         set_state('e2e.auth.bootstrapped')

Changed file (2 of 3): kubernetes-master charm (reactive layer)

@@ -472,32 +472,6 @@ def create_service_configs(kube_control):
     remove_state('authentication.setup')
-@when('kube-control.departed')
-@when('leadership.is_leader')
-def flush_auth_for_departed(kube_control):
-    ''' Unit has left the cluster and needs to have its authentication
-    tokens removed from the token registry '''
-    token_auth_file = '/root/cdk/known_tokens.csv'
-    departing_unit = kube_control.flush_departed()
-    userid = "kubelet-{}".format(departing_unit.split('/')[1])
-    known_tokens = open(token_auth_file, 'r').readlines()
-    for line in known_tokens[:]:
-        haystack = line.split(',')
-        # skip the entry if we dont have token,user,id,groups format
-        if len(haystack) < 4:
-            continue
-        if haystack[2] == userid:
-            hookenv.log('Found unit {} in token auth. Removing auth'
-                        ' token.'.format(userid))
-            known_tokens.remove(line)
-    # atomically rewrite the file minus any scrubbed units
-    hookenv.log('Rewriting token auth file: {}'.format(token_auth_file))
-    with open(token_auth_file, 'w') as fp:
-        fp.writelines(known_tokens)
-    # Trigger rebroadcast of auth files for followers
-    remove_state('authentication.setup')
 @when_not('kube-control.connected')
 def missing_kube_control():
     """Inform the operator master is waiting for a relation to workers.

Changed file (3 of 3): kubernetes-worker charm (reactive layer)

@@ -37,7 +37,7 @@ from charms.kubernetes.flagmanager import FlagManager
 from charms.reactive.helpers import data_changed, any_file_changed
 from charms.templating.jinja2 import render
-from charmhelpers.core import hookenv
+from charmhelpers.core import hookenv, unitdata
 from charmhelpers.core.host import service_stop, service_restart
 from charmhelpers.contrib.charmsupport import nrpe
@@ -51,6 +51,7 @@ kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
 kubeclientconfig_path = '/root/.kube/config'
 os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
+db = unitdata.kv()
 @hook('upgrade-charm')
@@ -336,8 +337,7 @@ def start_worker(kube_api, kube_control, auth_control, cni):
         hookenv.log('Waiting for cluster cidr.')
         return
     nodeuser = 'system:node:{}'.format(gethostname())
-    creds = kube_control.get_auth_credentials(nodeuser)
+    creds = db.get('credentials')
     data_changed('kube-control.creds', creds)
     # set --allow-privileged flag for kubelet
@@ -829,6 +829,10 @@ def catch_change_in_creds(kube_control):
     if creds \
             and data_changed('kube-control.creds', creds) \
             and creds['user'] == nodeuser:
+        # We need to cache the credentials here because if the
+        # master changes (master leader dies and replaced by a new one)
+        # the new master will have no recollection of our certs.
+        db.set('credentials', creds)
         set_state('worker.auth.bootstrapped')
         set_state('kubernetes-worker.restart-needed')
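
A note on the `data_changed` guard used by `catch_change_in_creds` above: `charms.reactive.helpers.data_changed()` stores a hash of the value under the given key and returns True only when that hash differs from the one recorded on a previous call (or on the first call), so the handler re-caches credentials and flags a restart only when the master actually hands out something new. A minimal sketch of the guard, with an illustrative function name:

```python
# Minimal sketch (illustrative, not part of this PR) of the guard pattern used
# in catch_change_in_creds(): persist and act on credentials only when they
# differ from what was seen on the previous invocation.
from charmhelpers.core import unitdata
from charms.reactive.helpers import data_changed

db = unitdata.kv()


def cache_if_new(creds, expected_user):
    """Return True when new credentials for expected_user were cached."""
    if creds and data_changed('kube-control.creds', creds) \
            and creds.get('user') == expected_user:
        db.set('credentials', creds)
        return True
    return False
```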