Work on upgrade path

This commit is contained in:
Konstantinos Tsakalozos 2017-10-11 17:13:19 +03:00
parent 50354896b6
commit 95fec2dc3f
6 changed files with 63 additions and 33 deletions

View File

@ -42,7 +42,7 @@ options:
--runtime-config=batch/v2alpha1=true --profiling=true --runtime-config=batch/v2alpha1=true --profiling=true
authorization-mode: authorization-mode:
type: string type: string
default: "RBAC" default: "None"
description: | description: |
Set the cluster's authorization mode. Allowed values are Set the cluster's authorization mode. Allowed values are
"RBAC" and "None". "RBAC" and "None".

View File

@ -26,6 +26,8 @@ import ipaddress
import charms.leadership import charms.leadership
from shutil import move
from shlex import split from shlex import split
from subprocess import check_call from subprocess import check_call
from subprocess import check_output from subprocess import check_output
@ -61,6 +63,7 @@ os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
valid_auth_modes = ['rbac', 'none'] valid_auth_modes = ['rbac', 'none']
def service_cidr(): def service_cidr():
''' Return the charm's service-cidr config ''' ''' Return the charm's service-cidr config '''
db = unitdata.kv() db = unitdata.kv()
@ -80,10 +83,41 @@ def reset_states_for_delivery():
'''An upgrade charm event was triggered by Juju, react to that here.''' '''An upgrade charm event was triggered by Juju, react to that here.'''
migrate_from_pre_snaps() migrate_from_pre_snaps()
install_snaps() install_snaps()
add_rbac_roles()
set_state('reconfigure.authentication.setup') set_state('reconfigure.authentication.setup')
remove_state('authentication.setup') remove_state('authentication.setup')
def add_rbac_roles(tokens_fname='/root/cdk/known_tokens.csv',
                   tokens_backup_fname=None):
    '''Update the known_tokens file with proper groups.

    Migrates a pre-RBAC known_tokens.csv in place: the original file is
    preserved as a ``.backup`` copy, then each record is rewritten so that
    the ``admin`` user gains the ``system:masters`` group, the
    ``kube_proxy`` user is renamed to the upstream ``system:kube-proxy``
    identity, and legacy ``kubelet`` records are dropped. Every other
    line is copied through unchanged.

    Args:
        tokens_fname: Path of the known_tokens.csv file to migrate.
            Defaults to the charm's canonical location.
        tokens_backup_fname: Path for the backup copy. Defaults to
            ``tokens_fname + '.backup'``.
    '''
    if tokens_backup_fname is None:
        tokens_backup_fname = tokens_fname + '.backup'
    move(tokens_fname, tokens_backup_fname)
    with open(tokens_fname, 'w') as ftokens:
        with open(tokens_backup_fname, 'r') as stream:
            for line in stream:
                # Record layout: token, username, user[, groups]
                record = line.strip().split(',')
                # Check the length BEFORE indexing record[2]; the original
                # order raised IndexError on blank or malformed lines.
                if len(record) == 3 and record[2] == 'admin':
                    towrite = '{0},{1},{2},"{3}"\n'.format(record[0],
                                                           record[1],
                                                           record[2],
                                                           'system:masters')
                    ftokens.write(towrite)
                    continue
                if len(record) >= 3 and record[2] == 'kube_proxy':
                    towrite = '{0},{1},{2}\n'.format(record[0],
                                                     'system:kube-proxy',
                                                     'kube-proxy')
                    ftokens.write(towrite)
                    continue
                if (len(record) >= 3 and record[2] == 'kubelet'
                        and record[1] == 'kubelet'):
                    # Stale per-node kubelet entry; new per-node users are
                    # recreated on demand, so drop it.
                    continue
                # Pass everything else through verbatim.
                ftokens.write(line)
def rename_file_idempotent(source, destination): def rename_file_idempotent(source, destination):
if os.path.isfile(source): if os.path.isfile(source):
os.rename(source, destination) os.rename(source, destination)
@ -404,38 +438,43 @@ def send_cluster_dns_detail(kube_control):
kube_control.set_dns(53, hookenv.config('dns_domain'), dns_ip) kube_control.set_dns(53, hookenv.config('dns_domain'), dns_ip)
@when('kube-control.auth.requested') @when('kube-control.connected')
@when('snap.installed.kubectl') @when('snap.installed.kubectl')
@when('leadership.is_leader') @when('leadership.is_leader')
def create_service_configs(kube_control): def create_service_configs(kube_control):
"""Create the users for kubelet""" """Create the users for kubelet"""
should_restart = False
# generate the username/pass for the requesting unit # generate the username/pass for the requesting unit
proxy_token = get_token('system:kube-proxy') proxy_token = get_token('system:kube-proxy')
if not proxy_token: if not proxy_token:
setup_tokens(None, 'system:kube-proxy', 'kube-proxy') setup_tokens(None, 'system:kube-proxy', 'kube-proxy')
proxy_token = get_token('system:kube-proxy') proxy_token = get_token('system:kube-proxy')
should_restart = True
client_token = get_token('admin') client_token = get_token('admin')
if not client_token: if not client_token:
setup_tokens(None, 'admin', 'admin', "system:masters") setup_tokens(None, 'admin', 'admin', "system:masters")
client_token = get_token('admin') client_token = get_token('admin')
should_restart = True
requests = kube_control.auth_user() requests = kube_control.auth_user()
for request in requests: for request in requests:
username = request[1]['user'] username = request[1]['user']
group = request[1]['group'] group = request[1]['group']
kubelet_token = get_token(username) kubelet_token = get_token(username)
if not kubelet_token: if not kubelet_token and username and group:
# Usernames have to be in the form of system:node:<hostname> # Usernames have to be in the form of system:node:<hostname>
userid = "kubelet-{}".format(request[0].split('/')[1]) userid = "kubelet-{}".format(request[0].split('/')[1])
setup_tokens(None, username, userid, group) setup_tokens(None, username, userid, group)
kubelet_token = get_token(username) kubelet_token = get_token(username)
kube_control.sign_auth_request(request[0], username,
kubelet_token, proxy_token,
client_token)
should_restart = True
kube_control.sign_auth_request(request[0], username, if should_restart:
kubelet_token, proxy_token, client_token) host.service_restart('snap.kube-apiserver.daemon')
remove_state('authentication.setup')
host.service_restart('snap.kube-apiserver.daemon')
remove_state('authentication.setup')
@when('kube-control.departed') @when('kube-control.departed')
@ -1113,7 +1152,9 @@ def setup_tokens(token, username, user, groups=None):
with open(known_tokens, 'a') as stream: with open(known_tokens, 'a') as stream:
if groups: if groups:
stream.write('{0},{1},{2},"{3}"\n'.format(token, stream.write('{0},{1},{2},"{3}"\n'.format(token,
username, user, groups)) username,
user,
groups))
else: else:
stream.write('{0},{1},{2}\n'.format(token, username, user)) stream.write('{0},{1},{2}\n'.format(token, username, user))

View File

@ -822,7 +822,7 @@ def request_kubelet_and_proxy_credentials(kube_control):
kube_control.set_auth_request(nodeuser) kube_control.set_auth_request(nodeuser)
@when('kube-control.auth.available') @when('kube-control.connected')
def catch_change_in_creds(kube_control): def catch_change_in_creds(kube_control):
"""Request a service restart in case credential updates were detected.""" """Request a service restart in case credential updates were detected."""
nodeuser = 'system:node:{}'.format(gethostname()) nodeuser = 'system:node:{}'.format(gethostname())

View File

@ -1,16 +1,15 @@
apiVersion: extensions/v1beta1 apiVersion: v1
kind: Deployment kind: ReplicationController
metadata: metadata:
name: default-http-backend name: default-http-backend
namespace: kube-system
labels:
k8s-app: default-http-backend
spec: spec:
replicas: 1 replicas: 1
selector:
app: default-http-backend
template: template:
metadata: metadata:
labels: labels:
k8s-app: default-http-backend app: default-http-backend
spec: spec:
terminationGracePeriodSeconds: 60 terminationGracePeriodSeconds: 60
containers: containers:
@ -28,24 +27,18 @@ spec:
timeoutSeconds: 5 timeoutSeconds: 5
ports: ports:
- containerPort: 8080 - containerPort: 8080
resources:
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
--- ---
apiVersion: v1 apiVersion: v1
kind: Service kind: Service
metadata: metadata:
name: default-http-backend name: default-http-backend
namespace: kube-system # namespace: kube-system
labels: labels:
k8s-app: default-http-backend k8s-app: default-http-backend
spec: spec:
ports: ports:
- port: 80 - port: 80
targetPort: 8080 protocol: TCP
targetPort: 80
selector: selector:
k8s-app: default-http-backend app: default-http-backend

View File

@ -2,7 +2,6 @@ apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:
name: nginx-ingress-serviceaccount name: nginx-ingress-serviceaccount
namespace: kube-system
--- ---
apiVersion: rbac.authorization.k8s.io/v1beta1 apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole kind: ClusterRole
@ -60,7 +59,6 @@ apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role kind: Role
metadata: metadata:
name: nginx-ingress-role name: nginx-ingress-role
namespace: kube-system
rules: rules:
- apiGroups: - apiGroups:
- "" - ""
@ -103,7 +101,6 @@ apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding kind: RoleBinding
metadata: metadata:
name: nginx-ingress-role-nisa-binding name: nginx-ingress-role-nisa-binding
namespace: kube-system
roleRef: roleRef:
apiGroup: rbac.authorization.k8s.io apiGroup: rbac.authorization.k8s.io
kind: Role kind: Role
@ -111,7 +108,6 @@ roleRef:
subjects: subjects:
- kind: ServiceAccount - kind: ServiceAccount
name: nginx-ingress-serviceaccount name: nginx-ingress-serviceaccount
namespace: kube-system
--- ---
apiVersion: rbac.authorization.k8s.io/v1beta1 apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding kind: ClusterRoleBinding
@ -124,7 +120,7 @@ roleRef:
subjects: subjects:
- kind: ServiceAccount - kind: ServiceAccount
name: nginx-ingress-serviceaccount name: nginx-ingress-serviceaccount
namespace: kube-system namespace: default
--- ---
apiVersion: v1 apiVersion: v1
kind: ConfigMap kind: ConfigMap
@ -135,7 +131,6 @@ apiVersion: v1
kind: ReplicationController kind: ReplicationController
metadata: metadata:
name: nginx-ingress-controller name: nginx-ingress-controller
namespace: kube-system
labels: labels:
k8s-app: nginx-ingress-lb k8s-app: nginx-ingress-lb
spec: spec:
@ -152,6 +147,7 @@ spec:
# hostPort doesn't work with CNI, so we have to use hostNetwork instead # hostPort doesn't work with CNI, so we have to use hostNetwork instead
# see https://github.com/kubernetes/kubernetes/issues/23920 # see https://github.com/kubernetes/kubernetes/issues/23920
hostNetwork: true hostNetwork: true
serviceAccountName: nginx-ingress-serviceaccount
containers: containers:
- image: {{ ingress_image }} - image: {{ ingress_image }}
name: nginx-ingress-lb name: nginx-ingress-lb