Merge pull request #53820 from juju-solutions/feature/rbac
Automatic merge from submit-queue (batch tested with PRs 53820, 53971). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md

Add support for RBAC to Kubernetes via Juju

**What this PR does / why we need it**: This PR adds RBAC to the Juju deployment of Kubernetes.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*:

**Special notes for your reviewer**:

**Release note**:
```
Canonical Distribution of Kubernetes offers configurable RBAC
```
Commit df9e32b219
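For operators, the change boils down to one new charm option. A minimal usage sketch (the application name `kubernetes-master` is the CDK default and assumed here; the option name and values come from the config.yaml hunk below):

```
# Turn on RBAC together with Node authorization on the master charm.
juju config kubernetes-master authorization-mode="RBAC,Node"

# Fall back to the permissive default.
juju config kubernetes-master authorization-mode="AlwaysAllow"
```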
@@ -22,6 +22,7 @@ from charms.reactive import is_state
 from charms.reactive import set_state
 from charms.reactive import when
 from charms.reactive import when_not
+from charms.reactive.helpers import data_changed
 
 from charmhelpers.core import hookenv
 
@@ -31,6 +32,9 @@ from subprocess import check_call
 from subprocess import check_output
 
 
+USER = 'system:e2e'
+
+
 @hook('upgrade-charm')
 def reset_delivery_states():
     ''' Remove the state set when resources are unpacked. '''
@@ -87,7 +91,8 @@ def install_snaps():
 
 @when('tls_client.ca.saved', 'tls_client.client.certificate.saved',
       'tls_client.client.key.saved', 'kubernetes-master.available',
-      'kubernetes-e2e.installed', 'kube-control.auth.available')
+      'kubernetes-e2e.installed', 'e2e.auth.bootstrapped',
+      'kube-control.auth.available')
 @when_not('kubeconfig.ready')
 def prepare_kubeconfig_certificates(master, kube_control):
     ''' Prepare the data to feed to create the kubeconfig file. '''
@@ -95,7 +100,8 @@ def prepare_kubeconfig_certificates(master, kube_control):
     layer_options = layer.options('tls-client')
     # Get all the paths to the tls information required for kubeconfig.
     ca = layer_options.get('ca_certificate_path')
-    creds = kube_control.get_auth_credentials()
+    creds = kube_control.get_auth_credentials(USER)
+    data_changed('kube-control.creds', creds)
 
     servers = get_kube_api_servers(master)
 
@@ -118,11 +124,18 @@ def prepare_kubeconfig_certificates(master, kube_control):
 def request_credentials(kube_control):
     """ Request authorization creds."""
 
-    # The kube-cotrol interface is created to support RBAC.
-    # At this point we might as well do the right thing and return the hostname
-    # even if it will only be used when we enable RBAC
-    user = 'system:masters'
-    kube_control.set_auth_request(user)
+    # Ask for a user, although we will be using the 'client_token'
+    kube_control.set_auth_request(USER)
+
+
+@when('kube-control.auth.available')
+def catch_change_in_creds(kube_control):
+    """Request a service restart in case credential updates were detected."""
+    creds = kube_control.get_auth_credentials(USER)
+    if creds \
+            and data_changed('kube-control.creds', creds) \
+            and creds['user'] == USER:
+        set_state('e2e.auth.bootstrapped')
 
 
 @when('kubernetes-e2e.installed', 'kubeconfig.ready')
@@ -54,6 +54,10 @@ The domain name to use for the Kubernetes cluster for DNS.
 Enables the installation of Kubernetes dashboard, Heapster, Grafana, and
 InfluxDB.
 
+#### enable-rbac
+
+Enable RBAC and Node authorisation.
+
 # DNS for the cluster
 
 The DNS add-on allows the pods to have DNS names in addition to IP addresses.
@@ -46,3 +46,9 @@ options:
         runtime-config=batch/v2alpha1=true profiling=true
         will result in kube-apiserver being run with the following options:
         --runtime-config=batch/v2alpha1=true --profiling=true
+  authorization-mode:
+    type: string
+    default: "AlwaysAllow"
+    description: |
+      Comma separated authorization modes. Allowed values are
+      "RBAC", "Node", "Webhook", "ABAC", "AlwaysDeny" and "AlwaysAllow".
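As the `configure_apiserver` hunk further down shows, the charm hands this value straight to the API server, and appends the NodeRestriction admission plugin whenever "Node" is among the modes. A sketch of the resulting invocation (not captured output; `...` stands for the charm's other flags):

```
kube-apiserver --authorization-mode=RBAC,Node --admission-control=...,NodeRestriction ...
```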
@@ -26,6 +26,8 @@ import ipaddress
 
 import charms.leadership
 
+from shutil import move
+
 from shlex import split
 from subprocess import check_call
 from subprocess import check_output
@@ -79,10 +81,41 @@ def reset_states_for_delivery():
     '''An upgrade charm event was triggered by Juju, react to that here.'''
     migrate_from_pre_snaps()
     install_snaps()
+    add_rbac_roles()
     set_state('reconfigure.authentication.setup')
     remove_state('authentication.setup')
 
 
+def add_rbac_roles():
+    '''Update the known_tokens file with proper groups.'''
+
+    tokens_fname = '/root/cdk/known_tokens.csv'
+    tokens_backup_fname = '/root/cdk/known_tokens.csv.backup'
+    move(tokens_fname, tokens_backup_fname)
+    with open(tokens_fname, 'w') as ftokens:
+        with open(tokens_backup_fname, 'r') as stream:
+            for line in stream:
+                record = line.strip().split(',')
+                # token, username, user, groups
+                if record[2] == 'admin' and len(record) == 3:
+                    towrite = '{0},{1},{2},"{3}"\n'.format(record[0],
+                                                           record[1],
+                                                           record[2],
+                                                           'system:masters')
+                    ftokens.write(towrite)
+                    continue
+                if record[2] == 'kube_proxy':
+                    towrite = '{0},{1},{2}\n'.format(record[0],
+                                                     'system:kube-proxy',
+                                                     'kube-proxy')
+                    ftokens.write(towrite)
+                    continue
+                if record[2] == 'kubelet' and record[1] == 'kubelet':
+                    continue
+
+                ftokens.write('{}'.format(line))
+
+
 def rename_file_idempotent(source, destination):
     if os.path.isfile(source):
         os.rename(source, destination)
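For reference, `add_rbac_roles()` migrates `/root/cdk/known_tokens.csv` from three-field rows to rows whose fourth field is a quoted groups column, and drops the legacy shared kubelet row so nodes re-request per-node credentials. A hypothetical before/after, with `<token>` standing in for the real secrets:

```
# Before (token,username,uid):
<token>,admin,admin
<token>,kubelet,kubelet
<token>,kube_proxy,kube_proxy

# After add_rbac_roles() rewrites it (token,username,uid,"groups"):
<token>,admin,admin,"system:masters"
<token>,system:kube-proxy,kube-proxy
```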
@@ -209,12 +242,10 @@ def setup_leader_authentication():
     if not get_keys_from_leader(keys) \
             or is_state('reconfigure.authentication.setup'):
         last_pass = get_password('basic_auth.csv', 'admin')
-        setup_basic_auth(last_pass, 'admin', 'admin')
+        setup_basic_auth(last_pass, 'admin', 'admin', 'system:masters')
 
         if not os.path.isfile(known_tokens):
-            setup_tokens(None, 'admin', 'admin')
-            setup_tokens(None, 'kubelet', 'kubelet')
-            setup_tokens(None, 'kube_proxy', 'kube_proxy')
+            touch(known_tokens)
 
     # Generate the default service account token key
     os.makedirs('/root/cdk', exist_ok=True)
@@ -302,6 +333,7 @@ def get_keys_from_leader(keys, overwrite_local=False):
             # Write out the file and move on to the next item
             with open(k, 'w+') as fp:
                 fp.write(contents)
+                fp.write('\n')
 
     return True
 
@@ -399,20 +431,69 @@ def send_cluster_dns_detail(kube_control):
     kube_control.set_dns(53, hookenv.config('dns_domain'), dns_ip)
 
 
-@when('kube-control.auth.requested')
-@when('authentication.setup')
+@when('kube-control.connected')
+@when('snap.installed.kubectl')
 @when('leadership.is_leader')
-def send_tokens(kube_control):
-    """Send the tokens to the workers."""
-    kubelet_token = get_token('kubelet')
-    proxy_token = get_token('kube_proxy')
-    admin_token = get_token('admin')
+def create_service_configs(kube_control):
+    """Create the users for kubelet"""
+    should_restart = False
+    # generate the username/pass for the requesting unit
+    proxy_token = get_token('system:kube-proxy')
+    if not proxy_token:
+        setup_tokens(None, 'system:kube-proxy', 'kube-proxy')
+        proxy_token = get_token('system:kube-proxy')
+        should_restart = True
+
+    client_token = get_token('admin')
+    if not client_token:
+        setup_tokens(None, 'admin', 'admin', "system:masters")
+        client_token = get_token('admin')
+        should_restart = True
 
     # Send the data
     requests = kube_control.auth_user()
     for request in requests:
-        kube_control.sign_auth_request(request[0], kubelet_token,
-                                       proxy_token, admin_token)
+        username = request[1]['user']
+        group = request[1]['group']
+        kubelet_token = get_token(username)
+        if not kubelet_token and username and group:
+            # Usernames have to be in the form of system:node:<nodeName>
+            userid = "kubelet-{}".format(request[0].split('/')[1])
+            setup_tokens(None, username, userid, group)
+            kubelet_token = get_token(username)
+        kube_control.sign_auth_request(request[0], username,
+                                       kubelet_token, proxy_token,
+                                       client_token)
+        should_restart = True
+
+    if should_restart:
+        host.service_restart('snap.kube-apiserver.daemon')
+        remove_state('authentication.setup')
 
 
 @when('kube-control.departed')
 @when('leadership.is_leader')
 def flush_auth_for_departed(kube_control):
     ''' Unit has left the cluster and needs to have its authentication
     tokens removed from the token registry '''
+    token_auth_file = '/root/cdk/known_tokens.csv'
     departing_unit = kube_control.flush_departed()
+    userid = "kubelet-{}".format(departing_unit.split('/')[1])
+    known_tokens = open(token_auth_file, 'r').readlines()
+    for line in known_tokens[:]:
+        haystack = line.split(',')
+        # skip the entry if we dont have token,user,id,groups format
+        if len(haystack) < 4:
+            continue
+        if haystack[2] == userid:
+            hookenv.log('Found unit {} in token auth. Removing auth'
+                        ' token.'.format(userid))
+            known_tokens.remove(line)
+    # atomically rewrite the file minus any scrubbed units
+    hookenv.log('Rewriting token auth file: {}'.format(token_auth_file))
+    with open(token_auth_file, 'w') as fp:
+        fp.writelines(known_tokens)
+    # Trigger rebroadcast of auth files for followers
+    remove_state('authentication.setup')
 
 
 @when_not('kube-control.connected')
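`create_service_configs` mints one user per requesting worker, named `system:node:<nodeName>` as the Node authorizer requires, with a `kubelet-<unit number>` uid that `flush_auth_for_departed` later uses to scrub departed units. A hypothetical resulting row for unit `kubernetes-worker/0`; the group is whatever the worker sent with its request (`system:nodes` is assumed here, and the hostname is made up):

```
<token>,system:node:juju-machine-1,kubelet-0,"system:nodes"
```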
@@ -640,6 +721,15 @@ def initial_nrpe_config(nagios=None):
     update_nrpe_config(nagios)
 
 
+@when('config.changed.authorization-mode',
+      'kubernetes-master.components.started')
+def switch_auth_mode():
+    config = hookenv.config()
+    mode = config.get('authorization-mode')
+    if data_changed('auth-mode', mode):
+        remove_state('kubernetes-master.components.started')
+
+
 @when('kubernetes-master.components.started')
 @when('nrpe-external-master.available')
 @when_any('config.changed.nagios_context',
@@ -991,6 +1081,12 @@ def configure_apiserver():
         'DefaultTolerationSeconds'
     ]
 
+    auth_mode = hookenv.config('authorization-mode')
+    if 'Node' in auth_mode:
+        admission_control.append('NodeRestriction')
+
+    api_opts.add('authorization-mode', auth_mode, strict=True)
+
     if get_version('kube-apiserver') < (1, 6):
         hookenv.log('Removing DefaultTolerationSeconds from admission-control')
         admission_control.remove('DefaultTolerationSeconds')
@@ -1046,7 +1142,8 @@ def configure_scheduler():
     set_state('kube-scheduler.do-restart')
 
 
-def setup_basic_auth(password=None, username='admin', uid='admin'):
+def setup_basic_auth(password=None, username='admin', uid='admin',
+                     groups=None):
     '''Create the htacces file and the tokens.'''
     root_cdk = '/root/cdk'
     if not os.path.isdir(root_cdk):
@@ -1055,10 +1152,14 @@ def setup_basic_auth(password=None, username='admin', uid='admin'):
     if not password:
         password = token_generator()
     with open(htaccess, 'w') as stream:
-        stream.write('{0},{1},{2}'.format(password, username, uid))
+        if groups:
+            stream.write('{0},{1},{2},"{3}"'.format(password,
+                         username, uid, groups))
+        else:
+            stream.write('{0},{1},{2}'.format(password, username, uid))
 
 
-def setup_tokens(token, username, user):
+def setup_tokens(token, username, user, groups=None):
     '''Create a token file for kubernetes authentication.'''
     root_cdk = '/root/cdk'
     if not os.path.isdir(root_cdk):
@@ -1067,7 +1168,13 @@ def setup_tokens(token, username, user):
     if not token:
         token = token_generator()
     with open(known_tokens, 'a') as stream:
-        stream.write('{0},{1},{2}\n'.format(token, username, user))
+        if groups:
+            stream.write('{0},{1},{2},"{3}"\n'.format(token,
+                                                      username,
+                                                      user,
+                                                      groups))
+        else:
+            stream.write('{0},{1},{2}\n'.format(token, username, user))
 
 
 def get_password(csv_fname, user):
@@ -1133,3 +1240,10 @@ def apiserverVersion():
     cmd = 'kube-apiserver --version'.split()
     version_string = check_output(cmd).decode('utf-8')
     return tuple(int(q) for q in re.findall("[0-9]+", version_string)[:3])
+
+
+def touch(fname):
+    try:
+        os.utime(fname, None)
+    except OSError:
+        open(fname, 'a').close()
@@ -34,7 +34,7 @@ if not context['replicas']:
     context['replicas'] = 3
 
 # Declare a kubectl template when invoking kubectl
-kubectl = ['kubectl', '--kubeconfig=/root/cdk/kubeconfig']
+kubectl = ['kubectl', '--kubeconfig=/root/.kube/config']
 
 # Remove deployment if requested
 if context['delete']:
@@ -21,8 +21,8 @@ fi
 
 
 # Cordon and drain the unit
-kubectl --kubeconfig=/root/cdk/kubeconfig cordon $(hostname)
-kubectl --kubeconfig=/root/cdk/kubeconfig drain $(hostname) ${EXTRA_FLAGS}
+kubectl --kubeconfig=/root/.kube/config cordon $(hostname)
+kubectl --kubeconfig=/root/.kube/config drain $(hostname) ${EXTRA_FLAGS}
 
 # Set status to indicate the unit is paused and under maintenance.
 status-set 'waiting' 'Kubernetes unit paused'
@@ -57,7 +57,7 @@ if param_error:
     context['ingress'] = action_get('ingress')
 
 # Declare a kubectl template when invoking kubectl
-kubectl = ['kubectl', '--kubeconfig=/root/cdk/kubeconfig']
+kubectl = ['kubectl', '--kubeconfig=/root/.kube/config']
 
 # Remove deployment if requested
 if deletion:
@@ -4,5 +4,5 @@ set -ex
 
 export PATH=$PATH:/snap/bin
 
-kubectl --kubeconfig=/root/cdk/kubeconfig uncordon $(hostname)
+kubectl --kubeconfig=/root/.kube/config uncordon $(hostname)
 status-set 'active' 'Kubernetes unit resumed'
@@ -47,11 +47,11 @@ from charmhelpers.contrib.charmsupport import nrpe
 nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
 
 kubeconfig_path = '/root/cdk/kubeconfig'
+kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
+kubeclientconfig_path = '/root/.kube/config'
 
 os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
 
 db = unitdata.kv()
 
 
 @hook('upgrade-charm')
 def upgrade_charm():
@@ -319,7 +319,8 @@ def watch_for_changes(kube_api, kube_control, cni):
       'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
       'tls_client.server.key.saved',
       'kube-control.dns.available', 'kube-control.auth.available',
-      'cni.available', 'kubernetes-worker.restart-needed')
+      'cni.available', 'kubernetes-worker.restart-needed',
+      'worker.auth.bootstrapped')
 def start_worker(kube_api, kube_control, auth_control, cni):
     ''' Start kubelet using the provided API and DNS info.'''
     servers = get_kube_api_servers(kube_api)
@@ -335,7 +336,8 @@ def start_worker(kube_api, kube_control, auth_control, cni):
         hookenv.log('Waiting for cluster cidr.')
         return
 
-    creds = kube_control.get_auth_credentials()
+    nodeuser = 'system:node:{}'.format(gethostname())
+    creds = kube_control.get_auth_credentials(nodeuser)
     data_changed('kube-control.creds', creds)
 
     # set --allow-privileged flag for kubelet
@@ -458,11 +460,13 @@ def create_config(server, creds):
     cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
     check_call(cmd)
     # Create kubernetes configuration in the default location for root.
-    create_kubeconfig('/root/.kube/config', server, ca,
+    create_kubeconfig(kubeclientconfig_path, server, ca,
                       token=creds['client_token'], user='root')
     # Create kubernetes configuration for kubelet, and kube-proxy services.
     create_kubeconfig(kubeconfig_path, server, ca,
                       token=creds['kubelet_token'], user='kubelet')
+    create_kubeconfig(kubeproxyconfig_path, server, ca,
+                      token=creds['proxy_token'], user='kube-proxy')
 
 
 def configure_worker_services(api_servers, dns, cluster_cidr):
@@ -491,7 +495,7 @@ def configure_worker_services(api_servers, dns, cluster_cidr):
 
     kube_proxy_opts = FlagManager('kube-proxy')
     kube_proxy_opts.add('cluster-cidr', cluster_cidr)
-    kube_proxy_opts.add('kubeconfig', kubeconfig_path)
+    kube_proxy_opts.add('kubeconfig', kubeproxyconfig_path)
    kube_proxy_opts.add('logtostderr', 'true')
     kube_proxy_opts.add('v', '0')
     kube_proxy_opts.add('master', random.choice(api_servers), strict=True)
@@ -613,7 +617,7 @@ def get_kube_api_servers(kube_api):
 def kubectl(*args):
     ''' Run a kubectl cli command with a config file. Returns stdout and throws
     an error if the command fails. '''
-    command = ['kubectl', '--kubeconfig=' + kubeconfig_path] + list(args)
+    command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args)
     hookenv.log('Executing {}'.format(command))
     return check_output(command)
 
@@ -817,11 +821,15 @@ def request_kubelet_and_proxy_credentials(kube_control):
     kube_control.set_auth_request(nodeuser)
 
 
-@when('kube-control.auth.available')
+@when('kube-control.connected')
 def catch_change_in_creds(kube_control):
     """Request a service restart in case credential updates were detected."""
-    creds = kube_control.get_auth_credentials()
-    if data_changed('kube-control.creds', creds):
+    nodeuser = 'system:node:{}'.format(gethostname())
+    creds = kube_control.get_auth_credentials(nodeuser)
+    if creds \
+            and data_changed('kube-control.creds', creds) \
+            and creds['user'] == nodeuser:
+        set_state('worker.auth.bootstrapped')
         set_state('kubernetes-worker.restart-needed')
 
 
@@ -32,12 +32,13 @@ apiVersion: v1
 kind: Service
 metadata:
   name: default-http-backend
   # namespace: kube-system
   labels:
-    app: default-http-backend
+    k8s-app: default-http-backend
 spec:
   ports:
   - port: 80
     protocol: TCP
     targetPort: 80
   selector:
     app: default-http-backend
@@ -1,4 +1,128 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: nginx-ingress-serviceaccount
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRole
+metadata:
+  name: nginx-ingress-clusterrole
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+      - endpoints
+      - nodes
+      - pods
+      - secrets
+    verbs:
+      - list
+      - watch
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+    verbs:
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - services
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - "extensions"
+    resources:
+      - ingresses
+    verbs:
+      - get
+      - list
+      - watch
+  - apiGroups:
+      - ""
+    resources:
+      - events
+    verbs:
+      - create
+      - patch
+  - apiGroups:
+      - "extensions"
+    resources:
+      - ingresses/status
+    verbs:
+      - update
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: Role
+metadata:
+  name: nginx-ingress-role
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+      - pods
+      - secrets
+      - namespaces
+    verbs:
+      - get
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+    resourceNames:
+      # Defaults to "<election-id>-<ingress-class>"
+      # Here: "<ingress-controller-leader>-<nginx>"
+      # This has to be adapted if you change either parameter
+      # when launching the nginx-ingress-controller.
+      - "ingress-controller-leader-nginx"
+    verbs:
+      - get
+      - update
+  - apiGroups:
+      - ""
+    resources:
+      - configmaps
+    verbs:
+      - create
+  - apiGroups:
+      - ""
+    resources:
+      - endpoints
+    verbs:
+      - get
+      - create
+      - update
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: RoleBinding
+metadata:
+  name: nginx-ingress-role-nisa-binding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: nginx-ingress-role
+subjects:
+  - kind: ServiceAccount
+    name: nginx-ingress-serviceaccount
+---
+apiVersion: rbac.authorization.k8s.io/v1beta1
+kind: ClusterRoleBinding
+metadata:
+  name: nginx-ingress-clusterrole-nisa-binding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: nginx-ingress-clusterrole
+subjects:
+  - kind: ServiceAccount
+    name: nginx-ingress-serviceaccount
+    namespace: default
+---
 apiVersion: v1
 kind: ConfigMap
 metadata:
   name: nginx-load-balancer-conf
@@ -23,6 +147,7 @@ spec:
       # hostPort doesn't work with CNI, so we have to use hostNetwork instead
       # see https://github.com/kubernetes/kubernetes/issues/23920
       hostNetwork: true
+      serviceAccountName: nginx-ingress-serviceaccount
       containers:
       - image: {{ ingress_image }}
         name: nginx-ingress-lb
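The manifests above give the ingress controller its own identity: the RoleBinding and ClusterRoleBinding attach the namespaced and cluster-wide rules to `nginx-ingress-serviceaccount`, and the deployment hunk runs the pod under that account. One way to spot-check the grants once RBAC is enabled (a sketch using kubectl's impersonation syntax for service accounts):

```
kubectl auth can-i list services \
  --as=system:serviceaccount:default:nginx-ingress-serviceaccount
```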