Merge pull request #41351 from chuckbutler/multi-master-patch
Automatic merge from submit-queue (batch tested with PRs 40665, 41094, 41351, 41721, 41843)

Multi master patch

**What this PR does / why we need it**: Corrects a file-synchronization issue present when running in an HA master configuration. This PR adds logic to synchronize `/etc/kubernetes/serviceaccount.key` on first deployment; if that key is not identical (1:1) on every master unit, crypto verification fails. It additionally syncs basic_auth and the other files in /srv/kubernetes.

**Which issue this PR fixes**: fixes #41019

**Special notes for your reviewer**: This requires PR #41251 as a dependency before merging.

**Release note**:

```release-note
Juju - K8s master charm now properly keeps distributed master files in sync for an HA control plane.
```
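Before the diff, a note on the mechanism: the fix boils down to the leader publishing file contents through Juju leadership data, and every other master unit materializing its copies from that broadcast. A minimal sketch of the pattern, not the charm code itself, assuming the `charms.leadership` helpers exchange plain string values keyed by name as the diff below suggests:

```python
import os

import charms.leadership

# The files that must be byte-identical on every master unit.
FILES = ['/etc/kubernetes/serviceaccount.key',
         '/srv/kubernetes/basic_auth.csv',
         '/srv/kubernetes/known_tokens.csv']


def broadcast_files():
    """Leader: publish each file's contents, keyed by its path."""
    leader_data = {}
    for path in FILES:
        with open(path, 'r') as fp:
            leader_data[path] = fp.read()
    charms.leadership.leader_set(leader_data)


def receive_files():
    """Follower: write out any file we do not have yet from leader data.

    Returns False when the leader has not broadcast yet, so the caller
    can retry on a later hook invocation.
    """
    for path in FILES:
        if os.path.exists(path):
            continue
        contents = charms.leadership.leader_get(path)
        if contents is None:
            return False
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, 'w') as fp:
            fp.write(contents)
    return True
```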
commit 3701e54eb1
@@ -2,6 +2,7 @@ repo: https://github.com/kubernetes/kubernetes.git
 includes:
   - 'layer:basic'
   - 'layer:tls-client'
+  - 'layer:leadership'
   - 'layer:debug'
   - 'interface:etcd'
   - 'interface:http'
@@ -21,6 +21,8 @@ import socket
 import string
 import json
 
+import charms.leadership
+
 from shlex import split
 from subprocess import call
 from subprocess import check_call
@@ -140,34 +142,92 @@ def configure_cni(cni):
     cni.set_config(is_master=True, kubeconfig_path='')
 
 
+@when('leadership.is_leader')
 @when('kubernetes-master.components.installed')
 @when_not('authentication.setup')
-def setup_authentication():
+def setup_leader_authentication():
     '''Setup basic authentication and token access for the cluster.'''
     api_opts = FlagManager('kube-apiserver')
     controller_opts = FlagManager('kube-controller-manager')
 
-    api_opts.add('--basic-auth-file', '/srv/kubernetes/basic_auth.csv')
-    api_opts.add('--token-auth-file', '/srv/kubernetes/known_tokens.csv')
+    service_key = '/etc/kubernetes/serviceaccount.key'
+    basic_auth = '/srv/kubernetes/basic_auth.csv'
+    known_tokens = '/srv/kubernetes/known_tokens.csv'
+
+    api_opts.add('--basic-auth-file', basic_auth)
+    api_opts.add('--token-auth-file', known_tokens)
     api_opts.add('--service-cluster-ip-range', service_cidr())
     hookenv.status_set('maintenance', 'Rendering authentication templates.')
-    htaccess = '/srv/kubernetes/basic_auth.csv'
-    if not os.path.isfile(htaccess):
+    if not os.path.isfile(basic_auth):
         setup_basic_auth('admin', 'admin', 'admin')
-    known_tokens = '/srv/kubernetes/known_tokens.csv'
     if not os.path.isfile(known_tokens):
         setup_tokens(None, 'admin', 'admin')
         setup_tokens(None, 'kubelet', 'kubelet')
         setup_tokens(None, 'kube_proxy', 'kube_proxy')
     # Generate the default service account token key
     os.makedirs('/etc/kubernetes', exist_ok=True)
-    cmd = ['openssl', 'genrsa', '-out', '/etc/kubernetes/serviceaccount.key',
+
+    cmd = ['openssl', 'genrsa', '-out', service_key,
            '2048']
     check_call(cmd)
-    api_opts.add('--service-account-key-file',
-                 '/etc/kubernetes/serviceaccount.key')
-    controller_opts.add('--service-account-private-key-file',
-                        '/etc/kubernetes/serviceaccount.key')
+    api_opts.add('--service-account-key-file', service_key)
+    controller_opts.add('--service-account-private-key-file', service_key)
+
+    # read service account key for syndication
+    leader_data = {}
+    for f in [known_tokens, basic_auth, service_key]:
+        with open(f, 'r') as fp:
+            leader_data[f] = fp.read()
+
+    # this is slightly opaque, but we are sending file contents under its file
+    # path as a key.
+    # eg:
+    # {'/etc/kubernetes/serviceaccount.key': 'RSA:2471731...'}
+    charms.leadership.leader_set(leader_data)
 
     set_state('authentication.setup')
+
+
+@when_not('leadership.is_leader')
+@when('kubernetes-master.components.installed')
+@when_not('authentication.setup')
+def setup_non_leader_authentication():
+    api_opts = FlagManager('kube-apiserver')
+    controller_opts = FlagManager('kube-controller-manager')
+
+    service_key = '/etc/kubernetes/serviceaccount.key'
+    basic_auth = '/srv/kubernetes/basic_auth.csv'
+    known_tokens = '/srv/kubernetes/known_tokens.csv'
+
+    # This races with other codepaths, and seems to require being created first
+    # This block may be extracted later, but for now seems to work as intended
+    os.makedirs('/etc/kubernetes', exist_ok=True)
+    os.makedirs('/srv/kubernetes', exist_ok=True)
+
+    hookenv.status_set('maintenance', 'Rendering authentication templates.')
+
+    # Set an array for looping logic
+    keys = [service_key, basic_auth, known_tokens]
+    for k in keys:
+        # If the path does not exist, assume we need it
+        if not os.path.exists(k):
+            # Fetch data from leadership broadcast
+            contents = charms.leadership.leader_get(k)
+            # Default to logging the warning and wait for leader data to be set
+            if contents is None:
+                msg = "Waiting on leaders crypto keys."
+                hookenv.status_set('waiting', msg)
+                hookenv.log('Missing content for file {}'.format(k))
+                return
+            # Write out the file and move on to the next item
+            with open(k, 'w+') as fp:
+                fp.write(contents)
+
+    api_opts.add('--basic-auth-file', basic_auth)
+    api_opts.add('--token-auth-file', known_tokens)
+    api_opts.add('--service-cluster-ip-range', service_cidr())
+    api_opts.add('--service-account-key-file', service_key)
+    controller_opts.add('--service-account-private-key-file', service_key)
+
+    set_state('authentication.setup')
 
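A note on the early `return` above: under charms.reactive, a handler whose `@when`/`@when_not` guards still hold is dispatched again on subsequent hooks, so returning before `set_state('authentication.setup')` amounts to "retry later". A stripped-down sketch of that shape, reusing the hypothetical `receive_files` helper from the sketch near the top of this page:

```python
from charms.reactive import set_state, when, when_not


@when_not('leadership.is_leader')
@when('kubernetes-master.components.installed')
@when_not('authentication.setup')
def wait_for_leader_material():
    # Re-invoked on every hook dispatch while the guards above hold.
    if not receive_files():   # leader data not published yet
        return                # leave the state unset; try again next hook
    set_state('authentication.setup')
```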
@@ -185,9 +245,7 @@ def idle_status():
     if not all_kube_system_pods_running():
         hookenv.status_set('waiting', 'Waiting for kube-system pods to start')
     elif hookenv.config('service-cidr') != service_cidr():
-        msg = "Cannot change {0} to {1}".format(service_cidr(),
-                                                hookenv.config('service-cidr'))
-        hookenv.log(msg, level=hookenv.WARN)
+        msg = 'WARN: cannot change service-cidr, still using ' + service_cidr()
         hookenv.status_set('active', msg)
     else:
         hookenv.status_set('active', 'Kubernetes master running.')
@@ -305,7 +363,7 @@ def start_kube_dns():
 
     context = {
         'arch': arch(),
-        # The dictionary named pillar is a construct of the k8s template files.
+        # The dictionary named 'pillar' is a construct of the k8s template file
        'pillar': {
             'dns_server': get_dns_ip(),
             'dns_replicas': 1,
@@ -45,31 +45,46 @@ def clean_addon_dir(addon_dir):
     os.makedirs(addon_dir)
 
 
+def run_with_logging(command):
+    """ Run a command with controlled logging """
+    log.debug("Running: %s" % command)
+    process = subprocess.Popen(command, stderr=subprocess.PIPE)
+    stderr = process.communicate()[1].rstrip()
+    process.wait()
+    if process.returncode != 0:
+        log.error(stderr)
+        raise Exception("%s: exit code %d" % (command, process.returncode))
+    log.debug(stderr)
+
+
 @contextmanager
 def kubernetes_repo():
-    """ Shallow clone kubernetes repo and clean up when we are done """
-    repo = "https://github.com/kubernetes/kubernetes.git"
-    path = tempfile.mkdtemp(prefix="kubernetes")
-    try:
-        log.info("Cloning " + repo)
-        cmd = ["git", "clone", "--depth", "1", repo, path]
-        process = subprocess.Popen(cmd, stderr=subprocess.PIPE)
-        stderr = process.communicate()[1].rstrip()
-        process.wait()
-        if process.returncode != 0:
-            log.error(stderr)
-            raise Exception("clone failed: exit code %d" % process.returncode)
-        log.debug(stderr)
-        yield path
-    finally:
-        shutil.rmtree(path)
+    """ Yield a kubernetes repo to copy addons from.
+
+    If KUBE_VERSION is set, this will clone the local repo and checkout the
+    corresponding branch. Otherwise, the local branch will be used. """
+    repo = os.path.abspath("../../../..")
+    if "KUBE_VERSION" in os.environ:
+        branch = os.environ["KUBE_VERSION"]
+        log.info("Cloning %s with branch %s" % (repo, branch))
+        path = tempfile.mkdtemp(prefix="kubernetes")
+        try:
+            cmd = ["git", "clone", repo, path, "-b", branch]
+            run_with_logging(cmd)
+            yield path
+        finally:
+            shutil.rmtree(path)
+    else:
+        log.info("Using local repo " + repo)
+        yield repo
 
 
-def add_addon(source, dest):
-    """ Add an addon manifest from the given source.
+def add_addon(repo, source, dest):
+    """ Add an addon manifest from the given repo and source.
 
     Any occurrences of 'amd64' are replaced with '{{ arch }}' so the charm can
     fill it in during deployment. """
+    source = os.path.join(repo, "cluster/addons", source)
     if os.path.isdir(dest):
         dest = os.path.join(dest, os.path.basename(source))
     log.debug("Copying: %s -> %s" % (source, dest))
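The hunk above ends mid-function at the `log.debug` call, so the copy itself is not visible here. A hedged reconstruction of `add_addon`, assuming the unseen remainder simply reads the manifest, substitutes the architecture placeholder the docstring promises, and writes the result (everything below the marked comment is an assumption, not the actual source):

```python
import logging
import os

log = logging.getLogger(__name__)


def add_addon(repo, source, dest):
    """ Add an addon manifest from the given repo and source.

    Any occurrences of 'amd64' are replaced with '{{ arch }}' so the charm can
    fill it in during deployment. """
    source = os.path.join(repo, "cluster/addons", source)
    if os.path.isdir(dest):
        dest = os.path.join(dest, os.path.basename(source))
    log.debug("Copying: %s -> %s" % (source, dest))
    # Assumed continuation (not shown in the hunk above): read the manifest,
    # template the architecture, and write it into the charm's addons dir.
    with open(source, "r") as f:
        content = f.read()
    content = content.replace("amd64", "{{ arch }}")
    with open(dest, "w") as f:
        f.write(content)
```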
@@ -86,20 +101,26 @@ def update_addons(dest):
     with kubernetes_repo() as repo:
         log.info("Copying addons to charm")
         clean_addon_dir(dest)
-        add_addon(repo + "/cluster/addons/dashboard/dashboard-controller.yaml",
-                  dest)
-        add_addon(repo + "/cluster/addons/dashboard/dashboard-service.yaml",
-                  dest)
-        add_addon(repo + "/cluster/addons/dns/kubedns-controller.yaml.in",
-                  dest + "/kubedns-controller.yaml")
-        add_addon(repo + "/cluster/addons/dns/kubedns-svc.yaml.in",
-                  dest + "/kubedns-svc.yaml")
-        influxdb = "/cluster/addons/cluster-monitoring/influxdb"
-        add_addon(repo + influxdb + "/grafana-service.yaml", dest)
-        add_addon(repo + influxdb + "/heapster-controller.yaml", dest)
-        add_addon(repo + influxdb + "/heapster-service.yaml", dest)
-        add_addon(repo + influxdb + "/influxdb-grafana-controller.yaml", dest)
-        add_addon(repo + influxdb + "/influxdb-service.yaml", dest)
+        add_addon(repo, "dashboard/dashboard-controller.yaml", dest)
+        add_addon(repo, "dashboard/dashboard-service.yaml", dest)
+        try:
+            add_addon(repo, "dns/kubedns-controller.yaml.in",
+                      dest + "/kubedns-controller.yaml")
+            add_addon(repo, "dns/kubedns-svc.yaml.in",
+                      dest + "/kubedns-svc.yaml")
+        except IOError as e:
+            # fall back to the older filenames
+            log.debug(e)
+            add_addon(repo, "dns/skydns-rc.yaml.in",
+                      dest + "/kubedns-controller.yaml")
+            add_addon(repo, "dns/skydns-svc.yaml.in",
+                      dest + "/kubedns-svc.yaml")
+        influxdb = "cluster-monitoring/influxdb"
+        add_addon(repo, influxdb + "/grafana-service.yaml", dest)
+        add_addon(repo, influxdb + "/heapster-controller.yaml", dest)
+        add_addon(repo, influxdb + "/heapster-service.yaml", dest)
+        add_addon(repo, influxdb + "/influxdb-grafana-controller.yaml", dest)
+        add_addon(repo, influxdb + "/influxdb-service.yaml", dest)
 
 # Entry points
 
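With the `kubernetes_repo` change, the updater works against the local tree by default and only clones when `KUBE_VERSION` is set. A hypothetical driver, assuming `update_addons` from the script above is in scope and using an illustrative branch name:

```python
import os

# Build addons from a specific branch of the local clone instead of the
# current working tree. "release-1.5" is an illustrative branch name, and
# update_addons is the function from the hunk above (module name not shown).
os.environ["KUBE_VERSION"] = "release-1.5"
update_addons("templates/addons")  # same dest that main() uses below
```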
@@ -151,8 +172,8 @@ def parse_args():
 def main():
     """ Update addons into the layer's templates/addons folder """
     parse_args()
-    dest = os.path.abspath(os.path.join(os.path.dirname(__file__),
-                                        "../templates/addons"))
+    os.chdir(os.path.join(os.path.dirname(__file__), ".."))
+    dest = "templates/addons"
     update_addons(dest)
 
 