Merge pull request #41351 from chuckbutler/multi-master-patch
Automatic merge from submit-queue (batch tested with PRs 40665, 41094, 41351, 41721, 41843)

Multi master patch

**What this PR does / why we need it**: Corrects a file-sync issue present when running in an HA master configuration. This PR adds logic to synchronize `/etc/kubernetes/serviceaccount.key` on first deployment; if that key is not identical (1:1) on every master unit, crypto verification fails. It additionally syncs basic_auth and other files in /srv/kubernetes.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #41019

**Special notes for your reviewer**: This requires PR #41251 as a dependency before merging.

**Release note**:
```release-note
Juju - K8s master charm now properly keeps distributed master files in sync for an HA control plane.
```
commit 3701e54eb1
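The fix relies on Juju leadership data: the leader unit reads the generated credential files and publishes their contents with `leader_set`, while non-leader units fetch them with `leader_get` and write them to disk before configuring their services. A minimal sketch of that pattern (file paths from the diff below; the function names and surrounding structure are illustrative, not the charm's actual handlers):

```python
# Minimal sketch of the leader_set/leader_get file-sync pattern, assuming the
# charms.leadership helpers provided by layer:leadership. Paths come from the
# diff; the function names here are illustrative only.
import os

import charms.leadership

SYNCED_FILES = [
    '/etc/kubernetes/serviceaccount.key',
    '/srv/kubernetes/basic_auth.csv',
    '/srv/kubernetes/known_tokens.csv',
]


def publish_from_leader():
    """Leader: broadcast each file's contents, keyed by its path."""
    data = {}
    for path in SYNCED_FILES:
        with open(path, 'r') as fp:
            data[path] = fp.read()
    charms.leadership.leader_set(data)


def materialize_on_follower():
    """Non-leader: write out whatever the leader has published so far."""
    for path in SYNCED_FILES:
        contents = charms.leadership.leader_get(path)
        if contents is None:
            return False  # leader has not published yet; retry on a later hook
        os.makedirs(os.path.dirname(path), exist_ok=True)
        with open(path, 'w') as fp:
            fp.write(contents)
    return True
```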
```diff
@@ -2,6 +2,7 @@ repo: https://github.com/kubernetes/kubernetes.git
 includes:
   - 'layer:basic'
   - 'layer:tls-client'
+  - 'layer:leadership'
   - 'layer:debug'
   - 'interface:etcd'
   - 'interface:http'
```
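Adding `layer:leadership` here is what makes the `leadership.is_leader` reactive state and the `charms.leadership` helpers available to the handlers changed below. A short illustrative sketch of gating handlers on that state (handler names and bodies are hypothetical):

```python
# Illustrative only: split work between the Juju leader and the other units
# using the state provided by layer:leadership.
from charms.reactive import when, when_not


@when('leadership.is_leader')
def leader_only_setup():
    pass  # e.g. generate secrets once and leader_set() them


@when_not('leadership.is_leader')
def follower_setup():
    pass  # e.g. leader_get() the secrets and write them to disk
```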
```diff
@@ -21,6 +21,8 @@ import socket
 import string
 import json
 
+import charms.leadership
+
 from shlex import split
 from subprocess import call
 from subprocess import check_call
```
```diff
@@ -140,34 +142,92 @@ def configure_cni(cni):
     cni.set_config(is_master=True, kubeconfig_path='')
 
 
+@when('leadership.is_leader')
 @when('kubernetes-master.components.installed')
 @when_not('authentication.setup')
-def setup_authentication():
+def setup_leader_authentication():
     '''Setup basic authentication and token access for the cluster.'''
     api_opts = FlagManager('kube-apiserver')
     controller_opts = FlagManager('kube-controller-manager')
 
-    api_opts.add('--basic-auth-file', '/srv/kubernetes/basic_auth.csv')
-    api_opts.add('--token-auth-file', '/srv/kubernetes/known_tokens.csv')
+    service_key = '/etc/kubernetes/serviceaccount.key'
+    basic_auth = '/srv/kubernetes/basic_auth.csv'
+    known_tokens = '/srv/kubernetes/known_tokens.csv'
+
+    api_opts.add('--basic-auth-file', basic_auth)
+    api_opts.add('--token-auth-file', known_tokens)
     api_opts.add('--service-cluster-ip-range', service_cidr())
     hookenv.status_set('maintenance', 'Rendering authentication templates.')
-    htaccess = '/srv/kubernetes/basic_auth.csv'
-    if not os.path.isfile(htaccess):
+    if not os.path.isfile(basic_auth):
         setup_basic_auth('admin', 'admin', 'admin')
-    known_tokens = '/srv/kubernetes/known_tokens.csv'
+
     if not os.path.isfile(known_tokens):
         setup_tokens(None, 'admin', 'admin')
         setup_tokens(None, 'kubelet', 'kubelet')
         setup_tokens(None, 'kube_proxy', 'kube_proxy')
     # Generate the default service account token key
     os.makedirs('/etc/kubernetes', exist_ok=True)
-    cmd = ['openssl', 'genrsa', '-out', '/etc/kubernetes/serviceaccount.key',
+
+    cmd = ['openssl', 'genrsa', '-out', service_key,
            '2048']
     check_call(cmd)
-    api_opts.add('--service-account-key-file',
-                 '/etc/kubernetes/serviceaccount.key')
-    controller_opts.add('--service-account-private-key-file',
-                        '/etc/kubernetes/serviceaccount.key')
+    api_opts.add('--service-account-key-file', service_key)
+    controller_opts.add('--service-account-private-key-file', service_key)
+
+    # read service account key for syndication
+    leader_data = {}
+    for f in [known_tokens, basic_auth, service_key]:
+        with open(f, 'r') as fp:
+            leader_data[f] = fp.read()
+
+    # this is slightly opaque, but we are sending file contents under its file
+    # path as a key.
+    # eg:
+    # {'/etc/kubernetes/serviceaccount.key': 'RSA:2471731...'}
+    charms.leadership.leader_set(leader_data)
+
+    set_state('authentication.setup')
+
+
+@when_not('leadership.is_leader')
+@when('kubernetes-master.components.installed')
+@when_not('authentication.setup')
+def setup_non_leader_authentication():
+    api_opts = FlagManager('kube-apiserver')
+    controller_opts = FlagManager('kube-controller-manager')
+
+    service_key = '/etc/kubernetes/serviceaccount.key'
+    basic_auth = '/srv/kubernetes/basic_auth.csv'
+    known_tokens = '/srv/kubernetes/known_tokens.csv'
+
+    # This races with other codepaths, and seems to require being created first
+    # This block may be extracted later, but for now seems to work as intended
+    os.makedirs('/etc/kubernetes', exist_ok=True)
+    os.makedirs('/srv/kubernetes', exist_ok=True)
+
+    hookenv.status_set('maintenance', 'Rendering authentication templates.')
+
+    # Set an array for looping logic
+    keys = [service_key, basic_auth, known_tokens]
+    for k in keys:
+        # If the path does not exist, assume we need it
+        if not os.path.exists(k):
+            # Fetch data from leadership broadcast
+            contents = charms.leadership.leader_get(k)
+            # Default to logging the warning and wait for leader data to be set
+            if contents is None:
+                msg = "Waiting on leaders crypto keys."
+                hookenv.status_set('waiting', msg)
+                hookenv.log('Missing content for file {}'.format(k))
+                return
+            # Write out the file and move on to the next item
+            with open(k, 'w+') as fp:
+                fp.write(contents)
+
+    api_opts.add('--basic-auth-file', basic_auth)
+    api_opts.add('--token-auth-file', known_tokens)
+    api_opts.add('--service-cluster-ip-range', service_cidr())
+    api_opts.add('--service-account-key-file', service_key)
+    controller_opts.add('--service-account-private-key-file', service_key)
 
     set_state('authentication.setup')
 
```
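For a sense of what `setup_leader_authentication` actually broadcasts, the leadership data is a flat mapping of file path to file contents, roughly like this (values truncated and purely illustrative):

```python
# Illustrative shape of the broadcast leadership data; the real values are the
# complete file contents read on the leader unit.
leader_data = {
    '/etc/kubernetes/serviceaccount.key': '-----BEGIN RSA PRIVATE KEY-----\n...',
    '/srv/kubernetes/basic_auth.csv': 'admin,admin,admin\n',
    '/srv/kubernetes/known_tokens.csv': '<random-token>,admin,admin\n...',
}
```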
```diff
@@ -185,9 +245,7 @@ def idle_status():
     if not all_kube_system_pods_running():
         hookenv.status_set('waiting', 'Waiting for kube-system pods to start')
     elif hookenv.config('service-cidr') != service_cidr():
-        msg = "Cannot change {0} to {1}".format(service_cidr(),
-                                                hookenv.config('service-cidr'))
-        hookenv.log(msg, level=hookenv.WARN)
+        msg = 'WARN: cannot change service-cidr, still using ' + service_cidr()
         hookenv.status_set('active', msg)
     else:
         hookenv.status_set('active', 'Kubernetes master running.')
```
```diff
@@ -305,7 +363,7 @@ def start_kube_dns():
 
     context = {
         'arch': arch(),
-        # The dictionary named pillar is a construct of the k8s template files.
+        # The dictionary named 'pillar' is a construct of the k8s template files.
         'pillar': {
             'dns_server': get_dns_ip(),
             'dns_replicas': 1,
```
```diff
@@ -45,31 +45,46 @@ def clean_addon_dir(addon_dir):
     os.makedirs(addon_dir)
 
 
+def run_with_logging(command):
+    """ Run a command with controlled logging """
+    log.debug("Running: %s" % command)
+    process = subprocess.Popen(command, stderr=subprocess.PIPE)
+    stderr = process.communicate()[1].rstrip()
+    process.wait()
+    if process.returncode != 0:
+        log.error(stderr)
+        raise Exception("%s: exit code %d" % (command, process.returncode))
+    log.debug(stderr)
+
+
 @contextmanager
 def kubernetes_repo():
-    """ Shallow clone kubernetes repo and clean up when we are done """
-    repo = "https://github.com/kubernetes/kubernetes.git"
-    path = tempfile.mkdtemp(prefix="kubernetes")
-    try:
-        log.info("Cloning " + repo)
-        cmd = ["git", "clone", "--depth", "1", repo, path]
-        process = subprocess.Popen(cmd, stderr=subprocess.PIPE)
-        stderr = process.communicate()[1].rstrip()
-        process.wait()
-        if process.returncode != 0:
-            log.error(stderr)
-            raise Exception("clone failed: exit code %d" % process.returncode)
-        log.debug(stderr)
-        yield path
-    finally:
-        shutil.rmtree(path)
+    """ Yield a kubernetes repo to copy addons from.
+
+    If KUBE_VERSION is set, this will clone the local repo and checkout the
+    corresponding branch. Otherwise, the local branch will be used. """
+    repo = os.path.abspath("../../../..")
+    if "KUBE_VERSION" in os.environ:
+        branch = os.environ["KUBE_VERSION"]
+        log.info("Cloning %s with branch %s" % (repo, branch))
+        path = tempfile.mkdtemp(prefix="kubernetes")
+        try:
+            cmd = ["git", "clone", repo, path, "-b", branch]
+            run_with_logging(cmd)
+            yield path
+        finally:
+            shutil.rmtree(path)
+    else:
+        log.info("Using local repo " + repo)
+        yield repo
 
 
-def add_addon(source, dest):
-    """ Add an addon manifest from the given source.
+def add_addon(repo, source, dest):
+    """ Add an addon manifest from the given repo and source.
 
     Any occurrences of 'amd64' are replaced with '{{ arch }}' so the charm can
     fill it in during deployment. """
+    source = os.path.join(repo, "cluster/addons", source)
     if os.path.isdir(dest):
         dest = os.path.join(dest, os.path.basename(source))
     log.debug("Copying: %s -> %s" % (source, dest))
```
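From the caller's side, the reworked `kubernetes_repo` clones the enclosing checkout at the requested branch into a temporary directory when `KUBE_VERSION` is set, and otherwise simply yields the local tree. A small usage sketch (the branch name is only an example, not something mandated by the script):

```python
# Usage sketch for the context manager above; "release-1.5" is an assumed
# example value for KUBE_VERSION.
import os

os.environ["KUBE_VERSION"] = "release-1.5"
with kubernetes_repo() as repo:
    addons = os.path.join(repo, "cluster/addons")
    print("copying addon templates from", addons)
```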
```diff
@@ -86,20 +101,26 @@ def update_addons(dest):
     with kubernetes_repo() as repo:
         log.info("Copying addons to charm")
         clean_addon_dir(dest)
-        add_addon(repo + "/cluster/addons/dashboard/dashboard-controller.yaml",
-                  dest)
-        add_addon(repo + "/cluster/addons/dashboard/dashboard-service.yaml",
-                  dest)
-        add_addon(repo + "/cluster/addons/dns/kubedns-controller.yaml.in",
-                  dest + "/kubedns-controller.yaml")
-        add_addon(repo + "/cluster/addons/dns/kubedns-svc.yaml.in",
-                  dest + "/kubedns-svc.yaml")
-        influxdb = "/cluster/addons/cluster-monitoring/influxdb"
-        add_addon(repo + influxdb + "/grafana-service.yaml", dest)
-        add_addon(repo + influxdb + "/heapster-controller.yaml", dest)
-        add_addon(repo + influxdb + "/heapster-service.yaml", dest)
-        add_addon(repo + influxdb + "/influxdb-grafana-controller.yaml", dest)
-        add_addon(repo + influxdb + "/influxdb-service.yaml", dest)
+        add_addon(repo, "dashboard/dashboard-controller.yaml", dest)
+        add_addon(repo, "dashboard/dashboard-service.yaml", dest)
+        try:
+            add_addon(repo, "dns/kubedns-controller.yaml.in",
+                      dest + "/kubedns-controller.yaml")
+            add_addon(repo, "dns/kubedns-svc.yaml.in",
+                      dest + "/kubedns-svc.yaml")
+        except IOError as e:
+            # fall back to the older filenames
+            log.debug(e)
+            add_addon(repo, "dns/skydns-rc.yaml.in",
+                      dest + "/kubedns-controller.yaml")
+            add_addon(repo, "dns/skydns-svc.yaml.in",
+                      dest + "/kubedns-svc.yaml")
+        influxdb = "cluster-monitoring/influxdb"
+        add_addon(repo, influxdb + "/grafana-service.yaml", dest)
+        add_addon(repo, influxdb + "/heapster-controller.yaml", dest)
+        add_addon(repo, influxdb + "/heapster-service.yaml", dest)
+        add_addon(repo, influxdb + "/influxdb-grafana-controller.yaml", dest)
+        add_addon(repo, influxdb + "/influxdb-service.yaml", dest)
 
 
 # Entry points
```
```diff
@@ -151,8 +172,8 @@ def parse_args():
 def main():
     """ Update addons into the layer's templates/addons folder """
     parse_args()
-    dest = os.path.abspath(os.path.join(os.path.dirname(__file__),
-                                        "../templates/addons"))
+    os.chdir(os.path.join(os.path.dirname(__file__), ".."))
+    dest = "templates/addons"
     update_addons(dest)
 
 
```