Merge pull request #2387 from erictune/secure_port_on

Kubelet talks securely to apiserver

commit d1768cc8da
@@ -28,3 +28,17 @@ EOF
 mkdir -p /srv/salt-overlay/salt/nginx
 echo $MASTER_HTPASSWD > /srv/salt-overlay/salt/nginx/htpasswd
+
+# Generate and distribute a shared secret (bearer token) to
+# apiserver and kubelet so that kubelet can authenticate to
+# apiserver to send events.
+# This works on CoreOS, so it should work on a lot of distros.
+kubelet_token=$(cat /dev/urandom | base64 | tr -d "=+/" | dd bs=32 count=1 2> /dev/null)
+
+mkdir -p /srv/salt-overlay/salt/kube-apiserver
+known_tokens_file="/srv/salt-overlay/salt/kube-apiserver/known_tokens.csv"
+(umask u=rw,go= ; echo "$kubelet_token,kubelet,kubelet" > $known_tokens_file)
+
+mkdir -p /srv/salt-overlay/salt/kubelet
+kubelet_auth_file="/srv/salt-overlay/salt/kubelet/kubernetes_auth"
+(umask u=rw,go= ; echo "{\"BearerToken\": \"$kubelet_token\", \"Insecure\": true }" > $kubelet_auth_file)
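The CSV row above uses the apiserver's token-file format, `token,user,uid`, and the JSON file is the credential the kubelet loads. A rough manual check of the wiring, where the master hostname and the v1beta1 API path are assumptions for illustration, not part of this commit:

    # Sketch only: hostname and API path are assumed, adjust for your cluster.
    token=$(cut -d, -f1 /srv/salt-overlay/salt/kube-apiserver/known_tokens.csv)

    # With the bearer token, the secure port should accept the request...
    curl -k -H "Authorization: Bearer ${token}" https://kubernetes-master:6443/api/v1beta1/pods

    # ...and without it, the same request should be rejected.
    curl -k https://kubernetes-master:6443/api/v1beta1/pods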
@@ -30,4 +30,14 @@
 {% set cert_file = "-tls_cert_file=/srv/kubernetes/server.cert" %}
 {% set key_file = "-tls_private_key_file=/srv/kubernetes/server.key" %}
+{% set secure_port = "-secure_port=6443" %}
+{% set token_auth_file = "-token_auth_file=/dev/null" %}
+
+{% if grains.cloud is defined %}
+{% if grains.cloud == 'gce' %}
+# TODO: generate and distribute tokens for other cloud providers.
+{% set token_auth_file = "-token_auth_file=/srv/kubernetes/known_tokens.csv" %}
+{% endif %}
+{% endif %}
 
-DAEMON_ARGS="{{daemon_args}} {{address}} {{etcd_servers}} {{ cloud_provider }} --allow_privileged={{pillar['allow_privileged']}} {{portal_net}} {{cert_file}} {{key_file}}"
+DAEMON_ARGS="{{daemon_args}} {{address}} {{etcd_servers}} {{ cloud_provider }} --allow_privileged={{pillar['allow_privileged']}} {{portal_net}} {{cert_file}} {{key_file}} {{secure_port}} {{token_auth_file}}"
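Off GCE, `token_auth_file` stays pointed at /dev/null, which effectively disables bearer-token auth until tokens are distributed on those providers too. On GCE the line above would render roughly as follows; every concrete value here is illustrative, the real ones come from the template's earlier variables:

    # Hypothetical GCE rendering of the DAEMON_ARGS line; values are examples only.
    DAEMON_ARGS=" -address=127.0.0.1 -etcd_servers=http://127.0.0.1:4001 \
     --allow_privileged=true -portal_net=10.0.0.0/16 \
     -tls_cert_file=/srv/kubernetes/server.cert \
     -tls_private_key_file=/srv/kubernetes/server.key \
     -secure_port=6443 -token_auth_file=/srv/kubernetes/known_tokens.csv"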
@@ -38,6 +38,19 @@
 
 {% endif %}
 
+{% if grains.cloud is defined %}
+{% if grains.cloud == 'gce' %}
+# TODO: generate and distribute tokens on other cloud providers.
+/srv/kubernetes/known_tokens.csv:
+  file.managed:
+    - source: salt://kube-apiserver/known_tokens.csv
+    - user: kube-apiserver
+    - group: kube-apiserver
+    - mode: 400
+{% endif %}
+{% endif %}
+
 kube-apiserver:
   group.present:
     - system: True
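Since the state pins owner, group, and mode, a quick check on the master after a highstate run; the expected output shape below is illustrative:

    ls -l /srv/kubernetes/known_tokens.csv
    # expected shape (illustrative): -r-------- 1 kube-apiserver kube-apiserver 44 ... known_tokens.csv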
@@ -9,6 +9,13 @@
 {% set etcd_servers = "-etcd_servers=http://" + ips[0][0] + ":4001" %}
 {% endif %}
 
+{% if grains.apiservers is defined %}
+{% set apiservers = "-api_servers=https://" + grains.apiservers + ":6443" %}
+{% else %}
+{% set ips = salt['mine.get']('roles:kubernetes-master', 'network.ip_addrs', 'grain').values() %}
+{% set apiservers = "-api_servers=https://" + ips[0][0] + ":6443" %}
+{% endif %}
+
 {% set address = "-address=0.0.0.0" %}
 {% set config = "-config=/etc/kubernetes/manifests" %}
 {% set hostname_override = "" %}
@@ -16,5 +23,6 @@
 {% set hostname_override = " -hostname_override=" + grains.minion_ip %}
 {% endif %}
 
+{% set auth_path = "-auth_path=/var/lib/kubelet/kubernetes_auth" %}
+
-DAEMON_ARGS="{{daemon_args}} {{etcd_servers}} {{hostname_override}} {{address}} {{config}} --allow_privileged={{pillar['allow_privileged']}}"
+DAEMON_ARGS="{{daemon_args}} {{etcd_servers}} {{apiservers}} {{auth_path}} {{hostname_override}} {{address}} {{config}} --allow_privileged={{pillar['allow_privileged']}}"
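Put together, the kubelet now gets an https apiserver endpoint plus a credentials path. A hypothetical rendering on a GCE minion, where 10.240.0.2 stands in for the master IP from the salt mine and 10.240.0.3 for the minion IP, both assumed for illustration:

    # Illustrative rendering of the new kubelet DAEMON_ARGS; addresses are examples.
    DAEMON_ARGS=" -etcd_servers=http://10.240.0.2:4001 \
     -api_servers=https://10.240.0.2:6443 \
     -auth_path=/var/lib/kubelet/kubernetes_auth \
     -hostname_override=10.240.0.3 -address=0.0.0.0 \
     -config=/etc/kubernetes/manifests --allow_privileged=false"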
@@ -38,6 +38,14 @@
 
 {% endif %}
 
+# Kubelet will run without this file but will not be able to send events to the apiserver.
+/var/lib/kubelet/kubernetes_auth:
+  file.managed:
+    - source: salt://kubelet/kubernetes_auth
+    - user: root
+    - group: root
+    - mode: 400
+
 kubelet:
   group.present:
     - system: True
@@ -57,4 +65,5 @@ kubelet:
 {% if grains['os_family'] != 'RedHat' %}
       - file: /etc/init.d/kubelet
 {% endif %}
+      - file: /var/lib/kubelet/kubernetes_auth
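As the comment in the state notes, the file is optional but required for sending events. On a minion it should end up root-only, holding the JSON written by the provisioning script; the token below is fake:

    sudo ls -l /var/lib/kubelet/kubernetes_auth
    # expected shape (illustrative): -r-------- 1 root root 64 ... kubernetes_auth
    sudo cat /var/lib/kubelet/kubernetes_auth
    # {"BearerToken": "<32-char-token>", "Insecure": true }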
@@ -169,6 +169,86 @@ func TestPodUpdate(c *client.Client) bool {
 	return true
 }
 
+// TestKubeletSendsEvent checks that kubelets and scheduler send events about pods scheduling and running.
+func TestKubeletSendsEvent(c *client.Client) bool {
+	provider := os.Getenv("KUBERNETES_PROVIDER")
+	if provider == "" {
+		glog.Errorf("unable to detect cloud type.")
+		return false
+	}
+	if provider != "gce" {
+		glog.Infof("skipping TestKubeletSendsEvent on cloud provider %s", provider)
+		return true
+	}
+
+	podClient := c.Pods(api.NamespaceDefault)
+
+	pod := loadPodOrDie("./api/examples/pod.json")
+	value := strconv.Itoa(time.Now().Nanosecond())
+	pod.Labels["time"] = value
+
+	_, err := podClient.Create(pod)
+	if err != nil {
+		glog.Errorf("Failed to create pod: %v", err)
+		return false
+	}
+	defer podClient.Delete(pod.Name)
+	waitForPodRunning(c, pod.Name)
+	pods, err := podClient.List(labels.SelectorFromSet(labels.Set(map[string]string{"time": value})))
+	if err != nil {
+		glog.Errorf("Failed to list pods: %v", err)
+		return false
+	}
+	if len(pods.Items) != 1 {
+		glog.Errorf("Failed to find the correct pod")
+		return false
+	}
+
+	_, err = podClient.Get(pod.Name)
+	if err != nil {
+		glog.Errorf("Failed to get pod: %v", err)
+		return false
+	}
+
+	// Check for scheduler event about the pod.
+	events, err := c.Events(api.NamespaceDefault).List(
+		labels.Everything(),
+		labels.Set{
+			"involvedObject.name":      pod.Name,
+			"involvedObject.kind":      "Pod",
+			"involvedObject.namespace": api.NamespaceDefault,
+			"source":                   "scheduler",
+			"time":                     value,
+		}.AsSelector(),
+	)
+	if err != nil {
+		glog.Error("Error while listing events:", err)
+		return false
+	}
+	if len(events.Items) == 0 {
+		glog.Error("Didn't see any scheduler events even though pod was running.")
+		return false
+	}
+	glog.Info("Saw scheduler event for our pod.")
+
+	// Check for kubelet event about the pod.
+	events, err = c.Events(api.NamespaceDefault).List(
+		labels.Everything(),
+		labels.Set{
+			"involvedObject.name":      pod.Name,
+			"involvedObject.kind":      "BoundPod",
+			"involvedObject.namespace": api.NamespaceDefault,
+			"source":                   "kubelet",
+		}.AsSelector(),
+	)
+	if err != nil {
+		glog.Error("Error while listing events:", err)
+		return false
+	}
+	if len(events.Items) == 0 {
+		glog.Error("Didn't see any kubelet events even though pod was running.")
+		return false
+	}
+	glog.Info("Saw kubelet event for our pod.")
+	return true
+}
+
 func main() {
 	flag.Parse()
 	runtime.GOMAXPROCS(runtime.NumCPU())
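The test self-skips unless KUBERNETES_PROVIDER is gce, so it would be driven roughly like this; the harness script name is an assumption about the repo layout of this era, not part of the diff:

    export KUBERNETES_PROVIDER=gce   # the test self-skips on any other provider
    hack/e2e-test.sh                 # assumed harness entry point that builds, runs the e2e binary, and tears down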
@@ -186,6 +266,7 @@ func main() {
 
 	tests := []func(c *client.Client) bool{
 		TestKubernetesROService,
+		TestKubeletSendsEvent,
 		// TODO(brendandburns): fix this test and re-add it: TestPodUpdate,
 	}
@@ -195,6 +276,8 @@ func main() {
 		if !testPassed {
 			passed = false
 		}
+		// TODO: clean up objects created during a test after the test, so cases
+		// are independent.
 	}
 	if !passed {
 		glog.Fatalf("Tests failed")