mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-07-23 03:41:45 +00:00
Merge pull request #41285 from liggitt/kube-scheduler-role
Automatic merge from submit-queue (batch tested with PRs 40297, 41285, 41211, 41243, 39735)

Secure kube-scheduler

This PR:
* Adds a bootstrap `system:kube-scheduler` clusterrole
* Adds a bootstrap clusterrolebinding to the `system:kube-scheduler` user
* Sets up a kubeconfig for kube-scheduler on GCE (following the controller-manager pattern)
* Switches kube-scheduler to running with kubeconfig against secured port (salt changes, beware)
* Removes superuser permissions from kube-scheduler in local-up-cluster.sh
* Adds detailed RBAC deny logging

```release-note
On kube-up.sh clusters on GCE, kube-scheduler now contacts the API on the secured port.
```
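The new bootstrap objects are easy to sanity-check once a cluster built from this change is up. A minimal sketch, assuming an admin kubeconfig and a kubectl with `auth can-i` support; the expected answers follow from the role and binding added below:

```bash
# Inspect the bootstrap role and its binding to the system:kube-scheduler user.
kubectl get clusterrole system:kube-scheduler -o yaml
kubectl get clusterrolebinding system:kube-scheduler -o yaml

# Spot-check the granted permissions via impersonation.
kubectl auth can-i list pods --as=system:kube-scheduler        # expected: yes
kubectl auth can-i create bindings --as=system:kube-scheduler  # expected: yes
kubectl auth can-i delete nodes --as=system:kube-scheduler     # expected: no
```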
This commit is contained in:
commit e4a4fe4a89
@@ -163,6 +163,9 @@ function create-master-auth {
  if [[ -n "${KUBE_CONTROLLER_MANAGER_TOKEN:-}" ]]; then
    replace_prefixed_line "${known_tokens_csv}" "${KUBE_CONTROLLER_MANAGER_TOKEN}," "system:kube-controller-manager,uid:system:kube-controller-manager"
  fi
  if [[ -n "${KUBE_SCHEDULER_TOKEN:-}" ]]; then
    replace_prefixed_line "${known_tokens_csv}" "${KUBE_SCHEDULER_TOKEN}," "system:kube-scheduler,uid:system:kube-scheduler"
  fi
  if [[ -n "${KUBELET_TOKEN:-}" ]]; then
    replace_prefixed_line "${known_tokens_csv}" "${KUBELET_TOKEN}," "system:node:node-name,uid:kubelet,system:nodes"
  fi
@@ -360,6 +363,30 @@ current-context: service-account-context
EOF
}

function create-kubescheduler-kubeconfig {
  echo "Creating kube-scheduler kubeconfig file"
  mkdir -p /etc/srv/kubernetes/kube-scheduler
  cat <<EOF >/etc/srv/kubernetes/kube-scheduler/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-scheduler
  user:
    token: ${KUBE_SCHEDULER_TOKEN}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
    server: https://localhost:443
contexts:
- context:
    cluster: local
    user: kube-scheduler
  name: kube-scheduler
current-context: kube-scheduler
EOF
}

function create-master-etcd-auth {
  if [[ -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
    local -r auth_dir="/etc/srv/kubernetes"
@@ -952,10 +979,12 @@ function start-kube-controller-manager {
#   DOCKER_REGISTRY
function start-kube-scheduler {
  echo "Start kubernetes scheduler"
  create-kubescheduler-kubeconfig
  prepare-log-file /var/log/kube-scheduler.log

  # Calculate variables and set them in the manifest.
  params="${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"} ${SCHEDULER_TEST_ARGS:-}"
  params+=" --kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig"
  if [[ -n "${FEATURE_GATES:-}" ]]; then
    params+=" --feature-gates=${FEATURE_GATES}"
  fi
@@ -968,6 +997,7 @@ function start-kube-scheduler {
  local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest"
  remove-salt-config-comments "${src_file}"

  sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
  sed -i -e "s@{{params}}@${params}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" "${src_file}"
@@ -1270,8 +1300,9 @@ if [[ -n "${KUBE_USER:-}" ]]; then
  fi
fi

# generate the controller manager token here since its only used on the master.
# generate the controller manager and scheduler tokens here since they are only used on the master.
KUBE_CONTROLLER_MANAGER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_SCHEDULER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)

# KUBERNETES_CONTAINER_RUNTIME is set by the `kube-env` file, but it's a bit of a mouthful
if [[ "${CONTAINER_RUNTIME:-}" == "" ]]; then
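The kubeconfig written by `create-kubescheduler-kubeconfig` above is plain token auth against the local secured port, so it can be smoke-tested on the master once the API server is up. A sketch, assuming kubectl is available on the node (it is not installed by this script):

```bash
# Run on the master after create-kubescheduler-kubeconfig has executed.
cfg=/etc/srv/kubernetes/kube-scheduler/kubeconfig

# The token maps to system:kube-scheduler via known_tokens.csv, so reads of
# nodes and pods should be allowed by the bootstrap system:kube-scheduler role.
kubectl --kubeconfig="${cfg}" get nodes
kubectl --kubeconfig="${cfg}" get pods --all-namespaces
```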
@@ -233,6 +233,9 @@ function create-master-auth {
  if [[ -n "${KUBE_CONTROLLER_MANAGER_TOKEN:-}" ]]; then
    replace_prefixed_line "${known_tokens_csv}" "${KUBE_CONTROLLER_MANAGER_TOKEN}," "system:kube-controller-manager,uid:system:kube-controller-manager"
  fi
  if [[ -n "${KUBE_SCHEDULER_TOKEN:-}" ]]; then
    replace_prefixed_line "${known_tokens_csv}" "${KUBE_SCHEDULER_TOKEN}," "system:kube-scheduler,uid:system:kube-scheduler"
  fi
  if [[ -n "${KUBELET_TOKEN:-}" ]]; then
    replace_prefixed_line "${known_tokens_csv}" "${KUBELET_TOKEN}," "system:node:node-name,uid:kubelet,system:nodes"
  fi
@@ -431,6 +434,30 @@ current-context: service-account-context
EOF
}

function create-kubescheduler-kubeconfig {
  echo "Creating kube-scheduler kubeconfig file"
  mkdir -p /etc/srv/kubernetes/kube-scheduler
  cat <<EOF >/etc/srv/kubernetes/kube-scheduler/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-scheduler
  user:
    token: ${KUBE_SCHEDULER_TOKEN}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
    server: https://localhost:443
contexts:
- context:
    cluster: local
    user: kube-scheduler
  name: kube-scheduler
current-context: kube-scheduler
EOF
}

function create-master-etcd-auth {
  if [[ -n "${ETCD_CA_CERT:-}" && -n "${ETCD_PEER_KEY:-}" && -n "${ETCD_PEER_CERT:-}" ]]; then
    local -r auth_dir="/etc/srv/kubernetes"
@@ -1021,10 +1048,12 @@ function start-kube-controller-manager {
#   DOCKER_REGISTRY
function start-kube-scheduler {
  echo "Start kubernetes scheduler"
  create-kubescheduler-kubeconfig
  prepare-log-file /var/log/kube-scheduler.log

  # Calculate variables and set them in the manifest.
  params="${SCHEDULER_TEST_LOG_LEVEL:-"--v=2"} ${SCHEDULER_TEST_ARGS:-}"
  params+=" --kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig"
  if [[ -n "${FEATURE_GATES:-}" ]]; then
    params+=" --feature-gates=${FEATURE_GATES}"
  fi
@@ -1037,6 +1066,7 @@ function start-kube-scheduler {
  local -r src_file="${KUBE_HOME}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest"
  remove-salt-config-comments "${src_file}"

  sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
  sed -i -e "s@{{params}}@${params}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" "${src_file}"
@@ -1335,8 +1365,9 @@ if [[ -n "${KUBE_USER:-}" ]]; then
  fi
fi

# generate the controller manager token here since its only used on the master.
# generate the controller manager and scheduler tokens here since they are only used on the master.
KUBE_CONTROLLER_MANAGER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
KUBE_SCHEDULER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)

setup-os-params
config-ip-firewall
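The token pipeline above is the same one already used for the controller-manager token. A standalone sketch of what it yields; the `generate_token` wrapper is illustrative, not part of the change:

```bash
# 128 random bytes -> base64 -> strip "=", "+" and "/" -> keep the first 32 bytes,
# giving a 32-character alphanumeric bearer token.
generate_token() {
  dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null
}

token="$(generate_token)"
echo "${#token}"   # 32
# The token is then registered as: <token>,system:kube-scheduler,uid:system:kube-scheduler
```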
@@ -769,7 +769,7 @@ start_kube_scheduler() {
  if [ -n "${SCHEDULER_TEST_LOG_LEVEL:-}" ]; then
    log_level="${SCHEDULER_TEST_LOG_LEVEL}"
  fi
  params="${log_level} ${SCHEDULER_TEST_ARGS:-}"
  params="--master=127.0.0.1:8080 ${log_level} ${SCHEDULER_TEST_ARGS:-}"
  if [ -n "${SCHEDULING_ALGORITHM_PROVIDER:-}" ]; then
    params="${params} --algorithm-provider=${SCHEDULING_ALGORITHM_PROVIDER}"
  fi
@@ -779,6 +779,8 @@ start_kube_scheduler() {
  # Remove salt comments and replace variables with values
  src_file="${kube_home}/kube-manifests/kubernetes/gci-trusty/kube-scheduler.manifest"
  remove_salt_config_comments "${src_file}"

  sed -i -e "s@{{srv_kube_path}}@/etc/srv/kubernetes@g" "${src_file}"
  sed -i -e "s@{{params}}@${params}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube_docker_registry'\]}}@${DOCKER_REGISTRY}@g" "${src_file}"
  sed -i -e "s@{{pillar\['kube-scheduler_docker_tag'\]}}@${kube_scheduler_docker_tag}@g" "${src_file}"
@@ -1,4 +1,5 @@
{% set params = "" -%}
{% set params = "--master=127.0.0.1:8080" -%}
{% set srv_kube_path = "/srv/kubernetes" -%}

{% set log_level = pillar['log_level'] -%}
{% if pillar['scheduler_test_log_level'] is defined -%}
@@ -47,7 +48,7 @@
        "command": [
          "/bin/sh",
          "-c",
          "/usr/local/bin/kube-scheduler --master=127.0.0.1:8080 {{params}} 1>>/var/log/kube-scheduler.log 2>&1"
          "/usr/local/bin/kube-scheduler {{params}} 1>>/var/log/kube-scheduler.log 2>&1"
        ],
        "livenessProbe": {
          "httpGet": {
@@ -63,14 +64,23 @@
            "name": "logfile",
            "mountPath": "/var/log/kube-scheduler.log",
            "readOnly": false
          },
          {
            "name": "srvkube",
            "mountPath": "{{srv_kube_path}}",
            "readOnly": true
          }
        ]
      }
    ],
    "volumes":[
      { "name": "logfile",
        "hostPath": {
          "path": "/var/log/kube-scheduler.log"}
      {
        "name": "srvkube",
        "hostPath": {"path": "{{srv_kube_path}}"}
      },
      {
        "name": "logfile",
        "hostPath": {"path": "/var/log/kube-scheduler.log"}
      }
    ]
  }}
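Depending on which provisioning path produced the master, the rendered static pod now carries either a `--kubeconfig` pointing at the secured port (the GCI/container-linux helpers above) or the insecure local `--master` moved into `{{params}}` (the salt template here). A quick way to see which flavor a node ended up with; the `/etc/kubernetes/manifests` destination is the usual static-pod directory on these images and is assumed rather than shown in this diff:

```bash
# Check whether the rendered scheduler manifest uses the secured port (kubeconfig)
# or the insecure local master address.
manifest=/etc/kubernetes/manifests/kube-scheduler.manifest   # assumed destination path
grep -o -- '--kubeconfig=[^" ]*' "${manifest}" || true
grep -o -- '--master=[^" ]*' "${manifest}" || true
```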
@@ -402,11 +402,10 @@ function start_apiserver {
    kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" client '"client auth"'

    # Create client certs signed with client-ca, given id, given CN and a number of groups
    # NOTE: system:masters will be removed in the future
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kubelet system:node:${HOSTNAME_OVERRIDE} system:nodes
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-proxy system:kube-proxy system:nodes
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' controller system:kube-controller-manager
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' scheduler system:scheduler system:masters
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' scheduler system:kube-scheduler
    kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' admin system:admin system:masters

    # Create auth proxy client ca
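With the `system:masters` group dropped, the scheduler's client cert in local-up-cluster.sh now authenticates as `system:kube-scheduler` only, relying on the new RBAC role instead of superuser access. A sketch for inspecting the generated cert; the `client-scheduler.crt` filename and default cert directory are assumptions about `kube::util::create_client_certkey`, not shown in this hunk:

```bash
# Print the subject of the scheduler client certificate.
openssl x509 -in "${CERT_DIR:-/var/run/kubernetes}/client-scheduler.crt" -noout -subject
# Before this change: CN=system:scheduler with O=system:masters (superuser).
# After this change:  CN=system:kube-scheduler with no superuser group.
```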
@@ -314,6 +314,28 @@ func ClusterRoles() []rbac.ClusterRole {
            rbac.NewRule("list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(),
        },
    },
    {
        // a role to use for the kube-scheduler
        ObjectMeta: metav1.ObjectMeta{Name: "system:kube-scheduler"},
        Rules: []rbac.PolicyRule{
            eventsRule(),

            // this is for leaderlease access
            // TODO: scope this to the kube-system namespace
            rbac.NewRule("create").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
            rbac.NewRule("get", "update", "patch", "delete").Groups(legacyGroup).Resources("endpoints").Names("kube-scheduler").RuleOrDie(),

            // fundamental resources
            rbac.NewRule(Read...).Groups(legacyGroup).Resources("nodes", "pods").RuleOrDie(),
            rbac.NewRule("create").Groups(legacyGroup).Resources("pods/binding", "bindings").RuleOrDie(),
            rbac.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(),
            // things that select pods
            rbac.NewRule(Read...).Groups(legacyGroup).Resources("services", "replicationcontrollers").RuleOrDie(),
            rbac.NewRule(Read...).Groups(extensionsGroup).Resources("replicasets").RuleOrDie(),
            // things that pods use
            rbac.NewRule(Read...).Groups(legacyGroup).Resources("persistentvolumeclaims", "persistentvolumes").RuleOrDie(),
        },
    },
    {
        // a role for an external/out-of-tree persistent volume provisioner
        ObjectMeta: metav1.ObjectMeta{Name: "system:persistent-volume-provisioner"},
@@ -343,6 +365,7 @@ func ClusterRoleBindings() []rbac.ClusterRoleBinding {
        rbac.NewClusterBinding("system:node").Groups(user.NodesGroup).BindingOrDie(),
        rbac.NewClusterBinding("system:node-proxier").Users(user.KubeProxy).BindingOrDie(),
        rbac.NewClusterBinding("system:kube-controller-manager").Users(user.KubeControllerManager).BindingOrDie(),
        rbac.NewClusterBinding("system:kube-scheduler").Users(user.KubeScheduler).BindingOrDie(),
    }
    addClusterRoleBindingLabel(rolebindings)
    return rolebindings
@@ -74,6 +74,23 @@ items:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: system:kube-controller-manager
- apiVersion: rbac.authorization.k8s.io/v1beta1
  kind: ClusterRoleBinding
  metadata:
    annotations:
      rbac.authorization.kubernetes.io/autoupdate: "true"
    creationTimestamp: null
    labels:
      kubernetes.io/bootstrapping: rbac-defaults
    name: system:kube-scheduler
  roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: ClusterRole
    name: system:kube-scheduler
  subjects:
  - apiGroup: rbac.authorization.k8s.io
    kind: User
    name: system:kube-scheduler
- apiVersion: rbac.authorization.k8s.io/v1beta1
  kind: ClusterRoleBinding
  metadata:
@@ -521,6 +521,89 @@ items:
    verbs:
    - list
    - watch
- apiVersion: rbac.authorization.k8s.io/v1beta1
  kind: ClusterRole
  metadata:
    annotations:
      rbac.authorization.kubernetes.io/autoupdate: "true"
    creationTimestamp: null
    labels:
      kubernetes.io/bootstrapping: rbac-defaults
    name: system:kube-scheduler
  rules:
  - apiGroups:
    - ""
    resources:
    - events
    verbs:
    - create
    - patch
    - update
  - apiGroups:
    - ""
    resources:
    - endpoints
    verbs:
    - create
  - apiGroups:
    - ""
    resourceNames:
    - kube-scheduler
    resources:
    - endpoints
    verbs:
    - delete
    - get
    - patch
    - update
  - apiGroups:
    - ""
    resources:
    - nodes
    - pods
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - ""
    resources:
    - bindings
    - pods/binding
    verbs:
    - create
  - apiGroups:
    - ""
    resources:
    - pods/status
    verbs:
    - update
  - apiGroups:
    - ""
    resources:
    - replicationcontrollers
    - services
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - extensions
    resources:
    - replicasets
    verbs:
    - get
    - list
    - watch
  - apiGroups:
    - ""
    resources:
    - persistentvolumeclaims
    - persistentvolumes
    verbs:
    - get
    - list
    - watch
- apiVersion: rbac.authorization.k8s.io/v1beta1
  kind: ClusterRole
  metadata:
@@ -22,6 +22,8 @@ import (

    "github.com/golang/glog"

    "bytes"

    "k8s.io/apiserver/pkg/authentication/user"
    "k8s.io/apiserver/pkg/authorization/authorizer"
    "k8s.io/kubernetes/pkg/apis/rbac"
@@ -51,11 +53,26 @@ func (r *RBACAuthorizer) Authorize(requestAttributes authorizer.Attributes) (boo
    if glog.V(2) {
        var operation string
        if requestAttributes.IsResourceRequest() {
            operation = fmt.Sprintf(
                "%q on \"%v.%v/%v\"",
                requestAttributes.GetVerb(),
                requestAttributes.GetResource(), requestAttributes.GetAPIGroup(), requestAttributes.GetSubresource(),
            )
            b := &bytes.Buffer{}
            b.WriteString(`"`)
            b.WriteString(requestAttributes.GetVerb())
            b.WriteString(`" resource "`)
            b.WriteString(requestAttributes.GetResource())
            if len(requestAttributes.GetAPIGroup()) > 0 {
                b.WriteString(`.`)
                b.WriteString(requestAttributes.GetAPIGroup())
            }
            if len(requestAttributes.GetSubresource()) > 0 {
                b.WriteString(`/`)
                b.WriteString(requestAttributes.GetSubresource())
            }
            b.WriteString(`"`)
            if len(requestAttributes.GetName()) > 0 {
                b.WriteString(` named "`)
                b.WriteString(requestAttributes.GetName())
                b.WriteString(`"`)
            }
            operation = b.String()
        } else {
            operation = fmt.Sprintf("%q nonResourceURL %q", requestAttributes.GetVerb(), requestAttributes.GetPath())
        }
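The richer deny description is only built when the authorizing component (the API server) runs at verbosity 2 or higher. A sketch of surfacing it with local-up-cluster.sh; the `LOG_LEVEL` variable and `/tmp` log location assume that script's defaults:

```bash
# Run the local cluster with verbosity high enough for the detailed RBAC deny messages.
LOG_LEVEL=4 hack/local-up-cluster.sh

# In another shell, watch the API server log for denials involving the scheduler user.
grep -i 'system:kube-scheduler' /tmp/kube-apiserver.log
```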
@@ -79,4 +79,5 @@ const (
    // core kubernetes process identities
    KubeProxy             = "system:kube-proxy"
    KubeControllerManager = "system:kube-controller-manager"
    KubeScheduler         = "system:kube-scheduler"
)
@@ -160,6 +160,30 @@ current-context: service-account-context
EOF
}

function create-kubescheduler-kubeconfig {
  echo "Creating kube-scheduler kubeconfig file"
  mkdir -p /etc/srv/kubernetes/kube-scheduler
  cat <<EOF >/etc/srv/kubernetes/kube-scheduler/kubeconfig
apiVersion: v1
kind: Config
users:
- name: kube-scheduler
  user:
    token: ${KUBE_SCHEDULER_TOKEN}
clusters:
- name: local
  cluster:
    insecure-skip-tls-verify: true
    server: https://localhost:443
contexts:
- context:
    cluster: local
    user: kube-scheduler
  name: kube-scheduler
current-context: kube-scheduler
EOF
}

function assemble-docker-flags {
  echo "Assemble docker command line flags"
  local docker_opts="-p /var/run/docker.pid --iptables=false --ip-masq=false"
@@ -346,7 +370,7 @@ function compute-kube-controller-manager-params {
# Computes command line arguments to be passed to scheduler.
function compute-kube-scheduler-params {
  local params="${SCHEDULER_TEST_ARGS:-}"
  params+=" --master=127.0.0.1:8080"
  params+=" --kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig"
  echo "${params}"
}

@@ -405,11 +429,16 @@ setup-kubelet-dir
delete-default-etcd-configs
compute-etcd-variables

# Setup authentication token and kubeconfig for controller-manager.
# Setup authentication token and kubeconfig for kube-controller-manager.
KUBE_CONTROLLER_MANAGER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "${KUBE_CONTROLLER_MANAGER_TOKEN},system:kube-controller-manager,uid:system:kube-controller-manager" >> /etc/srv/kubernetes/known_tokens.csv
create-kubecontrollermanager-kubeconfig

# Setup authentication token and kubeconfig for kube-scheduler.
KUBE_SCHEDULER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
echo "${KUBE_SCHEDULER_TOKEN},system:kube-scheduler,uid:system:kube-scheduler" >> /etc/srv/kubernetes/known_tokens.csv
create-kubescheduler-kubeconfig

# Mount master PD for etcd and create symbolic links to it.
{
  main_etcd_mount_point="/mnt/disks/master-pd"
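On a master provisioned by this script, the pieces should line up after boot: a token row for the scheduler identity, the generated kubeconfig, and a scheduler process started with `--kubeconfig` instead of `--master`. A quick check, assuming a root shell on the master:

```bash
# Token row registered for the scheduler identity.
grep ',system:kube-scheduler,' /etc/srv/kubernetes/known_tokens.csv

# Kubeconfig written by create-kubescheduler-kubeconfig.
ls -l /etc/srv/kubernetes/kube-scheduler/kubeconfig

# The running scheduler should carry --kubeconfig rather than --master.
ps -ef | grep '[k]ube-scheduler'
```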