Merge pull request #38512 from deads2k/fed-11-fix-client-cert-termination

Automatic merge from submit-queue

fix client cert handling for delegate authn

Builds on https://github.com/kubernetes/kubernetes/pull/38409.

The client CA wasn't presented by the API server during the TLS handshake, so tools didn't send their client certs. These misconfigurations will start getting caught as we add usage into e2e. Once we split genericapiserver out, we can have a different style of integration test that hits these paths too.
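
For context, the observable symptom (a sketch; host and port are hypothetical): a server only receives client certs when its TLS handshake advertises acceptable client CAs, which you can inspect with `openssl s_client`:

```bash
# Before the fix the section below is empty ("No client certificate CA
# names sent"), so kubectl/curl never attach a client certificate.
openssl s_client -connect localhost:443 </dev/null 2>/dev/null \
  | sed -n '/Acceptable client certificate CA names/,/---/p'
```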
Kubernetes Submit Queue 2016-12-12 08:01:22 -08:00 committed by GitHub
commit 492f8d412f
16 changed files with 610 additions and 193 deletions

View File

@ -0,0 +1,50 @@
kind: ReplicationController
apiVersion: v1
metadata:
  name: etcd
  labels:
    etcd: "true"
spec:
  replicas: 1
  selector:
    etcd: "true"
  template:
    metadata:
      labels:
        etcd: "true"
    spec:
      containers:
      - name: etcd
        image: quay.io/coreos/etcd:v3.0.15
        command:
        - "etcd"
        - "--listen-client-urls=https://0.0.0.0:4001"
        - "--advertise-client-urls=https://etcd.kube-public.svc:4001"
        - "--trusted-ca-file=/var/run/serving-ca/ca.crt"
        - "--cert-file=/var/run/serving-cert/tls.crt"
        - "--key-file=/var/run/serving-cert/tls.key"
        - "--client-cert-auth=true"
        - "--listen-peer-urls=https://0.0.0.0:7001"
        - "--initial-advertise-peer-urls=https://etcd.kube-public.svc:7001"
        - "--peer-trusted-ca-file=/var/run/serving-ca/ca.crt"
        - "--peer-cert-file=/var/run/serving-cert/tls.crt"
        - "--peer-key-file=/var/run/serving-cert/tls.key"
        - "--peer-client-cert-auth=true"
        - "--initial-cluster=default=https://etcd.kube-public.svc:7001"
        ports:
        - containerPort: 4001
        volumeMounts:
        - mountPath: /var/run/serving-cert
          name: volume-serving-cert
        - mountPath: /var/run/serving-ca
          name: volume-etcd-ca
      volumes:
      - secret:
          defaultMode: 420
          secretName: serving-etcd
        name: volume-serving-cert
      - configMap:
          defaultMode: 420
          name: etcd-ca
        name: volume-etcd-ca
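
A hedged way to exercise this manifest once it is running; the flags are etcdctl's v2-style TLS options, and the cert paths are the hypothetical local copies created by `hack/local-up-discovery.sh` further down:

```bash
# etcd only trusts a single CA, so the same etcd-ca signs both the serving
# cert and the discovery-etcd client pair used here.
etcdctl --endpoints=https://etcd.kube-public.svc:4001 \
  --ca-file=/var/run/kubernetes/etcd-ca.crt \
  --cert-file=/var/run/kubernetes/client-discovery-etcd.crt \
  --key-file=/var/run/kubernetes/client-discovery-etcd.key \
  cluster-health
```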

View File

@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
  name: etcd
spec:
  ports:
  - port: 4001
    protocol: TCP
    targetPort: 4001
  selector:
    etcd: "true"

View File

@ -0,0 +1,86 @@
kind: ReplicationController
apiVersion: v1
metadata:
  name: kubernetes-discovery
  labels:
    kubernetes-discovery: "true"
spec:
  replicas: 1
  selector:
    kubernetes-discovery: "true"
  template:
    metadata:
      labels:
        kubernetes-discovery: "true"
    spec:
      containers:
      - name: kubernetes-discovery
        image: kubernetes-discovery:latest
        imagePullPolicy: Never
        args:
        - "--tls-cert-file=/var/run/serving-cert/tls.crt"
        - "--tls-private-key-file=/var/run/serving-cert/tls.key"
        - "--tls-ca-file=/var/run/serving-ca/ca.crt"
        - "--client-ca-file=/var/run/client-ca/ca.crt"
        - "--authentication-kubeconfig=/var/run/auth-kubeconfig/kubeconfig"
        - "--authorization-kubeconfig=/var/run/auth-kubeconfig/kubeconfig"
        - "--requestheader-username-headers=X-Remote-User"
        - "--requestheader-group-headers=X-Remote-Group"
        - "--requestheader-extra-headers-prefix=X-Remote-Extra-"
        - "--requestheader-client-ca-file=/var/run/request-header-ca/ca.crt"
        - "--etcd-servers=https://etcd.kube-public.svc:4001"
        - "--etcd-certfile=/var/run/etcd-client-cert/tls.crt"
        - "--etcd-keyfile=/var/run/etcd-client-cert/tls.key"
        - "--etcd-cafile=/var/run/etcd-ca/ca.crt"
        ports:
        - containerPort: 443
        volumeMounts:
        - mountPath: /var/run/request-header-ca
          name: volume-request-header-ca
        - mountPath: /var/run/client-ca
          name: volume-client-ca
        - mountPath: /var/run/auth-proxy-client
          name: volume-auth-proxy-client
        - mountPath: /var/run/auth-kubeconfig
          name: volume-auth-kubeconfig
        - mountPath: /var/run/etcd-client-cert
          name: volume-etcd-client-cert
        - mountPath: /var/run/serving-ca
          name: volume-serving-ca
        - mountPath: /var/run/serving-cert
          name: volume-serving-cert
        - mountPath: /var/run/etcd-ca
          name: volume-etcd-ca
      volumes:
      - configMap:
          defaultMode: 420
          name: request-header-ca
        name: volume-request-header-ca
      - configMap:
          defaultMode: 420
          name: client-ca
        name: volume-client-ca
      - name: volume-auth-proxy-client
        secret:
          defaultMode: 420
          secretName: auth-proxy-client
      - name: volume-auth-kubeconfig
        secret:
          defaultMode: 420
          secretName: discovery-auth-kubeconfig
      - name: volume-etcd-client-cert
        secret:
          defaultMode: 420
          secretName: discovery-etcd
      - name: volume-serving-cert
        secret:
          defaultMode: 420
          secretName: serving-discovery
      - configMap:
          defaultMode: 420
          name: discovery-ca
        name: volume-serving-ca
      - configMap:
          defaultMode: 420
          name: etcd-ca
        name: volume-etcd-ca
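
With `--client-ca-file` wired through to the serving config, the server advertises that CA and certificate-bearing clients get authenticated; a sketch using the hypothetical local-up cert paths and the NodePort from the service below:

```bash
CERT_DIR=/var/run/kubernetes
curl --cacert "${CERT_DIR}/discovery-ca.crt" \
     --cert "${CERT_DIR}/client-admin.crt" \
     --key "${CERT_DIR}/client-admin.key" \
     https://localhost:31090/version
```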

View File

@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    kubernetes-discovery: "true"
  name: kubernetes-discovery
spec:
  ports:
  - port: 443
    protocol: TCP
    nodePort: 31090
    targetPort: 443
  selector:
    kubernetes-discovery: "true"
  type: NodePort

View File

@ -0,0 +1,18 @@
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
FROM fedora
MAINTAINER David Eads <deads@redhat.com>
ADD kubernetes-discovery /
ENTRYPOINT ["/kubernetes-discovery"]

View File

@ -0,0 +1,28 @@
#!/bin/bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../..
source "${KUBE_ROOT}/hack/lib/util.sh"
# Register function to be called on EXIT to remove generated binary.
function cleanup {
rm "${KUBE_ROOT}/cmd/kubernetes-discovery/artifacts/simple-image/kubernetes-discovery"
}
trap cleanup EXIT
cp -v ${KUBE_ROOT}/_output/local/bin/linux/amd64/kubernetes-discovery "${KUBE_ROOT}/cmd/kubernetes-discovery/artifacts/simple-image/kubernetes-discovery"
docker build -t kubernetes-discovery:latest ${KUBE_ROOT}/cmd/kubernetes-discovery/artifacts/simple-image
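
Usage sketch (assuming a prior linux/amd64 build has placed the binary under `_output/local/bin`):

```bash
make WHAT=cmd/kubernetes-discovery
cmd/kubernetes-discovery/hack/build-image.sh
```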

View File

@ -61,9 +61,10 @@ func NewCommandStartDiscoveryServer(out, err io.Writer) *cobra.Command {
 		StdOut: out,
 		StdErr: err,
 	}
 	o.Etcd.StorageConfig.Type = storagebackend.StorageTypeETCD3
 	o.Etcd.StorageConfig.Prefix = defaultEtcdPathPrefix
 	o.Etcd.StorageConfig.Codec = api.Codecs.LegacyCodec(v1alpha1.SchemeGroupVersion)
-	o.SecureServing.ServingOptions.BindPort = 9090
+	o.SecureServing.ServingOptions.BindPort = 443
 	cmd := &cobra.Command{
 		Short: "Launch a discovery summarizer and proxy server",

View File

@ -32,7 +32,7 @@ kube::util::wait_for_url() {
local i
for i in $(seq 1 $times); do
local out
if out=$(curl -gkfs $url 2>/dev/null); then
if out=$(curl --max-time 1 -gkfs $url 2>/dev/null); then
kube::log::status "On try ${i}, ${prefix}: ${out}"
return 0
fi
@ -443,4 +443,137 @@ kube::util::download_file() {
return 1
}
# Test whether cfssl and cfssljson are installed.
# Sets:
# CFSSL_BIN: The path of the installed cfssl binary
# CFSSLJSON_BIN: The path of the installed cfssljson binary
function kube::util::test_cfssl_installed {
if ! command -v cfssl &>/dev/null || ! command -v cfssljson &>/dev/null; then
echo "Failed to successfully run 'cfssl', please verify that cfssl and cfssljson are in \$PATH."
echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..."
exit 1
fi
CFSSL_BIN=$(command -v cfssl)
CFSSLJSON_BIN=$(command -v cfssljson)
}
# Test whether openssl is installed.
# Sets:
# OPENSSL_BIN: The path to the openssl binary to use
function kube::util::test_openssl_installed {
openssl version >& /dev/null
if [ "$?" != "0" ]; then
echo "Failed to run openssl. Please ensure openssl is installed"
exit 1
fi
OPENSSL_BIN=$(command -v openssl)
}
# creates a signing CA; args are sudo, dest-dir, ca-id, purpose
# purpose is dropped in after "key encipherment", you usually want
# '"client auth"'
# '"server auth"'
# '"client auth","server auth"'
function kube::util::create_signing_certkey {
local sudo=$1
local dest_dir=$2
local id=$3
local purpose=$4
# Create client ca
${sudo} /bin/bash -e <<EOF
rm -f "${dest_dir}/${id}-ca.crt" "${dest_dir}/${id}-ca.key"
${OPENSSL_BIN} req -x509 -sha256 -new -nodes -days 365 -newkey rsa:2048 -keyout "${dest_dir}/${id}-ca.key" -out "${dest_dir}/${id}-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment",${purpose}]}}}' > "${dest_dir}/${id}-ca-config.json"
EOF
}
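# Example (hypothetical paths; assumes kube::util::test_openssl_installed has
# set OPENSSL_BIN): create a CA usable for both client and server auth:
#   kube::util::create_signing_certkey "" /tmp/demo-certs demo '"client auth","server auth"'
#   ls /tmp/demo-certs   # demo-ca.crt demo-ca.key demo-ca-config.json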
# signs a client certificate: args are sudo, dest-dir, CA, filename (roughly), username, groups...
function kube::util::create_client_certkey {
local sudo=$1
local dest_dir=$2
local ca=$3
local id=$4
local cn=${5:-$4}
local groups=""
local SEP=""
shift 5
while [ -n "${1:-}" ]; do
groups+="${SEP}{\"O\":\"$1\"}"
SEP=","
shift 1
done
${sudo} /bin/bash -e <<EOF
cd ${dest_dir}
echo '{"CN":"${cn}","names":[${groups}],"hosts":[""],"key":{"algo":"rsa","size":2048}}' | ${CFSSL_BIN} gencert -ca=${ca}.crt -ca-key=${ca}.key -config=${ca}-config.json - | ${CFSSLJSON_BIN} -bare client-${id}
mv "client-${id}-key.pem" "client-${id}.key"
mv "client-${id}.pem" "client-${id}.crt"
rm -f "client-${id}.csr"
EOF
}
# signs a serving certificate: args are sudo, dest-dir, ca, filename (roughly), subject, hosts...
function kube::util::create_serving_certkey {
local sudo=$1
local dest_dir=$2
local ca=$3
local id=$4
local cn=${5:-$4}
local hosts=""
local SEP=""
shift 5
while [ -n "${1:-}" ]; do
hosts+="${SEP}\"$1\""
SEP=","
shift 1
done
${sudo} /bin/bash -e <<EOF
cd ${dest_dir}
echo '{"CN":"${cn}","hosts":[${hosts}],"key":{"algo":"rsa","size":2048}}' | ${CFSSL_BIN} gencert -ca=${ca}.crt -ca-key=${ca}.key -config=${ca}-config.json - | ${CFSSLJSON_BIN} -bare serving-${id}
mv "serving-${id}-key.pem" "serving-${id}.key"
mv "serving-${id}.pem" "serving-${id}.crt"
rm -f "serving-${id}.csr"
EOF
}
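# Example continuing the hypothetical demo CA above (assumes
# kube::util::test_cfssl_installed has set CFSSL_BIN/CFSSLJSON_BIN):
#   kube::util::create_client_certkey "" /tmp/demo-certs demo-ca alice alice team-a
#   kube::util::create_serving_certkey "" /tmp/demo-certs demo-ca web web.example.local localhost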
# creates a self-contained kubeconfig: args are sudo, dest-dir, ca file, host, port, client id, token(optional)
function kube::util::write_client_kubeconfig {
local sudo=$1
local dest_dir=$2
local ca_file=$3
local api_host=$4
local api_port=$5
local client_id=$6
local token=${7:-}
cat <<EOF | ${sudo} tee "${dest_dir}"/${client_id}.kubeconfig > /dev/null
apiVersion: v1
kind: Config
clusters:
  - cluster:
      certificate-authority: ${ca_file}
      server: https://${api_host}:${api_port}/
    name: local-up-cluster
users:
  - user:
      token: ${token}
      client-certificate: ${dest_dir}/client-${client_id}.crt
      client-key: ${dest_dir}/client-${client_id}.key
    name: local-up-cluster
contexts:
  - context:
      cluster: local-up-cluster
      user: local-up-cluster
    name: local-up-cluster
current-context: local-up-cluster
EOF
# flatten the kubeconfig files to make them self contained
username=$(whoami)
${sudo} /bin/bash -e <<EOF
$(kube::util::find-binary kubectl) --kubeconfig="${dest_dir}/${client_id}.kubeconfig" config view --minify --flatten > "/tmp/${client_id}.kubeconfig"
mv -f "/tmp/${client_id}.kubeconfig" "${dest_dir}/${client_id}.kubeconfig"
chown ${username} "${dest_dir}/${client_id}.kubeconfig"
EOF
}
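# Example (hypothetical values; kubectl must already be built so that
# kube::util::find-binary succeeds during the flatten step):
#   kube::util::write_client_kubeconfig "" /tmp/demo-certs /tmp/demo-certs/demo-ca.crt localhost 6443 alice
#   kubectl --kubeconfig=/tmp/demo-certs/alice.kubeconfig version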
# ex: ts=2 sw=2 et filetype=sh

View File

@ -14,6 +14,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
# This command builds and runs a local kubernetes cluster. It's just like
# local-up.sh, but this one launches the three separate binaries.
# You may need to run this as root to allow kubelet to open docker's socket,
@ -31,7 +33,6 @@ KUBELET_AUTHENTICATION_WEBHOOK=${KUBELET_AUTHENTICATION_WEBHOOK:-""}
NET_PLUGIN=${NET_PLUGIN:-""}
# Place the binaries required by NET_PLUGIN in this directory, eg: "/home/kubernetes/bin".
NET_PLUGIN_DIR=${NET_PLUGIN_DIR:-""}
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
SERVICE_CLUSTER_IP_RANGE=${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/24}
# if enabled, must set CGROUP_ROOT
EXPERIMENTAL_CGROUPS_PER_QOS=${EXPERIMENTAL_CGROUPS_PER_QOS:-false}
@ -68,7 +69,6 @@ KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}"
export KUBE_CACHE_MUTATION_DETECTOR
# START_MODE can be 'all', 'kubeletonly', or 'nokubelet'
START_MODE=${START_MODE:-"all"}
@ -111,7 +111,7 @@ function guess_built_binary_path {
}
### Allow user to supply the source directory.
GO_OUT=""
GO_OUT=${GO_OUT:-}
while getopts "o:O" OPTION
do
case $OPTION in
@ -148,20 +148,6 @@ function test_docker {
fi
}
# Test whether cfssl and cfssljson are installed.
# Sets:
# CFSSL_BIN: The path of the installed cfssl binary
# CFSSLJSON_BIN: The path of the installed cfssljson binary
function test_cfssl_installed {
if ! command -v cfssl &>/dev/null || ! command -v cfssljson &>/dev/null; then
echo "Failed to successfully run 'cfssl', please verify that cfssl and cfssljson are in \$PATH."
echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..."
exit 1
fi
CFSSL_BIN=$(command -v cfssl)
CFSSLJSON_BIN=$(command -v cfssljson)
}
function test_rkt {
if [[ -n "${RKT_PATH}" ]]; then
${RKT_PATH} list 2> /dev/null 1> /dev/null
@ -178,17 +164,6 @@ function test_rkt {
fi
}
# Test whether openssl is installed.
# Sets:
# OPENSSL_BIN: The path to the openssl binary to use
function test_openssl_installed {
openssl version >& /dev/null
if [ "$?" != "0" ]; then
echo "Failed to run openssl. Please ensure openssl is installed"
exit 1
fi
OPENSSL_BIN=$(command -v openssl)
}
# Shut down anyway if there's an error.
set +e
@ -218,7 +193,6 @@ ENABLE_CONTROLLER_ATTACH_DETACH=${ENABLE_CONTROLLER_ATTACH_DETACH:-"true"} # cur
CERT_DIR=${CERT_DIR:-"/var/run/kubernetes"}
ROOT_CA_FILE=$CERT_DIR/apiserver.crt
EXPERIMENTAL_CRI=${EXPERIMENTAL_CRI:-"false"}
DISCOVERY_SECURE_PORT=${DISCOVERY_SECURE_PORT:-9090}
# Ensure CERT_DIR is created for auto-generated crt/key and kubeconfig
@ -322,10 +296,6 @@ cleanup()
[[ -n "${APISERVER_PID-}" ]] && APISERVER_PIDS=$(pgrep -P ${APISERVER_PID} ; ps -o pid= -p ${APISERVER_PID})
[[ -n "${APISERVER_PIDS-}" ]] && sudo kill ${APISERVER_PIDS}
# Check if the discovery server is still running
[[ -n "${DISCOVERY_PID-}" ]] && DISCOVERY_PIDS=$(pgrep -P ${DISCOVERY_PID} ; ps -o pid= -p ${DISCOVERY_PID})
[[ -n "${DISCOVERY_PIDS-}" ]] && sudo kill ${DISCOVERY_PIDS}
# Check if the controller-manager is still running
[[ -n "${CTLRMGR_PID-}" ]] && CTLRMGR_PIDS=$(pgrep -P ${CTLRMGR_PID} ; ps -o pid= -p ${CTLRMGR_PID})
[[ -n "${CTLRMGR_PIDS-}" ]] && sudo kill ${CTLRMGR_PIDS}
@ -368,59 +338,6 @@ function set_service_accounts {
fi
}
function create_client_certkey {
local CA=$1
local ID=$2
local CN=${3:-$2}
local NAMES=""
local SEP=""
shift 3
while [ -n "${1:-}" ]; do
NAMES+="${SEP}{\"O\":\"$1\"}"
SEP=","
shift 1
done
${CONTROLPLANE_SUDO} /bin/bash -e <<EOF
cd ${CERT_DIR}
echo '{"CN":"${CN}","names":[${NAMES}],"hosts":[""],"key":{"algo":"rsa","size":2048}}' | "${CFSSL_BIN}" gencert -ca=${CA}.crt -ca-key=${CA}.key -config=client-ca-config.json - | "${CFSSLJSON_BIN}" -bare client-${ID}
mv "client-${ID}-key.pem" "client-${ID}.key"
mv "client-${ID}.pem" "client-${ID}.crt"
rm -f "client-${ID}.csr"
EOF
}
function write_client_kubeconfig {
cat <<EOF | ${CONTROLPLANE_SUDO} tee "${CERT_DIR}"/$1.kubeconfig > /dev/null
apiVersion: v1
kind: Config
clusters:
  - cluster:
      certificate-authority: ${ROOT_CA_FILE}
      server: https://${API_HOST}:${API_SECURE_PORT}/
    name: local-up-cluster
users:
  - user:
      token: ${KUBECONFIG_TOKEN:-}
      client-certificate: ${CERT_DIR}/client-$1.crt
      client-key: ${CERT_DIR}/client-$1.key
    name: local-up-cluster
contexts:
  - context:
      cluster: local-up-cluster
      user: local-up-cluster
    name: local-up-cluster
current-context: local-up-cluster
EOF
# flatten the kubeconfig files to make them self contained
username=$(whoami)
${CONTROLPLANE_SUDO} /bin/bash -e <<EOF
${GO_OUT}/kubectl --kubeconfig="${CERT_DIR}/$1.kubeconfig" config view --minify --flatten > "/tmp/$1.kubeconfig"
mv -f "/tmp/$1.kubeconfig" "${CERT_DIR}/$1.kubeconfig"
chown ${username} "${CERT_DIR}/$1.kubeconfig"
EOF
}
function start_apiserver {
security_admission=""
if [[ -z "${ALLOW_SECURITY_CONTEXT}" ]]; then
@ -462,27 +379,19 @@ function start_apiserver {
fi
# Create client ca
${CONTROLPLANE_SUDO} /bin/bash -e <<EOF
rm -f "${CERT_DIR}/client-ca.crt" "${CERT_DIR}/client-ca.key"
"${OPENSSL_BIN}" req -x509 -sha256 -new -nodes -days 365 -newkey rsa:2048 -keyout "${CERT_DIR}/client-ca.key" -out "${CERT_DIR}/client-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment","client auth"]}}}' > "${CERT_DIR}/client-ca-config.json"
EOF
kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" client '"client auth"'
# Create client certs signed with client-ca, given id, given CN and a number of groups
# NOTE: system:masters will be removed in the future
create_client_certkey client-ca kubelet system:node:${HOSTNAME_OVERRIDE} system:nodes
create_client_certkey client-ca kube-proxy system:kube-proxy system:nodes
create_client_certkey client-ca controller system:controller system:masters
create_client_certkey client-ca scheduler system:scheduler system:masters
create_client_certkey client-ca admin system:admin system:masters
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kubelet system:node:${HOSTNAME_OVERRIDE} system:nodes
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' kube-proxy system:kube-proxy system:nodes
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' controller system:controller system:masters
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' scheduler system:scheduler system:masters
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" 'client-ca' admin system:admin system:masters
# Create auth proxy client ca
sudo /bin/bash -e <<EOF
rm -f "${CERT_DIR}/auth-proxy-client-ca.crt" "${CERT_DIR}/auth-proxy-client-ca.key"
"${OPENSSL_BIN}" req -x509 -sha256 -new -nodes -days 365 -newkey rsa:2048 -keyout "${CERT_DIR}/auth-proxy-client-ca.key" -out "${CERT_DIR}/auth-proxy-client-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment","client auth"]}}}' > "${CERT_DIR}/auth-proxy-client-ca-config.json"
EOF
create_client_certkey auth-proxy-client-ca auth-proxy system:auth-proxy
kube::util::create_signing_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header '"client auth"'
kube::util::create_client_certkey "${CONTROLPLANE_SUDO}" "${CERT_DIR}" request-header-ca auth-proxy system:auth-proxy
APISERVER_LOG=/tmp/kube-apiserver.log
${CONTROLPLANE_SUDO} "${GO_OUT}/hyperkube" apiserver ${anytoken_arg} ${authorizer_arg} ${priv_arg} ${runtime_config}\
@ -506,7 +415,7 @@ EOF
--requestheader-username-headers=X-Remote-User \
--requestheader-group-headers=X-Remote-Group \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-client-ca-file="${CERT_DIR}/auth-proxy-client-ca.crt" \
--requestheader-client-ca-file="${CERT_DIR}/request-header-ca.crt" \
--requestheader-allowed-names=system:auth-proxy \
--cors-allowed-origins="${API_CORS_ALLOWED_ORIGINS}" >"${APISERVER_LOG}" 2>&1 &
APISERVER_PID=$!
@ -516,11 +425,11 @@ EOF
kube::util::wait_for_url "https://${API_HOST}:${API_SECURE_PORT}/version" "apiserver: " 1 ${WAIT_FOR_URL_API_SERVER} || exit 1
# Create kubeconfigs for all components, using client certs
write_client_kubeconfig admin
write_client_kubeconfig kubelet
write_client_kubeconfig kube-proxy
write_client_kubeconfig controller
write_client_kubeconfig scheduler
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" admin
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kubelet
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" kube-proxy
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" controller
kube::util::write_client_kubeconfig "${CONTROLPLANE_SUDO}" "${CERT_DIR}" "${ROOT_CA_FILE}" "${API_HOST}" "${API_SECURE_PORT}" scheduler
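# Hypothetical smoke test: each kubeconfig above should authenticate with its
# client cert, e.g.:
#   kubectl --kubeconfig="${CERT_DIR}/admin.kubeconfig" get namespaces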
if [[ -z "${AUTH_ARGS}" ]]; then
if [[ "${ALLOW_ANY_TOKEN}" = true ]]; then
@ -537,54 +446,6 @@ EOF
fi
}
# start_discovery relies on certificates created by start_apiserver
function start_discovery {
# TODO generate serving certificates
create_client_certkey client-ca discovery-auth system:discovery-auth
write_client_kubeconfig discovery-auth
# grant permission to run delegated authentication and authorization checks
if [[ "${ENABLE_RBAC}" = true ]]; then
${KUBECTL} ${AUTH_ARGS} create clusterrolebinding discovery:system:auth-delegator --clusterrole=system:auth-delegator --user=system:discovery-auth
fi
curl --silent -k -g $API_HOST:$DISCOVERY_SECURE_PORT
if [ ! $? -eq 0 ]; then
echo "Kubernetes Discovery secure port is free, proceeding..."
else
echo "ERROR starting Kubernetes Discovery, exiting. Some process on $API_HOST is serving already on $DISCOVERY_SECURE_PORT"
return
fi
${CONTROLPLANE_SUDO} cp "${CERT_DIR}/admin.kubeconfig" "${CERT_DIR}/admin-discovery.kubeconfig"
${CONTROLPLANE_SUDO} ${GO_OUT}/kubectl config set-cluster local-up-cluster --kubeconfig="${CERT_DIR}/admin-discovery.kubeconfig" --insecure-skip-tls-verify --server="https://${API_HOST}:${DISCOVERY_SECURE_PORT}"
DISCOVERY_SERVER_LOG=/tmp/kubernetes-discovery.log
${CONTROLPLANE_SUDO} "${GO_OUT}/kubernetes-discovery" \
--cert-dir="${CERT_DIR}" \
--client-ca-file="${CERT_DIR}/client-ca.crt" \
--authentication-kubeconfig="${CERT_DIR}/discovery-auth.kubeconfig" \
--authorization-kubeconfig="${CERT_DIR}/discovery-auth.kubeconfig" \
--requestheader-username-headers=X-Remote-User \
--requestheader-group-headers=X-Remote-Group \
--requestheader-extra-headers-prefix=X-Remote-Extra- \
--requestheader-client-ca-file="${CERT_DIR}/auth-proxy-client-ca.crt" \
--requestheader-allowed-names=system:auth-proxy \
--bind-address="${API_BIND_ADDR}" \
--secure-port="${DISCOVERY_SECURE_PORT}" \
--tls-ca-file="${ROOT_CA_FILE}" \
--etcd-servers="http://${ETCD_HOST}:${ETCD_PORT}" >"${DISCOVERY_SERVER_LOG}" 2>&1 &
DISCOVERY_PID=$!
# Wait for kubernetes-discovery to come up before launching the rest of the components.
echo "Waiting for kubernetes-discovery to come up"
kube::util::wait_for_url "https://${API_HOST}:${DISCOVERY_SECURE_PORT}/version" "kubernetes-discovery: " 1 ${WAIT_FOR_URL_API_SERVER} || exit 1
# create the "normal" api services for the core API server
${CONTROLPLANE_SUDO} ${GO_OUT}/kubectl create -f "${KUBE_ROOT}/cmd/kubernetes-discovery/artifacts/core-apiservices" --kubeconfig="${CERT_DIR}/admin-discovery.kubeconfig"
}
function start_controller_manager {
node_cidr_args=""
if [[ "${NET_PLUGIN}" == "kubenet" ]]; then
@ -750,29 +611,29 @@ function start_kubeproxy {
function start_kubedns {
if [[ "${ENABLE_CLUSTER_DNS}" = true ]]; then
echo "Creating kube-system namespace"
sed -e "s/{{ pillar\['dns_replicas'\] }}/${DNS_REPLICAS}/g;s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g;" "${KUBE_ROOT}/cluster/addons/dns/skydns-rc.yaml.in" >| skydns-rc.yaml
sed -e "s/{{ pillar\['dns_replicas'\] }}/${DNS_REPLICAS}/g;s/{{ pillar\['dns_domain'\] }}/${DNS_DOMAIN}/g;" "${KUBE_ROOT}/cluster/addons/dns/kubedns-controller.yaml.in" >| kubedns-rc.yaml
if [[ "${FEDERATION:-}" == "true" ]]; then
FEDERATIONS_DOMAIN_MAP="${FEDERATIONS_DOMAIN_MAP:-}"
if [[ -z "${FEDERATIONS_DOMAIN_MAP}" && -n "${FEDERATION_NAME:-}" && -n "${DNS_ZONE_NAME:-}" ]]; then
FEDERATIONS_DOMAIN_MAP="${FEDERATION_NAME}=${DNS_ZONE_NAME}"
fi
if [[ -n "${FEDERATIONS_DOMAIN_MAP}" ]]; then
sed -i -e "s/{{ pillar\['federations_domain_map'\] }}/- --federations=${FEDERATIONS_DOMAIN_MAP}/g" skydns-rc.yaml
sed -i -e "s/{{ pillar\['federations_domain_map'\] }}/- --federations=${FEDERATIONS_DOMAIN_MAP}/g" kubedns-rc.yaml
else
sed -i -e "/{{ pillar\['federations_domain_map'\] }}/d" skydns-rc.yaml
sed -i -e "/{{ pillar\['federations_domain_map'\] }}/d" kubedns-rc.yaml
fi
else
sed -i -e "/{{ pillar\['federations_domain_map'\] }}/d" skydns-rc.yaml
sed -i -e "/{{ pillar\['federations_domain_map'\] }}/d" kubedns-rc.yaml
fi
sed -e "s/{{ pillar\['dns_server'\] }}/${DNS_SERVER_IP}/g" "${KUBE_ROOT}/cluster/addons/dns/skydns-svc.yaml.in" >| skydns-svc.yaml
sed -e "s/{{ pillar\['dns_server'\] }}/${DNS_SERVER_IP}/g" "${KUBE_ROOT}/cluster/addons/dns/kubedns-svc.yaml.in" >| kubedns-svc.yaml
# TODO update to dns role once we have one.
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" create clusterrolebinding system:kube-dns --clusterrole=cluster-admin --serviceaccount=kube-system:default
# use kubectl to create skydns rc and service
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f skydns-rc.yaml
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f skydns-svc.yaml
# use kubectl to create kubedns rc and service
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f kubedns-rc.yaml
${KUBECTL} --kubeconfig="${CERT_DIR}/admin.kubeconfig" --namespace=kube-system create -f kubedns-svc.yaml
echo "Kube-dns rc and service successfully deployed."
rm skydns-rc.yaml skydns-svc.yaml
rm kubedns-rc.yaml kubedns-svc.yaml
fi
}
@ -786,7 +647,6 @@ Logs:
${CTLRMGR_LOG:-}
${PROXY_LOG:-}
${SCHEDULER_LOG:-}
${DISCOVERY_SERVER_LOG:-}
EOF
fi
@ -833,8 +693,8 @@ if [[ "${START_MODE}" != "kubeletonly" ]]; then
test_apiserver_off
fi
test_openssl_installed
test_cfssl_installed
kube::util::test_openssl_installed
kube::util::test_cfssl_installed
### IF the user didn't supply an output/ for the build... Then we detect.
if [ "$GO_OUT" == "" ]; then
@ -861,11 +721,6 @@ if [[ "${START_MODE}" != "nokubelet" ]]; then
start_kubelet
fi
START_DISCOVERY=${START_DISCOVERY:-false}
if [[ "${START_DISCOVERY}" = true ]]; then
start_discovery
fi
print_success
if [[ "${ENABLE_DAEMON}" = false ]]; then

hack/local-up-discovery.sh (new executable file, 110 lines)
View File

@ -0,0 +1,110 @@
#!/bin/bash
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# starts kubernetes-discovery as a pod after you've run `local-up-cluster.sh`
KUBE_ROOT=$(dirname "${BASH_SOURCE}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"
DISCOVERY_SECURE_PORT=${DISCOVERY_SECURE_PORT:-31090}
API_HOST=${API_HOST:-localhost}
API_HOST_IP=${API_HOST_IP:-"127.0.0.1"}
CERT_DIR=${CERT_DIR:-"/var/run/kubernetes"}
ROOT_CA_FILE=$CERT_DIR/apiserver.crt
# Ensure CERT_DIR is created for auto-generated crt/key and kubeconfig
mkdir -p "${CERT_DIR}" &>/dev/null || sudo mkdir -p "${CERT_DIR}"
sudo=$(test -w "${CERT_DIR}" || echo "sudo -E")
kubectl=$(kube::util::find-binary kubectl)
function kubectl_core {
${kubectl} --kubeconfig="${CERT_DIR}/admin.kubeconfig" $@
}
function sudo_kubectl_core {
${sudo} ${kubectl} --kubeconfig="${CERT_DIR}/admin.kubeconfig" $@
}
# start_discovery relies on certificates created by start_apiserver
function start_discovery {
kube::util::create_signing_certkey "${sudo}" "${CERT_DIR}" "discovery" '"server auth"'
# sign the discovery cert to be good for the local node too, so that we can trust it
kube::util::create_serving_certkey "${sudo}" "${CERT_DIR}" "discovery-ca" discovery api.kube-public.svc "localhost" ${API_HOST_IP}
# Create a single CA used for both serving and client trust; etcd only accepts one trusted CA file
kube::util::create_signing_certkey "${sudo}" "${CERT_DIR}" "etcd" '"client auth","server auth"'
kube::util::create_serving_certkey "${sudo}" "${CERT_DIR}" "etcd-ca" etcd etcd.kube-public.svc
# etcd doesn't seem to have separate signers for serving and client trust
kube::util::create_client_certkey "${sudo}" "${CERT_DIR}" "etcd-ca" discovery-etcd discovery-etcd
# create credentials for running delegated authn/authz checks
# "client-ca" is created when you start the apiserver
kube::util::create_client_certkey "${sudo}" "${CERT_DIR}" "client-ca" discovery-auth system:discovery-auth
kube::util::write_client_kubeconfig "${sudo}" "${CERT_DIR}" "${ROOT_CA_FILE}" "kubernetes.default.svc" 443 discovery-auth
# ${kubectl} config set-cluster local-up-cluster --kubeconfig="${CERT_DIR}/discovery-auth.kubeconfig" --insecure-skip-tls-verify
# don't fail if the namespace already exists or something
# If this fails for some reason, the script will fail during creation of other resources
kubectl_core create namespace kube-public || true
# grant permission to run delegated authentication and authorization checks
kubectl_core delete clusterrolebinding discovery:system:auth-delegator > /dev/null 2>&1 || true
kubectl_core create clusterrolebinding discovery:system:auth-delegator --clusterrole=system:auth-delegator --user=system:discovery-auth
# make sure the resources we're about to create don't exist
kubectl_core -n kube-public delete secret auth-proxy-client serving-etcd serving-discovery discovery-etcd discovery-auth-kubeconfig > /dev/null 2>&1 || true
kubectl_core -n kube-public delete configmap etcd-ca discovery-ca client-ca request-header-ca > /dev/null 2>&1 || true
kubectl_core -n kube-public delete -f "${KUBE_ROOT}/cmd/kubernetes-discovery/artifacts/local-cluster-up" > /dev/null 2>&1 || true
sudo_kubectl_core -n kube-public create secret tls auth-proxy-client --cert="${CERT_DIR}/client-auth-proxy.crt" --key="${CERT_DIR}/client-auth-proxy.key"
sudo_kubectl_core -n kube-public create secret tls serving-etcd --cert="${CERT_DIR}/serving-etcd.crt" --key="${CERT_DIR}/serving-etcd.key"
sudo_kubectl_core -n kube-public create secret tls serving-discovery --cert="${CERT_DIR}/serving-discovery.crt" --key="${CERT_DIR}/serving-discovery.key"
sudo_kubectl_core -n kube-public create secret tls discovery-etcd --cert="${CERT_DIR}/client-discovery-etcd.crt" --key="${CERT_DIR}/client-discovery-etcd.key"
kubectl_core -n kube-public create secret generic discovery-auth-kubeconfig --from-file="kubeconfig=${CERT_DIR}/discovery-auth.kubeconfig"
kubectl_core -n kube-public create configmap etcd-ca --from-file="ca.crt=${CERT_DIR}/etcd-ca.crt" || true
kubectl_core -n kube-public create configmap discovery-ca --from-file="ca.crt=${CERT_DIR}/discovery-ca.crt" || true
kubectl_core -n kube-public create configmap client-ca --from-file="ca.crt=${CERT_DIR}/client-ca.crt" || true
kubectl_core -n kube-public create configmap request-header-ca --from-file="ca.crt=${CERT_DIR}/request-header-ca.crt" || true
${KUBE_ROOT}/cmd/kubernetes-discovery/hack/build-image.sh
kubectl_core -n kube-public create -f "${KUBE_ROOT}/cmd/kubernetes-discovery/artifacts/local-cluster-up"
${sudo} cp "${CERT_DIR}/admin.kubeconfig" "${CERT_DIR}/admin-discovery.kubeconfig"
${sudo} chown "$(whoami)" "${CERT_DIR}/admin-discovery.kubeconfig"
${kubectl} config set-cluster local-up-cluster --kubeconfig="${CERT_DIR}/admin-discovery.kubeconfig" --certificate-authority="${CERT_DIR}/discovery-ca.crt" --embed-certs --server="https://${API_HOST_IP}:${DISCOVERY_SECURE_PORT}"
# Wait for kubernetes-discovery to come up before launching the rest of the components.
# this should work since we're creating a node port service
echo "Waiting for kubernetes-discovery to come up: https://${API_HOST_IP}:${DISCOVERY_SECURE_PORT}/version"
kube::util::wait_for_url "https://${API_HOST_IP}:${DISCOVERY_SECURE_PORT}/version" "kubernetes-discovery: " 1 60 || exit 1
# something is weird with the proxy
sleep 1
# create the "normal" api services for the core API server
${kubectl} --kubeconfig="${CERT_DIR}/admin-discovery.kubeconfig" create -f "${KUBE_ROOT}/cmd/kubernetes-discovery/artifacts/core-apiservices"
}
kube::util::test_openssl_installed
kube::util::test_cfssl_installed
start_discovery
echo "kuberentes-discovery available at https://${API_HOST_IP}:${DISCOVERY_SECURE_PORT} from 'api.kube-public.svc'"

View File

@ -1,7 +1,5 @@
Vagrantfile: node_ip = $node_ips[n]
cluster/addons/addon-manager/kube-addons.sh:# Create admission_control objects if defined before any other addon services. If the limits
cluster/addons/registry/images/Dockerfile:ADD run_proxy.sh /usr/bin/run_proxy
cluster/addons/registry/images/Dockerfile:CMD ["/usr/bin/run_proxy"]
cluster/aws/templates/configure-vm-aws.sh: # We set the hostname_override to the full EC2 private dns name
cluster/aws/templates/configure-vm-aws.sh: api_servers: '${API_SERVERS}'
cluster/aws/templates/configure-vm-aws.sh: env-to-grains "hostname_override"
@ -88,6 +86,7 @@ federation/deploy/config.json.sample: "num_nodes": 3,
hack/e2e.go:.phase1.cloud_provider="gce"
hack/e2e.go:.phase1.cluster_name="{{.Cluster}}"
hack/e2e.go:.phase1.num_nodes=4
hack/lib/util.sh: local api_port=$5
hack/local-up-cluster.sh: advertise_address="--advertise_address=${API_HOST_IP}"
hack/local-up-cluster.sh: runtime_config="--runtime-config=${RUNTIME_CONFIG}"
hack/local-up-cluster.sh: advertise_address=""

View File

@ -316,9 +316,28 @@ func (c *Config) ApplyAuthenticationOptions(o *options.BuiltInAuthenticationOpti
 		return c, nil
 	}
 
+	var err error
+	if o.ClientCert != nil {
+		c, err = c.applyClientCert(o.ClientCert.ClientCA)
+		if err != nil {
+			return nil, fmt.Errorf("unable to load client CA file: %v", err)
+		}
+	}
+	if o.RequestHeader != nil {
+		c, err = c.applyClientCert(o.RequestHeader.ClientCAFile)
+		if err != nil {
+			return nil, fmt.Errorf("unable to load client CA file: %v", err)
+		}
+	}
+
+	c.SupportsBasicAuth = len(o.PasswordFile.BasicAuthFile) > 0
+	return c, nil
+}
+
+func (c *Config) applyClientCert(clientCAFile string) (*Config, error) {
 	if c.SecureServingInfo != nil {
-		if o.ClientCert != nil && len(o.ClientCert.ClientCA) > 0 {
-			clientCAs, err := certutil.CertsFromFile(o.ClientCert.ClientCA)
+		if len(clientCAFile) > 0 {
+			clientCAs, err := certutil.CertsFromFile(clientCAFile)
 			if err != nil {
 				return nil, fmt.Errorf("unable to load client CA file: %v", err)
 			}
@ -329,21 +348,8 @@ func (c *Config) ApplyAuthenticationOptions(o *options.BuiltInAuthenticationOpti
 				c.SecureServingInfo.ClientCA.AddCert(cert)
 			}
 		}
-		if o.RequestHeader != nil && len(o.RequestHeader.ClientCAFile) > 0 {
-			clientCAs, err := certutil.CertsFromFile(o.RequestHeader.ClientCAFile)
-			if err != nil {
-				return nil, fmt.Errorf("unable to load requestheader client CA file: %v", err)
-			}
-			if c.SecureServingInfo.ClientCA == nil {
-				c.SecureServingInfo.ClientCA = x509.NewCertPool()
-			}
-			for _, cert := range clientCAs {
-				c.SecureServingInfo.ClientCA.AddCert(cert)
-			}
-		}
 	}
 
-	c.SupportsBasicAuth = len(o.PasswordFile.BasicAuthFile) > 0
 	return c, nil
 }
@ -352,6 +358,16 @@ func (c *Config) ApplyDelegatingAuthenticationOptions(o *options.DelegatingAuthe
 		return c, nil
 	}
 
+	var err error
+	c, err = c.applyClientCert(o.ClientCert.ClientCA)
+	if err != nil {
+		return nil, fmt.Errorf("unable to load client CA file: %v", err)
+	}
+	c, err = c.applyClientCert(o.RequestHeader.ClientCAFile)
+	if err != nil {
+		return nil, fmt.Errorf("unable to load client CA file: %v", err)
+	}
+
 	cfg, err := o.ToAuthenticationConfig()
 	if err != nil {
 		return nil, err
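
The upshot of routing both options through `applyClientCert`: a delegated server now trusts certs signed by either CA, which can be confirmed at the handshake level (port is hypothetical):

```bash
# Both the client-ca and request-header CA subjects should now be listed.
openssl s_client -connect localhost:443 </dev/null 2>/dev/null \
  | grep -A8 'Acceptable client certificate CA names'
```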

View File

@ -184,7 +184,7 @@ func ClusterRoles() []rbac.ClusterRole {
 			// TODO: restrict to creating a node with the same name they announce
 			rbac.NewRule("create", "get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
 			// TODO: restrict to the bound node once supported
-			rbac.NewRule("update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
+			rbac.NewRule("update", "patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
 			// TODO: restrict to the bound node as creator once supported
 			rbac.NewRule("create", "update", "patch").Groups(legacyGroup).Resources("events").RuleOrDie(),

View File

@ -167,6 +167,22 @@ func TestBootstrapClusterRoles(t *testing.T) {
	testObjects(t, list, "cluster-roles.yaml")
}

func TestBootstrapClusterRoleBindings(t *testing.T) {
	list := &api.List{}
	names := sets.NewString()
	roleBindings := map[string]runtime.Object{}
	bootstrapRoleBindings := bootstrappolicy.ClusterRoleBindings()
	for i := range bootstrapRoleBindings {
		role := bootstrapRoleBindings[i]
		names.Insert(role.Name)
		roleBindings[role.Name] = &role
	}
	for _, name := range names.List() {
		list.Items = append(list.Items, roleBindings[name])
	}
	testObjects(t, list, "cluster-role-bindings.yaml")
}

func TestBootstrapControllerRoles(t *testing.T) {
	list := &api.List{}
	names := sets.NewString()

View File

@ -0,0 +1,78 @@
apiVersion: v1
items:
- apiVersion: rbac.authorization.k8s.io/v1alpha1
  kind: ClusterRoleBinding
  metadata:
    creationTimestamp: null
    labels:
      kubernetes.io/bootstrapping: rbac-defaults
    name: cluster-admin
  roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: ClusterRole
    name: cluster-admin
  subjects:
  - kind: Group
    name: system:masters
- apiVersion: rbac.authorization.k8s.io/v1alpha1
  kind: ClusterRoleBinding
  metadata:
    creationTimestamp: null
    labels:
      kubernetes.io/bootstrapping: rbac-defaults
    name: system:basic-user
  roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: ClusterRole
    name: system:basic-user
  subjects:
  - kind: Group
    name: system:authenticated
  - kind: Group
    name: system:unauthenticated
- apiVersion: rbac.authorization.k8s.io/v1alpha1
  kind: ClusterRoleBinding
  metadata:
    creationTimestamp: null
    labels:
      kubernetes.io/bootstrapping: rbac-defaults
    name: system:discovery
  roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: ClusterRole
    name: system:discovery
  subjects:
  - kind: Group
    name: system:authenticated
  - kind: Group
    name: system:unauthenticated
- apiVersion: rbac.authorization.k8s.io/v1alpha1
  kind: ClusterRoleBinding
  metadata:
    creationTimestamp: null
    labels:
      kubernetes.io/bootstrapping: rbac-defaults
    name: system:node
  roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: ClusterRole
    name: system:node
  subjects:
  - kind: Group
    name: system:nodes
- apiVersion: rbac.authorization.k8s.io/v1alpha1
  kind: ClusterRoleBinding
  metadata:
    creationTimestamp: null
    labels:
      kubernetes.io/bootstrapping: rbac-defaults
    name: system:node-proxier
  roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: ClusterRole
    name: system:node-proxier
  subjects:
  - kind: Group
    name: system:nodes
kind: List
metadata: {}

View File

@ -436,6 +436,7 @@ items:
     resources:
     - nodes/status
     verbs:
+    - patch
     - update
   - apiGroups:
     - ""