Merge pull request #22744 from sdminonne/libvirt_coreos

Auto commit by PR queue bot
This commit is contained in:
k8s-merge-robot 2016-03-10 09:36:19 -08:00
commit 1460f24746
9 changed files with 203 additions and 44 deletions

View File

@ -46,7 +46,8 @@ for ((i=0; i < NUM_NODES; i++)) do
done
NODE_CONTAINER_SUBNETS[$NUM_NODES]=$MASTER_CONTAINER_SUBNET
SERVICE_CLUSTER_IP_RANGE=10.11.0.0/16 # formerly PORTAL_NET
SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.11.0.0/16}" # formerly PORTAL_NET
# Optional: Enable node logging.
ENABLE_NODE_LOGGING=false
@ -54,6 +55,6 @@ LOGGING_DESTINATION=elasticsearch
# Optional: Install cluster DNS.
ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
DNS_SERVER_IP="10.11.0.254"
DNS_SERVER_IP="${SERVICE_CLUSTER_IP_RANGE%.*}.254"
DNS_DOMAIN="cluster.local"
DNS_REPLICAS=1

View File

@ -0,0 +1,4 @@
# Creates the kube-system namespace that hosts the cluster add-ons
# (the kube-dns replication controller and service are deployed into it).
apiVersion: v1
kind: Namespace
metadata:
name: kube-system

View File

@ -0,0 +1,10 @@
# OpenSSL request config for generating per-node (worker) TLS certificates.
# The caller must export WORKER_IP in the environment; OpenSSL reads it via
# $ENV:: and places it in the certificate's subjectAltName.
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
[alt_names]
# Node's IP address, supplied by the cert-generation script at openssl run time.
IP.1 = $ENV::WORKER_IP

View File

@ -0,0 +1,15 @@
# OpenSSL request config for the kube-apiserver TLS certificate.
# The caller must export KUBERNETES_SVC and MASTER_IP in the environment;
# OpenSSL reads them via $ENV:: for the subjectAltName IP entries.
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
subjectAltName = @alt_names
# SANs cover every name clients may use to reach the API server:
# the in-cluster service DNS names, the service cluster IP, and the master IP.
[alt_names]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster.local
# First IP (.1) of SERVICE_CLUSTER_IP_RANGE — the 'kubernetes' service IP.
IP.1 = $ENV::KUBERNETES_SVC
IP.2 = $ENV::MASTER_IP

View File

@ -1,37 +1,129 @@
apiVersion: v1
kind: ReplicationController
metadata:
labels:
k8s-app: skydns
name: skydns
name: kube-dns-v11
namespace: kube-system
labels:
k8s-app: kube-dns
version: v11
kubernetes.io/cluster-service: \"true\"
spec:
replicas: ${DNS_REPLICAS}
selector:
k8s-app: skydns
k8s-app: kube-dns
version: v11
template:
metadata:
labels:
k8s-app: skydns
k8s-app: kube-dns
version: v11
kubernetes.io/cluster-service: \"true\"
spec:
containers:
- args:
- \"/etcd\"
- \"-bind-addr=127.0.0.1\"
- \"-peer-bind-addr=127.0.0.1\"
image: quay.io/coreos/etcd:latest
name: etcd
- args:
- \"-domain=${DNS_DOMAIN}\"
image: kubernetes/kube2sky:1.0
name: kube2sky
- args:
- \"-machines=http://localhost:4001\"
- \"-addr=0.0.0.0:53\"
- \"-domain=${DNS_DOMAIN}.\"
image: kubernetes/skydns:2014-12-23-001
name: skydns
- name: etcd
image: gcr.io/google_containers/etcd-amd64:2.2.1
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
cpu: 100m
memory: 500Mi
requests:
cpu: 100m
memory: 50Mi
command:
- /usr/local/bin/etcd
- -data-dir
- /var/etcd/data
- -listen-client-urls
- http://127.0.0.1:2379,http://127.0.0.1:4001
- -advertise-client-urls
- http://127.0.0.1:2379,http://127.0.0.1:4001
- -initial-cluster-token
- skydns-etcd
volumeMounts:
- name: etcd-storage
mountPath: /var/etcd/data
- name: kube2sky
image: gcr.io/google_containers/kube2sky:1.14
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
cpu: 100m
# Kube2sky watches all pods.
memory: 200Mi
requests:
cpu: 100m
memory: 50Mi
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
# we poll on pod startup for the Kubernetes master service and
# only setup the /readiness HTTP server once that's available.
initialDelaySeconds: 30
timeoutSeconds: 5
args:
# command = "/kube2sky"
- --domain=${DNS_DOMAIN}
- name: skydns
image: gcr.io/google_containers/skydns:2015-10-13-8c72f8c
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
cpu: 100m
memory: 200Mi
requests:
cpu: 100m
memory: 50Mi
args:
# command = "/skydns"
- -machines=http://127.0.0.1:4001
- -addr=0.0.0.0:53
- -ns-rotate=false
- -domain=${DNS_DOMAIN}.
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
- name: healthz
image: gcr.io/google_containers/exechealthz:1.0
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
args:
- -cmd=nslookup kubernetes.default.svc.${DNS_DOMAIN} 127.0.0.1 >/dev/null
- -port=8080
ports:
- containerPort: 8080
protocol: TCP
volumes:
- name: etcd-storage
emptyDir: {}
dnsPolicy: Default # Don't use cluster DNS.

View File

@ -1,15 +1,20 @@
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: skydns
name: skydns
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: \"true\"
kubernetes.io/name: \"KubeDNS\"
spec:
clusterIP: ${DNS_SERVER_IP}
ports:
- port: 53
protocol: UDP
targetPort: 53
selector:
k8s-app: skydns
k8s-app: kube-dns
clusterIP: ${DNS_SERVER_IP}
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP

View File

@ -14,13 +14,17 @@ coreos:
[Service]
ExecStart=/opt/kubernetes/bin/kube-apiserver \
--service-account-key-file=/opt/kubernetes/certs/kube-serviceaccount.key \
--tls-cert-file=/opt/kubernetes/certs/apiserver.pem \
--tls-private-key-file=/opt/kubernetes/certs/apiserver-key.pem \
--client-ca-file=/opt/kubernetes/certs/ca.pem \
--service-account-key-file=/opt/kubernetes/certs/apiserver-key.pem \
--service-account-lookup=${SERVICE_ACCOUNT_LOOKUP} \
--admission-control=${ADMISSION_CONTROL} \
--insecure-bind-address=0.0.0.0 \
--insecure-port=8080 \
--etcd-servers=http://127.0.0.1:2379 \
--kubelet-port=10250 \
--v=4 \
--service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}
Restart=always
RestartSec=2
@ -40,7 +44,9 @@ coreos:
[Service]
ExecStart=/opt/kubernetes/bin/kube-controller-manager \
--master=127.0.0.1:8080 \
--service-account-private-key-file=/opt/kubernetes/certs/kube-serviceaccount.key \
--service-account-private-key-file=/opt/kubernetes/certs/apiserver-key.pem \
--root-ca-file=/opt/kubernetes/certs/ca.pem \
--v=4
Restart=always
RestartSec=2

View File

@ -16,7 +16,10 @@ coreos:
ExecStart=/opt/kubernetes/bin/kubelet \
--address=0.0.0.0 \
--hostname-override=${NODE_IPS[$i]} \
--cluster-domain=cluster.local \
--api-servers=http://${MASTER_IP}:8080 \
--tls-cert-file=/opt/kubernetes/certs/${NODE_NAMES[$i]}-node.pem \
--tls-private-key-file=/opt/kubernetes/certs/${NODE_NAMES[$i]}-node-key.pem \
$( [[ "$ENABLE_CLUSTER_DNS" == "true" ]] && echo "--cluster-dns=${DNS_SERVER_IP}" ) \
$( [[ "$ENABLE_CLUSTER_DNS" == "true" ]] && echo "--cluster-domain=${DNS_DOMAIN}" ) \
--config=/opt/kubernetes/manifests

View File

@ -56,16 +56,38 @@ function detect-nodes {
KUBE_NODE_IP_ADDRESSES=("${NODE_IPS[@]}")
}
function set_service_accounts {
SERVICE_ACCOUNT_KEY=${SERVICE_ACCOUNT_KEY:-"/tmp/kube-serviceaccount.key"}
# Generate ServiceAccount key if needed
if [[ ! -f "${SERVICE_ACCOUNT_KEY}" ]]; then
mkdir -p "$(dirname ${SERVICE_ACCOUNT_KEY})"
openssl genrsa -out "${SERVICE_ACCOUNT_KEY}" 2048 2>/dev/null
fi
# Generate a self-signed root CA, an API-server key pair, and one key pair per
# node, then copy the resulting TLS assets into the libvirt storage pool so the
# CoreOS VMs can mount them.
# Globals read: SERVICE_CLUSTER_IP_RANGE, POOL_PATH, NUM_NODES, NODE_IPS,
#   SERVICE_ACCOUNT_KEY (set by set_service_accounts).
# Arguments: the node host names (one cert per name).
# NOTE(review): openssl stderr is discarded (2>/dev/null) and exit codes are
# not checked — a failed openssl invocation goes unnoticed here.
function generate_certs {
node_names=("${@}")
#Root-CA
# Throwaway workspace; only the copies placed under $POOL_PATH persist.
tempdir=$(mktemp -d)
CA_KEY=${CA_KEY:-"$tempdir/ca-key.pem"}
CA_CERT=${CA_CERT:-"$tempdir/ca.pem"}
openssl genrsa -out "${CA_KEY}" 2048 2>/dev/null
openssl req -x509 -new -nodes -key "${CA_KEY}" -days 10000 -out "${CA_CERT}" -subj "/CN=kube-ca" 2>/dev/null
#API server key pair
KUBE_KEY=${KUBE_KEY:-"$tempdir/apiserver-key.pem"}
API_SERVER_CERT_REQ=${API_SERVER_CERT_REQ:-"$tempdir/apiserver.csr"}
openssl genrsa -out "${KUBE_KEY}" 2048 2>/dev/null
# KUBERNETES_SVC is read by openssl.cnf via $ENV:: — it is the first IP (.1)
# of the service cluster range, i.e. the in-cluster 'kubernetes' service IP.
KUBERNETES_SVC=${SERVICE_CLUSTER_IP_RANGE%.*}.1 openssl req -new -key "${KUBE_KEY}" -out "${API_SERVER_CERT_REQ}" -subj "/CN=kube-apiserver" -config cluster/libvirt-coreos/openssl.cnf 2>/dev/null
KUBE_CERT=${KUBE_CERT:-"$tempdir/apiserver.pem"}
KUBERNETES_SVC=${SERVICE_CLUSTER_IP_RANGE%.*}.1 openssl x509 -req -in "${API_SERVER_CERT_REQ}" -CA "${CA_CERT}" -CAkey "${CA_KEY}" -CAcreateserial -out "${KUBE_CERT}" -days 365 -extensions v3_req -extfile cluster/libvirt-coreos/openssl.cnf 2>/dev/null
#Copy apiserver and controller TLS assets
mkdir -p "$POOL_PATH/kubernetes/certs"
cp "${SERVICE_ACCOUNT_KEY}" "$POOL_PATH/kubernetes/certs"
cp "${KUBE_CERT}" "$POOL_PATH/kubernetes/certs"
cp "${KUBE_KEY}" "$POOL_PATH/kubernetes/certs"
cp "${CA_CERT}" "$POOL_PATH/kubernetes/certs"
#Generate nodes certificate
# WORKER_IP is read by node-openssl.cnf via $ENV:: as the cert's SAN IP entry.
for (( i = 0 ; i < $NUM_NODES ; i++ )); do
openssl genrsa -out $tempdir/${node_names[$i]}-node-key.pem 2048 2>/dev/null
cp "$tempdir/${node_names[$i]}-node-key.pem" "$POOL_PATH/kubernetes/certs"
WORKER_IP=${NODE_IPS[$i]} openssl req -new -key $tempdir/${node_names[$i]}-node-key.pem -out $tempdir/${node_names[$i]}-node.csr -subj "/CN=${node_names[$i]}" -config cluster/libvirt-coreos/node-openssl.cnf 2>/dev/null
WORKER_IP=${NODE_IPS[$i]} openssl x509 -req -in $tempdir/${node_names[$i]}-node.csr -CA "${CA_CERT}" -CAkey "${CA_KEY}" -CAcreateserial -out $tempdir/${node_names[$i]}-node.pem -days 365 -extensions v3_req -extfile cluster/libvirt-coreos/node-openssl.cnf 2>/dev/null
cp "$tempdir/${node_names[$i]}-node.pem" "$POOL_PATH/kubernetes/certs"
done
echo "TLS assets generated..."
}
@ -154,6 +176,7 @@ function initialize-pool {
mkdir -p "$POOL_PATH/kubernetes/addons"
if [[ "$ENABLE_CLUSTER_DNS" == "true" ]]; then
render-template "$ROOT/namespace.yaml" > "$POOL_PATH/kubernetes/addons/namespace.yaml"
render-template "$ROOT/skydns-svc.yaml" > "$POOL_PATH/kubernetes/addons/skydns-svc.yaml"
render-template "$ROOT/skydns-rc.yaml" > "$POOL_PATH/kubernetes/addons/skydns-rc.yaml"
fi
@ -200,11 +223,11 @@ function wait-cluster-readiness {
function kube-up {
detect-master
detect-nodes
load-or-gen-kube-bearertoken
initialize-pool keep_base_image
set_service_accounts
generate_certs "${NODE_NAMES[@]}"
initialize-network
readonly ssh_keys="$(cat ~/.ssh/*.pub | sed 's/^/ - /')"
readonly kubernetes_dir="$POOL_PATH/kubernetes"