Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #22744 from sdminonne/libvirt_coreos
Auto commit by PR queue bot
Commit: 1460f24746
@@ -46,7 +46,8 @@ for ((i=0; i < NUM_NODES; i++)) do
 done
 NODE_CONTAINER_SUBNETS[$NUM_NODES]=$MASTER_CONTAINER_SUBNET
 
-SERVICE_CLUSTER_IP_RANGE=10.11.0.0/16 # formerly PORTAL_NET
+SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.11.0.0/16}" # formerly PORTAL_NET
+
 
 # Optional: Enable node logging.
 ENABLE_NODE_LOGGING=false
@@ -54,6 +55,6 @@ LOGGING_DESTINATION=elasticsearch
 
 # Optional: Install cluster DNS.
 ENABLE_CLUSTER_DNS="${KUBE_ENABLE_CLUSTER_DNS:-true}"
-DNS_SERVER_IP="10.11.0.254"
+DNS_SERVER_IP="${SERVICE_CLUSTER_IP_RANGE%.*}.254"
 DNS_DOMAIN="cluster.local"
 DNS_REPLICAS=1
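For reference, the new DNS_SERVER_IP value is derived with bash suffix stripping: ${SERVICE_CLUSTER_IP_RANGE%.*} drops the final ".<octet>/<mask>" component of the range. A minimal shell illustration using the default above:

    SERVICE_CLUSTER_IP_RANGE="10.11.0.0/16"
    echo "${SERVICE_CLUSTER_IP_RANGE%.*}.254"   # 10.11.0.254  (cluster DNS service IP)
    echo "${SERVICE_CLUSTER_IP_RANGE%.*}.1"     # 10.11.0.1    (used as KUBERNETES_SVC by generate_certs below)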
cluster/libvirt-coreos/namespace.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: kube-system
cluster/libvirt-coreos/node-openssl.cnf (new file, 10 lines)
@@ -0,0 +1,10 @@
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+[req_distinguished_name]
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+subjectAltName = @alt_names
+[alt_names]
+IP.1 = $ENV::WORKER_IP
cluster/libvirt-coreos/openssl.cnf (new file, 15 lines)
@@ -0,0 +1,15 @@
+[req]
+req_extensions = v3_req
+distinguished_name = req_distinguished_name
+[req_distinguished_name]
+[ v3_req ]
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+subjectAltName = @alt_names
+[alt_names]
+DNS.1 = kubernetes
+DNS.2 = kubernetes.default
+DNS.3 = kubernetes.default.svc
+DNS.4 = kubernetes.default.svc.cluster.local
+IP.1 = $ENV::KUBERNETES_SVC
+IP.2 = $ENV::MASTER_IP
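Both OpenSSL config files pull their subjectAltName IP entries from the environment via the $ENV:: syntax, so the variables must be set when openssl parses the file. A stand-alone sketch of how the apiserver CSR could be inspected against openssl.cnf (the IP values here are illustrative examples, not taken from this change):

    # Generate a throwaway key/CSR and confirm the SANs land in the request;
    # KUBERNETES_SVC and MASTER_IP are example values only.
    KUBERNETES_SVC=10.11.0.1 MASTER_IP=192.168.10.1 \
      openssl req -new -nodes -newkey rsa:2048 \
        -keyout /tmp/apiserver-test-key.pem -out /tmp/apiserver-test.csr \
        -subj "/CN=kube-apiserver" -config cluster/libvirt-coreos/openssl.cnf
    openssl req -in /tmp/apiserver-test.csr -noout -text | grep -A1 "Subject Alternative Name"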
@@ -1,37 +1,129 @@
 apiVersion: v1
 kind: ReplicationController
 metadata:
-  labels:
-    k8s-app: skydns
-  name: skydns
+  name: kube-dns-v11
   namespace: kube-system
+  labels:
+    k8s-app: kube-dns
+    version: v11
+    kubernetes.io/cluster-service: \"true\"
 spec:
   replicas: ${DNS_REPLICAS}
   selector:
-    k8s-app: skydns
+    k8s-app: kube-dns
+    version: v11
   template:
     metadata:
       labels:
-        k8s-app: skydns
+        k8s-app: kube-dns
+        version: v11
+        kubernetes.io/cluster-service: \"true\"
     spec:
       containers:
-        - args:
-            - \"/etcd\"
-            - \"-bind-addr=127.0.0.1\"
-            - \"-peer-bind-addr=127.0.0.1\"
-          image: quay.io/coreos/etcd:latest
-          name: etcd
-        - args:
-            - \"-domain=${DNS_DOMAIN}\"
-          image: kubernetes/kube2sky:1.0
-          name: kube2sky
-        - args:
-            - \"-machines=http://localhost:4001\"
-            - \"-addr=0.0.0.0:53\"
-            - \"-domain=${DNS_DOMAIN}.\"
-          image: kubernetes/skydns:2014-12-23-001
-          name: skydns
-          ports:
-            - containerPort: 53
-              name: dns
-              protocol: UDP
+      - name: etcd
+        image: gcr.io/google_containers/etcd-amd64:2.2.1
+        resources:
+          # TODO: Set memory limits when we've profiled the container for large
+          # clusters, then set request = limit to keep this container in
+          # guaranteed class. Currently, this container falls into the
+          # "burstable" category so the kubelet doesn't backoff from restarting it.
+          limits:
+            cpu: 100m
+            memory: 500Mi
+          requests:
+            cpu: 100m
+            memory: 50Mi
+        command:
+        - /usr/local/bin/etcd
+        - -data-dir
+        - /var/etcd/data
+        - -listen-client-urls
+        - http://127.0.0.1:2379,http://127.0.0.1:4001
+        - -advertise-client-urls
+        - http://127.0.0.1:2379,http://127.0.0.1:4001
+        - -initial-cluster-token
+        - skydns-etcd
+        volumeMounts:
+        - name: etcd-storage
+          mountPath: /var/etcd/data
+      - name: kube2sky
+        image: gcr.io/google_containers/kube2sky:1.14
+        resources:
+          # TODO: Set memory limits when we've profiled the container for large
+          # clusters, then set request = limit to keep this container in
+          # guaranteed class. Currently, this container falls into the
+          # "burstable" category so the kubelet doesn't backoff from restarting it.
+          limits:
+            cpu: 100m
+            # Kube2sky watches all pods.
+            memory: 200Mi
+          requests:
+            cpu: 100m
+            memory: 50Mi
+        livenessProbe:
+          httpGet:
+            path: /healthz
+            port: 8080
+            scheme: HTTP
+          initialDelaySeconds: 60
+          timeoutSeconds: 5
+          successThreshold: 1
+          failureThreshold: 5
+        readinessProbe:
+          httpGet:
+            path: /readiness
+            port: 8081
+            scheme: HTTP
+          # we poll on pod startup for the Kubernetes master service and
+          # only setup the /readiness HTTP server once that's available.
+          initialDelaySeconds: 30
+          timeoutSeconds: 5
+        args:
+        # command = "/kube2sky"
+        - --domain=${DNS_DOMAIN}
+      - name: skydns
+        image: gcr.io/google_containers/skydns:2015-10-13-8c72f8c
+        resources:
+          # TODO: Set memory limits when we've profiled the container for large
+          # clusters, then set request = limit to keep this container in
+          # guaranteed class. Currently, this container falls into the
+          # "burstable" category so the kubelet doesn't backoff from restarting it.
+          limits:
+            cpu: 100m
+            memory: 200Mi
+          requests:
+            cpu: 100m
+            memory: 50Mi
+        args:
+        # command = "/skydns"
+        - -machines=http://127.0.0.1:4001
+        - -addr=0.0.0.0:53
+        - -ns-rotate=false
+        - -domain=${DNS_DOMAIN}.
+        ports:
+        - containerPort: 53
+          name: dns
+          protocol: UDP
+        - containerPort: 53
+          name: dns-tcp
+          protocol: TCP
+      - name: healthz
+        image: gcr.io/google_containers/exechealthz:1.0
+        resources:
+          # keep request = limit to keep this container in guaranteed class
+          limits:
+            cpu: 10m
+            memory: 20Mi
+          requests:
+            cpu: 10m
+            memory: 20Mi
+        args:
+        - -cmd=nslookup kubernetes.default.svc.${DNS_DOMAIN} 127.0.0.1 >/dev/null
+        - -port=8080
+        ports:
+        - containerPort: 8080
+          protocol: TCP
+      volumes:
+      - name: etcd-storage
+        emptyDir: {}
+      dnsPolicy: Default # Don't use cluster DNS.
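The ${DNS_REPLICAS} and ${DNS_DOMAIN} placeholders, and the escaped \"...\" quoting in this manifest, exist because the libvirt-coreos scripts expand the file with shell evaluation before copying it into the addons pool (see the initialize-pool hunk below). A minimal sketch of that expansion, assuming a render-template helper that simply eval-echoes the file:

    # Assumed shape of util.sh's render-template: shell-evaluate the file so
    # ${DNS_REPLICAS}, ${DNS_DOMAIN}, ... are substituted and \" becomes ".
    render-template() {
      eval "echo \"$(cat "$1")\""
    }

    DNS_REPLICAS=1 DNS_DOMAIN=cluster.local \
      render-template skydns-rc.yaml > /tmp/skydns-rc.rendered.yaml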
@@ -1,15 +1,20 @@
 apiVersion: v1
 kind: Service
 metadata:
-  labels:
-    k8s-app: skydns
-  name: skydns
+  name: kube-dns
   namespace: kube-system
+  labels:
+    k8s-app: kube-dns
+    kubernetes.io/cluster-service: \"true\"
+    kubernetes.io/name: \"KubeDNS\"
 spec:
-  clusterIP: ${DNS_SERVER_IP}
-  ports:
-    - port: 53
-      protocol: UDP
-      targetPort: 53
   selector:
-    k8s-app: skydns
+    k8s-app: kube-dns
+  clusterIP: ${DNS_SERVER_IP}
+  ports:
+  - name: dns
+    port: 53
+    protocol: UDP
+  - name: dns-tcp
+    port: 53
+    protocol: TCP
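Once the addons are rendered and applied, the renamed service can be checked in the kube-system namespace; an optional, illustrative check:

    kubectl --namespace=kube-system get service kube-dns
    kubectl --namespace=kube-system get endpoints kube-dns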
@@ -14,13 +14,17 @@ coreos:
 
         [Service]
         ExecStart=/opt/kubernetes/bin/kube-apiserver \
-        --service-account-key-file=/opt/kubernetes/certs/kube-serviceaccount.key \
+        --tls-cert-file=/opt/kubernetes/certs/apiserver.pem \
+        --tls-private-key-file=/opt/kubernetes/certs/apiserver-key.pem \
+        --client-ca-file=/opt/kubernetes/certs/ca.pem \
+        --service-account-key-file=/opt/kubernetes/certs/apiserver-key.pem \
         --service-account-lookup=${SERVICE_ACCOUNT_LOOKUP} \
         --admission-control=${ADMISSION_CONTROL} \
         --insecure-bind-address=0.0.0.0 \
         --insecure-port=8080 \
         --etcd-servers=http://127.0.0.1:2379 \
         --kubelet-port=10250 \
+        --v=4 \
         --service-cluster-ip-range=${SERVICE_CLUSTER_IP_RANGE}
         Restart=always
         RestartSec=2
@@ -40,7 +44,9 @@ coreos:
         [Service]
         ExecStart=/opt/kubernetes/bin/kube-controller-manager \
         --master=127.0.0.1:8080 \
-        --service-account-private-key-file=/opt/kubernetes/certs/kube-serviceaccount.key \
+        --service-account-private-key-file=/opt/kubernetes/certs/apiserver-key.pem \
+        --root-ca-file=/opt/kubernetes/certs/ca.pem \
+        --v=4
         Restart=always
         RestartSec=2
 
@@ -16,7 +16,10 @@ coreos:
         ExecStart=/opt/kubernetes/bin/kubelet \
         --address=0.0.0.0 \
         --hostname-override=${NODE_IPS[$i]} \
+        --cluster-domain=cluster.local \
         --api-servers=http://${MASTER_IP}:8080 \
+        --tls-cert-file=/opt/kubernetes/certs/${NODE_NAMES[$i]}-node.pem \
+        --tls-private-key-file=/opt/kubernetes/certs/${NODE_NAMES[$i]}-node-key.pem \
         $( [[ "$ENABLE_CLUSTER_DNS" == "true" ]] && echo "--cluster-dns=${DNS_SERVER_IP}" ) \
         $( [[ "$ENABLE_CLUSTER_DNS" == "true" ]] && echo "--cluster-domain=${DNS_DOMAIN}" ) \
         --config=/opt/kubernetes/manifests
@@ -56,16 +56,38 @@ function detect-nodes {
   KUBE_NODE_IP_ADDRESSES=("${NODE_IPS[@]}")
 }
 
-function set_service_accounts {
-  SERVICE_ACCOUNT_KEY=${SERVICE_ACCOUNT_KEY:-"/tmp/kube-serviceaccount.key"}
-  # Generate ServiceAccount key if needed
-  if [[ ! -f "${SERVICE_ACCOUNT_KEY}" ]]; then
-    mkdir -p "$(dirname ${SERVICE_ACCOUNT_KEY})"
-    openssl genrsa -out "${SERVICE_ACCOUNT_KEY}" 2048 2>/dev/null
-  fi
+function generate_certs {
+  node_names=("${@}")
+  # Root CA
+  tempdir=$(mktemp -d)
+  CA_KEY=${CA_KEY:-"$tempdir/ca-key.pem"}
+  CA_CERT=${CA_CERT:-"$tempdir/ca.pem"}
+  openssl genrsa -out "${CA_KEY}" 2048 2>/dev/null
+  openssl req -x509 -new -nodes -key "${CA_KEY}" -days 10000 -out "${CA_CERT}" -subj "/CN=kube-ca" 2>/dev/null
+
+  # API server key pair
+  KUBE_KEY=${KUBE_KEY:-"$tempdir/apiserver-key.pem"}
+  API_SERVER_CERT_REQ=${API_SERVER_CERT_REQ:-"$tempdir/apiserver.csr"}
+  openssl genrsa -out "${KUBE_KEY}" 2048 2>/dev/null
+  KUBERNETES_SVC=${SERVICE_CLUSTER_IP_RANGE%.*}.1 openssl req -new -key "${KUBE_KEY}" -out "${API_SERVER_CERT_REQ}" -subj "/CN=kube-apiserver" -config cluster/libvirt-coreos/openssl.cnf 2>/dev/null
+  KUBE_CERT=${KUBE_CERT:-"$tempdir/apiserver.pem"}
+  KUBERNETES_SVC=${SERVICE_CLUSTER_IP_RANGE%.*}.1 openssl x509 -req -in "${API_SERVER_CERT_REQ}" -CA "${CA_CERT}" -CAkey "${CA_KEY}" -CAcreateserial -out "${KUBE_CERT}" -days 365 -extensions v3_req -extfile cluster/libvirt-coreos/openssl.cnf 2>/dev/null
+
+  # Copy apiserver and controller TLS assets
   mkdir -p "$POOL_PATH/kubernetes/certs"
-  cp "${SERVICE_ACCOUNT_KEY}" "$POOL_PATH/kubernetes/certs"
+  cp "${KUBE_CERT}" "$POOL_PATH/kubernetes/certs"
+  cp "${KUBE_KEY}" "$POOL_PATH/kubernetes/certs"
+  cp "${CA_CERT}" "$POOL_PATH/kubernetes/certs"
+
+  # Generate node certificates
+  for (( i = 0 ; i < $NUM_NODES ; i++ )); do
+    openssl genrsa -out $tempdir/${node_names[$i]}-node-key.pem 2048 2>/dev/null
+    cp "$tempdir/${node_names[$i]}-node-key.pem" "$POOL_PATH/kubernetes/certs"
+    WORKER_IP=${NODE_IPS[$i]} openssl req -new -key $tempdir/${node_names[$i]}-node-key.pem -out $tempdir/${node_names[$i]}-node.csr -subj "/CN=${node_names[$i]}" -config cluster/libvirt-coreos/node-openssl.cnf 2>/dev/null
+    WORKER_IP=${NODE_IPS[$i]} openssl x509 -req -in $tempdir/${node_names[$i]}-node.csr -CA "${CA_CERT}" -CAkey "${CA_KEY}" -CAcreateserial -out $tempdir/${node_names[$i]}-node.pem -days 365 -extensions v3_req -extfile cluster/libvirt-coreos/node-openssl.cnf 2>/dev/null
+    cp "$tempdir/${node_names[$i]}-node.pem" "$POOL_PATH/kubernetes/certs"
+  done
+  echo "TLS assets generated..."
 }
 
 
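As a sanity check on what generate_certs produces, the CA chain and the SANs baked in from openssl.cnf can be inspected with stock openssl. A brief, optional example, assuming the variable names used in the function above:

    # Verify the apiserver cert chains to the generated CA and carries the
    # expected subjectAltName entries.
    openssl verify -CAfile "${CA_CERT}" "${KUBE_CERT}"
    openssl x509 -in "${KUBE_CERT}" -noout -text | grep -A1 "Subject Alternative Name"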
@@ -154,6 +176,7 @@ function initialize-pool {
 
   mkdir -p "$POOL_PATH/kubernetes/addons"
   if [[ "$ENABLE_CLUSTER_DNS" == "true" ]]; then
+    render-template "$ROOT/namespace.yaml" > "$POOL_PATH/kubernetes/addons/namespace.yaml"
     render-template "$ROOT/skydns-svc.yaml" > "$POOL_PATH/kubernetes/addons/skydns-svc.yaml"
     render-template "$ROOT/skydns-rc.yaml" > "$POOL_PATH/kubernetes/addons/skydns-rc.yaml"
   fi
@@ -200,11 +223,11 @@ function wait-cluster-readiness {
 function kube-up {
   detect-master
   detect-nodes
-  load-or-gen-kube-bearertoken
   initialize-pool keep_base_image
-  set_service_accounts
+  generate_certs "${NODE_NAMES[@]}"
   initialize-network
 
 
   readonly ssh_keys="$(cat ~/.ssh/*.pub | sed 's/^/ - /')"
   readonly kubernetes_dir="$POOL_PATH/kubernetes"
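For context, kube-up here runs through the generic cluster scripts, so the new generate_certs path is exercised by a normal provider bring-up; a typical, illustrative invocation:

    # Illustrative only: bring up the libvirt-coreos cluster so that kube-up,
    # and therefore generate_certs, runs with the settings discussed above.
    export KUBERNETES_PROVIDER=libvirt-coreos
    cluster/kube-up.sh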