Merge pull request #78246 from mborsz/kubemark
Migrate kubemark to e2e-up/e2e-down scripts
Commit: 79aab0b90d
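In effect, the change drops the hand-rolled GCE provisioning of the kubemark master (create-master-instance-with-resources and friends below) and routes master creation and teardown through the regular cluster turn-up scripts. A rough usage sketch of the resulting flow, with illustrative values for the environment overrides (the variable values here are examples, not part of this change):

export PROJECT=my-gcp-project        # example GCE project
export KUBEMARK_NUM_NODES=100        # hollow nodes to simulate
./test/kubemark/start-kubemark.sh    # master comes up via hack/e2e-internal/e2e-up.sh
# ... run scalability tests ...
./test/kubemark/stop-kubemark.sh     # master torn down via hack/e2e-internal/e2e-down.sh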
@@ -23,92 +23,22 @@
source "${KUBE_ROOT}/cluster/gce/config-common.sh"

GCLOUD=gcloud
ZONE=${KUBE_GCE_ZONE:-us-central1-b}
REGION=${ZONE%-*}
NUM_NODES=${KUBEMARK_NUM_NODES:-10}
NUM_WINDOWS_NODES=${KUBEMARK_NUM_WINDOWS_NODES:-0}
MASTER_SIZE=${KUBEMARK_MASTER_SIZE:-n1-standard-$(get-master-size)}
MASTER_DISK_TYPE=pd-ssd
MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)}
MASTER_ROOT_DISK_SIZE=${KUBEMARK_MASTER_ROOT_DISK_SIZE:-$(get-master-root-disk-size)}
REGISTER_MASTER_KUBELET=${REGISTER_MASTER:-false}
PREEMPTIBLE_NODE=${PREEMPTIBLE_NODE:-false}
NODE_ACCELERATORS=${NODE_ACCELERATORS:-""}
CREATE_CUSTOM_NETWORK=${CREATE_CUSTOM_NETWORK:-false}
EVENT_PD=${EVENT_PD:-false}

MASTER_OS_DISTRIBUTION=${KUBE_MASTER_OS_DISTRIBUTION:-gci}
NODE_OS_DISTRIBUTION=${KUBE_NODE_OS_DISTRIBUTION:-gci}
MASTER_IMAGE=${KUBE_GCE_MASTER_IMAGE:-cos-beta-73-11647-64-0}
MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-cos-cloud}
CLEANUP_KUBEMARK_IMAGE=${CLEANUP_KUBEMARK_IMAGE:-true}

# GPUs supported in GCE do not have compatible drivers in Debian 7.
if [[ "${NODE_OS_DISTRIBUTION}" == "debian" ]]; then
  NODE_ACCELERATORS=""
fi

NETWORK=${KUBE_GCE_NETWORK:-e2e}
if [[ "${CREATE_CUSTOM_NETWORK}" == true ]]; then
  SUBNETWORK="${SUBNETWORK:-${NETWORK}-custom-subnet}"
fi
INSTANCE_PREFIX="${INSTANCE_PREFIX:-"default"}"
MASTER_NAME="${INSTANCE_PREFIX}-kubemark-master"
AGGREGATOR_MASTER_NAME="${INSTANCE_PREFIX}-kubemark-aggregator"
MASTER_TAG="kubemark-master"
EVENT_STORE_NAME="${INSTANCE_PREFIX}-event-store"
MASTER_IP_RANGE="${MASTER_IP_RANGE:-10.246.0.0/24}"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-$(get-cluster-ip-range)}"
RUNTIME_CONFIG="${KUBE_RUNTIME_CONFIG:-}"
TERMINATED_POD_GC_THRESHOLD=${TERMINATED_POD_GC_THRESHOLD:-100}
KUBE_APISERVER_REQUEST_TIMEOUT=300
ETCD_COMPACTION_INTERVAL_SEC="${KUBEMARK_ETCD_COMPACTION_INTERVAL_SEC:-}"

# Set etcd image (e.g. k8s.gcr.io/etcd) and version (e.g. 3.1.12-1) if you need
# non-default version.
ETCD_IMAGE="${TEST_ETCD_IMAGE:-}"
ETCD_VERSION="${TEST_ETCD_VERSION:-}"
ETCD_SERVERS="${KUBEMARK_ETCD_SERVERS:-}"
ETCD_SERVERS_OVERRIDES="${KUBEMARK_ETCD_SERVERS_OVERRIDES:-}"

# Storage backend. 'etcd2' and 'etcd3' are supported.
STORAGE_BACKEND=${STORAGE_BACKEND:-}
# Storage media type: application/json and application/vnd.kubernetes.protobuf are supported.
STORAGE_MEDIA_TYPE=${STORAGE_MEDIA_TYPE:-}

# Default Log level for all components in test clusters and variables to override it in specific components.
TEST_CLUSTER_LOG_LEVEL="${TEST_CLUSTER_LOG_LEVEL:---v=4}"
API_SERVER_TEST_LOG_LEVEL="${API_SERVER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
CONTROLLER_MANAGER_TEST_LOG_LEVEL="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
SCHEDULER_TEST_LOG_LEVEL="${SCHEDULER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
# NUM_NODES is used by start-kubemark.sh to determine a correct number of replicas.
NUM_NODES=${KUBEMARK_NUM_NODES:-10}
NUM_WINDOWS_NODES=${KUBEMARK_NUM_WINDOWS_NODES:-0}

HOLLOW_KUBELET_TEST_LOG_LEVEL="${HOLLOW_KUBELET_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
HOLLOW_PROXY_TEST_LOG_LEVEL="${HOLLOW_PROXY_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"

TEST_CLUSTER_DELETE_COLLECTION_WORKERS="${TEST_CLUSTER_DELETE_COLLECTION_WORKERS:---delete-collection-workers=16}"
TEST_CLUSTER_MAX_REQUESTS_INFLIGHT="${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT:-}"
TEST_CLUSTER_RESYNC_PERIOD="${TEST_CLUSTER_RESYNC_PERIOD:-}"

# ContentType used by all components to communicate with apiserver.
TEST_CLUSTER_API_CONTENT_TYPE="${TEST_CLUSTER_API_CONTENT_TYPE:-}"

KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS="${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS:-}"

CUSTOM_ADMISSION_PLUGINS="${CUSTOM_ADMISSION_PLUGINS:-NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,Priority,StorageObjectInUseProtection,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota}"

# Master components' test arguments.
APISERVER_TEST_ARGS="${KUBEMARK_APISERVER_TEST_ARGS:-} --runtime-config=extensions/v1beta1,scheduling.k8s.io/v1alpha1 ${API_SERVER_TEST_LOG_LEVEL} ${TEST_CLUSTER_MAX_REQUESTS_INFLIGHT} ${TEST_CLUSTER_DELETE_COLLECTION_WORKERS}"
CONTROLLER_MANAGER_TEST_ARGS="${KUBEMARK_CONTROLLER_MANAGER_TEST_ARGS:-} ${CONTROLLER_MANAGER_TEST_LOG_LEVEL} ${TEST_CLUSTER_RESYNC_PERIOD} ${TEST_CLUSTER_API_CONTENT_TYPE} ${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS}"
SCHEDULER_TEST_ARGS="${KUBEMARK_SCHEDULER_TEST_ARGS:-} ${SCHEDULER_TEST_LOG_LEVEL} ${TEST_CLUSTER_API_CONTENT_TYPE} ${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS}"

# Hollow-node components' test arguments.
HOLLOW_KUBELET_TEST_ARGS="${HOLLOW_KUBELET_TEST_ARGS:-} ${HOLLOW_KUBELET_TEST_LOG_LEVEL}"
HOLLOW_PROXY_TEST_ARGS="${HOLLOW_PROXY_TEST_ARGS:-} ${HOLLOW_PROXY_TEST_LOG_LEVEL}"

SERVICE_CLUSTER_IP_RANGE="10.0.0.0/16" # formerly PORTAL_NET
ALLOCATE_NODE_CIDRS=true

# Optional: Enable cluster autoscaler.
ENABLE_KUBEMARK_CLUSTER_AUTOSCALER="${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER:-false}"
# When using Cluster Autoscaler, always start with one hollow-node replica.
@@ -126,12 +56,4 @@ fi
ENABLE_KUBEMARK_KUBE_DNS="${ENABLE_KUBEMARK_KUBE_DNS:-true}"
KUBE_DNS_DOMAIN="${KUBE_DNS_DOMAIN:-cluster.local}"

# Optional: set feature gates
FEATURE_GATES="${KUBE_FEATURE_GATES:-ExperimentalCriticalPodAnnotation=true}"

# Enable a simple "AdvancedAuditing" setup for testing.
ENABLE_APISERVER_ADVANCED_AUDIT="${ENABLE_APISERVER_ADVANCED_AUDIT:-false}"

# The number of services that are allowed to sync concurrently. Will be passed
# into kube-controller-manager via `--concurrent-service-syncs`
CONCURRENT_SERVICE_SYNCS="${CONCURRENT_SERVICE_SYNCS:-}"
CLEANUP_KUBEMARK_IMAGE=false
@@ -31,127 +31,65 @@ function authenticate-docker
  gcloud beta auth configure-docker -q
}

# This function isn't too robust to race, but that should be ok given its one-off usage during setup.
function get-or-create-master-ip {
  MASTER_IP=$(gcloud compute addresses describe "${MASTER_NAME}-ip" \
    --project "${PROJECT}" --region "${REGION}" -q --format='value(address)') 2>/dev/null || true
function create-kubemark-master {
  # We intentionally override env vars in subshell to preserve original values.
  # shellcheck disable=SC2030,SC2031
  (
    export KUBECONFIG="${RESOURCE_DIRECTORY}/kubeconfig.kubemark"
    export CLUSTER_NAME="${CLUSTER_NAME}-kubemark"
    export KUBE_CREATE_NODES=false
    export KUBE_GCE_INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX}-kubemark"

  if [[ -z "${MASTER_IP:-}" ]]; then
    run-gcloud-compute-with-retries addresses create "${MASTER_NAME}-ip" \
      --project "${PROJECT}" \
      --region "${REGION}" -q
    # Even if the "real cluster" is private, we shouldn't manage cloud nat.
    export KUBE_GCE_PRIVATE_CLUSTER=false

    MASTER_IP=$(gcloud compute addresses describe "${MASTER_NAME}-ip" \
      --project "${PROJECT}" --region "${REGION}" -q --format='value(address)')
  fi
    # Quite tricky cidr setup: we set KUBE_GCE_ENABLE_IP_ALIASES=true to avoid creating
    # cloud routes and RangeAllocator to assign cidrs by kube-controller-manager.
    export KUBE_GCE_ENABLE_IP_ALIASES=true
    export KUBE_GCE_NODE_IPAM_MODE=RangeAllocator

    # Disable all addons. They are running outside of the kubemark cluster.
    export KUBE_ENABLE_CLUSTER_AUTOSCALER=false
    export KUBE_ENABLE_CLUSTER_DNS=false
    export KUBE_ENABLE_NODE_LOGGING=false
    export KUBE_ENABLE_METRICS_SERVER=false
    export KUBE_ENABLE_CLUSTER_MONITORING="none"
    export KUBE_ENABLE_L7_LOADBALANCING="none"

    # Unset env variables set by kubetest for 'root cluster'. We need to recompute them
    # for kubemark master.
    # TODO(mborsz): Figure out some better way to filter out such env variables than
    # listing them here.
    unset MASTER_SIZE MASTER_DISK_SIZE MASTER_ROOT_DISK_SIZE

    # Set kubemark-specific overrides:
    # for each defined env KUBEMARK_X=Y call export X=Y.
    for var in ${!KUBEMARK_*}; do
      dst_var=${var#KUBEMARK_}
      val=${!var}
      echo "Setting ${dst_var} to '${val}'"
      export "${dst_var}"="${val}"
    done

    # Append kubemark-specific "MASTER_COMPONENTS_QPS_LIMITS".
    # TODO(mborsz): Migrate all users of KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS to {SCHEDULER,CONTROLLER_MANAGER}_TEST_ARGS.
    export CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-} ${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS:-}"
    export SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-} ${KUBEMARK_MASTER_COMPONENTS_QPS_LIMITS:-}"

    "${KUBE_ROOT}/hack/e2e-internal/e2e-up.sh"
  )
}
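# Illustration (not part of the change): given these in the caller's environment,
#   export KUBEMARK_MASTER_SIZE=n1-standard-8
#   export KUBEMARK_FEATURE_GATES=AllAlpha=true
# the loop above exports, inside the subshell only,
#   MASTER_SIZE=n1-standard-8
#   FEATURE_GATES=AllAlpha=true
# while the surrounding parentheses keep these exports (and KUBECONFIG, CLUSTER_NAME,
# etc.) from leaking back into the caller's environment.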

function create-master-instance-with-resources {
  GCLOUD_COMMON_ARGS=(--project "${PROJECT}" --zone "${ZONE}")
  # Override the master image project to cos-cloud for COS images starting with `cos` string prefix.
  DEFAULT_GCI_PROJECT=google-containers
  if [[ "${GCI_VERSION}" == "cos"* ]]; then
    DEFAULT_GCI_PROJECT=cos-cloud
  fi
  MASTER_IMAGE_PROJECT=${KUBE_GCE_MASTER_PROJECT:-${DEFAULT_GCI_PROJECT}}
function delete-kubemark-master {
  # We intentionally override env vars in subshell to preserve original values.
  # shellcheck disable=SC2030,SC2031
  (
    export CLUSTER_NAME="${CLUSTER_NAME}-kubemark"
    export KUBE_GCE_INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX}-kubemark"

  run-gcloud-compute-with-retries disks create "${MASTER_NAME}-pd" \
    "${GCLOUD_COMMON_ARGS[@]}" \
    --type "${MASTER_DISK_TYPE}" \
    --size "${MASTER_DISK_SIZE}" &

  if [ "${EVENT_PD:-}" == "true" ]; then
    run-gcloud-compute-with-retries disks create "${MASTER_NAME}-event-pd" \
      "${GCLOUD_COMMON_ARGS[@]}" \
      --type "${MASTER_DISK_TYPE}" \
      --size "${MASTER_DISK_SIZE}" &
  fi

  get-or-create-master-ip &

  wait

  run-gcloud-compute-with-retries instances create "${MASTER_NAME}" \
    "${GCLOUD_COMMON_ARGS[@]}" \
    --address "${MASTER_IP}" \
    --machine-type "${MASTER_SIZE}" \
    --image-project="${MASTER_IMAGE_PROJECT}" \
    --image "${MASTER_IMAGE}" \
    --tags "${MASTER_TAG}" \
    --subnet "${SUBNETWORK:-${NETWORK}}" \
    --scopes "storage-ro,logging-write" \
    --boot-disk-size "${MASTER_ROOT_DISK_SIZE}" \
    --disk "name=${MASTER_NAME}-pd,device-name=master-pd,mode=rw,boot=no,auto-delete=no"

  run-gcloud-compute-with-retries instances add-metadata "${MASTER_NAME}" \
    "${GCLOUD_COMMON_ARGS[@]}" \
    --metadata-from-file startup-script="${KUBE_ROOT}/test/kubemark/resources/start-kubemark-master.sh" &

  if [ "${EVENT_PD:-}" == "true" ]; then
    echo "Attaching ${MASTER_NAME}-event-pd to ${MASTER_NAME}"
    run-gcloud-compute-with-retries instances attach-disk "${MASTER_NAME}" \
      "${GCLOUD_COMMON_ARGS[@]}" \
      --disk "${MASTER_NAME}-event-pd" \
      --device-name="master-event-pd" &
  fi

  run-gcloud-compute-with-retries firewall-rules create "${MASTER_NAME}-https" \
    --project "${PROJECT}" \
    --network "${NETWORK}" \
    --source-ranges "0.0.0.0/0" \
    --target-tags "${MASTER_TAG}" \
    --allow "tcp:443" &

  run-gcloud-compute-with-retries firewall-rules create "${MASTER_NAME}-internal" \
    --project "${PROJECT}" \
    --network "${NETWORK}" \
    --source-ranges "10.0.0.0/8" \
    --target-tags "${MASTER_TAG}" \
    --allow "tcp:1-2379,tcp:2382-65535,udp:1-65535,icmp" &

  wait
}

# Command to be executed is '$1'.
# No. of retries is '$2' (if provided) or 1 (default).
function execute-cmd-on-master-with-retries() {
  RETRIES="${2:-1}" run-gcloud-compute-with-retries ssh "${MASTER_NAME}" --zone="${ZONE}" --project="${PROJECT}" --command="$1"
}

function copy-files() {
  run-gcloud-compute-with-retries scp --recurse --zone="${ZONE}" --project="${PROJECT}" "$@"
}
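# Hedged usage examples for the two helpers above (the command and copied file are illustrative):
#   execute-cmd-on-master-with-retries "sudo ls /home/kubernetes" 3
#   copy-files "${RESOURCE_DIRECTORY}/kubemark-master-env.sh" "kubernetes@${MASTER_NAME}":/home/kubernetes/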

function delete-master-instance-and-resources {
  GCLOUD_COMMON_ARGS=(--project "${PROJECT}" --zone "${ZONE}" --quiet)

  gcloud compute instances delete "${MASTER_NAME}" \
    "${GCLOUD_COMMON_ARGS[@]}" || true

  gcloud compute disks delete "${MASTER_NAME}-pd" \
    "${GCLOUD_COMMON_ARGS[@]}" || true

  gcloud compute disks delete "${MASTER_NAME}-event-pd" \
    "${GCLOUD_COMMON_ARGS[@]}" &> /dev/null || true

  gcloud compute addresses delete "${MASTER_NAME}-ip" \
    --project "${PROJECT}" \
    --region "${REGION}" \
    --quiet || true

  gcloud compute firewall-rules delete "${MASTER_NAME}-https" \
    --project "${PROJECT}" \
    --quiet || true

  gcloud compute firewall-rules delete "${MASTER_NAME}-internal" \
    --project "${PROJECT}" \
    --quiet || true

  if [ "${SEPARATE_EVENT_MACHINE:-false}" == "true" ]; then
    gcloud compute instances delete "${EVENT_STORE_NAME}" \
      "${GCLOUD_COMMON_ARGS[@]}" || true

    gcloud compute disks delete "${EVENT_STORE_NAME}-pd" \
      "${GCLOUD_COMMON_ARGS[@]}" || true
  fi
    export KUBE_DELETE_NETWORK=false
    # Even if the "real cluster" is private, we shouldn't manage cloud nat.
    export KUBE_GCE_PRIVATE_CLUSTER=false
    "${KUBE_ROOT}/hack/e2e-internal/e2e-down.sh"
  )
}
@@ -1,51 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: etcd-server-events
  namespace: kube-system
spec:
  hostNetwork: true
  nodeName: {{instance_prefix}}-master
  containers:
  - name: etcd-container
    image: {{kube_docker_registry}}/etcd:{{etcd_image}}
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 100m
    command:
    - /bin/sh
    - -c
    - /usr/local/bin/etcd
      {{params}}
      1>>/var/log/etcd-events.log 2>&1
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /health
        port: 4002
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    ports:
    - name: serverport
      containerPort: 2381
      hostPort: 2381
      protocol: TCP
    - name: clientport
      containerPort: 4002
      hostPort: 4002
      protocol: TCP
    volumeMounts:
    - name: varetcd
      mountPath: /var/etcd
    - name: varlogetcd
      mountPath: /var/log/etcd-events.log
  volumes:
  - name: varetcd
    hostPath:
      path: /var/etcd/events
  - name: varlogetcd
    hostPath:
      path: /var/log/etcd-events.log
      type: FileOrCreate
@@ -1,50 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: etcd-server
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: etcd-container
    image: {{kube_docker_registry}}/etcd:{{etcd_image}}
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 200m
    command:
    - /bin/sh
    - -c
    - /usr/local/bin/etcd
      {{params}}
      1>>/var/log/etcd.log 2>&1
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /health
        port: 2379
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    ports:
    - name: serverport
      containerPort: 2380
      hostPort: 2380
      protocol: TCP
    - name: clientport
      containerPort: 2379
      hostPort: 2379
      protocol: TCP
    volumeMounts:
    - name: varetcd
      mountPath: /var/etcd
    - name: varlogetcd
      mountPath: /var/log/etcd.log
  volumes:
  - name: varetcd
    hostPath:
      path: /var/etcd
  - name: varlogetcd
    hostPath:
      path: /var/log/etcd.log
      type: FileOrCreate
@@ -1,70 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-apiserver
    image: {{kube_docker_registry}}/kube-apiserver:{{kube-apiserver_docker_tag}}
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 250m
    command:
    - /bin/sh
    - -c
    - /usr/local/bin/kube-apiserver
      {{params}}
      1>>/var/log/kube-apiserver.log 2>&1
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 8080
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    ports:
    - name: https
      containerPort: 443
      hostPort: 443
      protocol: TCP
    - name: local
      containerPort: 8080
      hostPort: 8080
      protocol: TCP
    volumeMounts:
    {{audit_policy_config_mount}}
    - name: srvkube
      mountPath: /etc/srv/kubernetes
      readOnly: true
    - name: logfile
      mountPath: /var/log/kube-apiserver.log
    - name: etcssl
      mountPath: /etc/ssl
      readOnly: true
    - name: usrsharecacerts
      mountPath: /usr/share/ca-certificates
      readOnly: true
    - name: srvsshproxy
      mountPath: /etc/srv/sshproxy
  volumes:
  {{audit_policy_config_volume}}
  - name: srvkube
    hostPath:
      path: /etc/srv/kubernetes
  - name: logfile
    hostPath:
      path: /var/log/kube-apiserver.log
      type: FileOrCreate
  - name: etcssl
    hostPath:
      path: /etc/ssl
  - name: usrsharecacerts
    hostPath:
      path: /usr/share/ca-certificates
  - name: srvsshproxy
    hostPath:
      path: /etc/srv/sshproxy
@@ -1,54 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-controller-manager
  namespace: kube-system
spec:
  hostNetwork: true
  containers:
  - name: kube-controller-manager
    image: {{kube_docker_registry}}/kube-controller-manager:{{kube-controller-manager_docker_tag}}
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 200m
    command:
    - /bin/sh
    - -c
    - /usr/local/bin/kube-controller-manager
      {{params}}
      1>>/var/log/kube-controller-manager.log 2>&1
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10252
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    volumeMounts:
    - name: srvkube
      mountPath: /etc/srv/kubernetes
      readOnly: true
    - name: logfile
      mountPath: /var/log/kube-controller-manager.log
    - name: etcssl
      mountPath: /etc/ssl
      readOnly: true
    - name: usrsharecacerts
      mountPath: /usr/share/ca-certificates
      readOnly: true
  volumes:
  - name: srvkube
    hostPath:
      path: /etc/srv/kubernetes
  - name: logfile
    hostPath:
      path: /var/log/kube-controller-manager.log
      type: FileOrCreate
  - name: etcssl
    hostPath:
      path: /etc/ssl
  - name: usrsharecacerts
    hostPath:
      path: /usr/share/ca-certificates
@@ -1,43 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-scheduler
  namespace: kube-system
spec:
  hostNetwork: true
  nodeName: {{instance_prefix}}-master
  containers:
  - name: kube-scheduler
    image: {{kube_docker_registry}}/kube-scheduler:{{kube-scheduler_docker_tag}}
    imagePullPolicy: IfNotPresent
    resources:
      requests:
        cpu: 100m
    command:
    - /bin/sh
    - -c
    - /usr/local/bin/kube-scheduler
      {{params}}
      1>>/var/log/kube-scheduler.log 2>&1
    livenessProbe:
      httpGet:
        host: 127.0.0.1
        path: /healthz
        port: 10251
        scheme: HTTP
      initialDelaySeconds: 15
      timeoutSeconds: 15
    volumeMounts:
    - name: srvkube
      mountPath: /etc/srv/kubernetes
      readOnly: true
    - name: logfile
      mountPath: /var/log/kube-scheduler.log
  volumes:
  - name: srvkube
    hostPath:
      path: /etc/srv/kubernetes
  - name: logfile
    hostPath:
      path: /var/log/kube-scheduler.log
      type: FileOrCreate
@@ -24,55 +24,15 @@ function authenticate-docker
  echo "Configuring registry authentication" 1>&2
}

# This function should get master IP address (creating one if needed).
# ENV vars that should be defined by the end of this function:
# - MASTER_IP
#
# Recommended for this function to include retrying logic in case of failures.
function get-or-create-master-ip {
  echo "MASTER_IP: $MASTER_IP" 1>&2
# This function should create kubemark master and write kubeconfig to
# "${RESOURCE_DIRECTORY}/kubeconfig.kubemark".
function create-kubemark-master {
  echo "Creating cluster..."
}

# This function should create a machine instance for the master along
# with any/all of the following resources:
# - Attach a PD to the master (optionally 1 more for storing events)
# - A public IP address for the master ($MASTER_IP)
# - A network firewall rule allowing all TCP traffic on port 443 in master
# Note: This step is compulsory in order for kubemark to work
#
# ENV vars that should be defined by the end of this function:
# - MASTER_NAME
#
# Recommended for this function to include retrying logic for the above
# operations in case of failures.
function create-master-instance-with-resources {
  echo "MASTER_IP: $MASTER_IP" 1>&2
  echo "MASTER_NAME: $MASTER_NAME" 1>&2
}

# This function should execute the command ('$1') on the master machine
# (possibly through SSH), retrying in case of failure. The allowed number of
# retries would be '$2' (if not provided, default to single try).
function execute-cmd-on-master-with-retries() {
  echo "Executing command on the master" 1>&2
}

# This function should act as scp for the kubemark cluster, which copies
# the files given by the first n-1 arguments to the remote location given
# by the n^th argument.
#
# Recommended for this function to include retrying logic in case of failures.
function copy-files() {
  echo "Copying files" 1>&2
}

# This function should delete the master instance along with all the
# resources that have been allocated inside the function
# 'create-master-instance-with-resources' above.
#
# Recommended for this function to include retrying logic in case of failures.
function delete-master-instance-and-resources {
  echo "Deleting master instance and its allocated resources" 1>&2
# This function should delete kubemark master.
function delete-kubemark-master {
  echo "Deleting cluster..."
}
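# A hypothetical sketch (not from this change) of how a non-GCE provider could fill in
# the two entry points above while reusing an already-running control plane; the source
# of the copied kubeconfig is an assumption, only the output path follows the contract.
#
# function create-kubemark-master {
#   cp "${HOME}/.kube/config" "${RESOURCE_DIRECTORY}/kubeconfig.kubemark"  # assumption
#   echo "Reusing pre-existing master" 1>&2
# }
#
# function delete-kubemark-master {
#   echo "Leaving pre-existing master running" 1>&2
# }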

# Common colors used throughout the kubemark scripts
@@ -34,174 +34,15 @@ fi

source "${KUBE_ROOT}/cluster/kubemark/util.sh"

# hack/lib/init.sh will overwrite ETCD_VERSION if it is unset
# to whatever is the default in hack/lib/etcd.sh.
# To avoid that, if it is empty, we set it to 'avoid-overwrite' and
# clear it again afterwards.
if [ -z "${ETCD_VERSION:-}" ]; then
  ETCD_VERSION="avoid-overwrite"
fi
source "${KUBE_ROOT}/hack/lib/init.sh"
if [ "${ETCD_VERSION:-}" == "avoid-overwrite" ]; then
  ETCD_VERSION=""
fi

KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh"
KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark"
RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources"
LOCAL_KUBECONFIG="${RESOURCE_DIRECTORY}/kubeconfig.kubemark"

# Generate a random 6-digit alphanumeric tag for the kubemark image.
# Used to uniquify image builds across different invocations of this script.
KUBEMARK_IMAGE_TAG=$(head /dev/urandom | tr -dc 'a-z0-9' | fold -w 6 | head -n 1)

# Write all environment variables that we need to pass to the kubemark master,
# locally to the file ${RESOURCE_DIRECTORY}/kubemark-master-env.sh.
function create-master-environment-file {
  cat > "${RESOURCE_DIRECTORY}/kubemark-master-env.sh" <<EOF
# Generic variables.
INSTANCE_PREFIX="${INSTANCE_PREFIX:-}"
SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-}"
EVENT_PD="${EVENT_PD:-}"

# Etcd related variables.
ETCD_IMAGE="${ETCD_IMAGE:-3.3.10-1}"
ETCD_VERSION="${ETCD_VERSION:-}"

# Controller-manager related variables.
CONTROLLER_MANAGER_TEST_ARGS="${CONTROLLER_MANAGER_TEST_ARGS:-}"
ALLOCATE_NODE_CIDRS="${ALLOCATE_NODE_CIDRS:-}"
CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-}"
TERMINATED_POD_GC_THRESHOLD="${TERMINATED_POD_GC_THRESHOLD:-}"

# Scheduler related variables.
SCHEDULER_TEST_ARGS="${SCHEDULER_TEST_ARGS:-}"

# Apiserver related variables.
APISERVER_TEST_ARGS="${APISERVER_TEST_ARGS:-}"
STORAGE_MEDIA_TYPE="${STORAGE_MEDIA_TYPE:-}"
STORAGE_BACKEND="${STORAGE_BACKEND:-etcd3}"
ETCD_SERVERS="${ETCD_SERVERS:-}"
ETCD_SERVERS_OVERRIDES="${ETCD_SERVERS_OVERRIDES:-}"
ETCD_COMPACTION_INTERVAL_SEC="${ETCD_COMPACTION_INTERVAL_SEC:-}"
RUNTIME_CONFIG="${RUNTIME_CONFIG:-}"
NUM_NODES="${NUM_NODES:-}"
CUSTOM_ADMISSION_PLUGINS="${CUSTOM_ADMISSION_PLUGINS:-}"
FEATURE_GATES="${FEATURE_GATES:-}"
KUBE_APISERVER_REQUEST_TIMEOUT="${KUBE_APISERVER_REQUEST_TIMEOUT:-}"
ENABLE_APISERVER_ADVANCED_AUDIT="${ENABLE_APISERVER_ADVANCED_AUDIT:-}"
EOF
  echo "Created the environment file for master."
}

# Generate certs/keys for CA, master, kubelet and kubecfg, and tokens for kubelet
# and kubeproxy.
function generate-pki-config {
  kube::util::ensure-temp-dir
  gen-kube-bearertoken
  gen-kube-basicauth
  create-certs "${MASTER_IP}"
  create-etcd-apiserver-certs "etcd-${MASTER_NAME}" "${MASTER_NAME}"
  KUBELET_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  KUBE_PROXY_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  NODE_PROBLEM_DETECTOR_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  HEAPSTER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  CLUSTER_AUTOSCALER_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  KUBE_DNS_TOKEN=$(dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null)
  echo "Generated PKI authentication data for kubemark."
}
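# For illustration, each token pipeline above yields a 32-character string: 128 random
# bytes are base64-encoded, "=", "+" and "/" are stripped, and the first 32 characters
# are kept, e.g. (fabricated value):
#   dd if=/dev/urandom bs=128 count=1 2>/dev/null | base64 | tr -d "=+/" | dd bs=32 count=1 2>/dev/null
#   -> 9fK3xQ7rLw2ZbV5tYcN8mH1aUdPsE0gO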

# Wait for the master to be reachable for executing commands on it. We do this by
# trying to run the bash noop(:) on the master, with 10 retries.
function wait-for-master-reachability {
  execute-cmd-on-master-with-retries ":" 10
  echo "Checked master reachability for remote command execution."
}

# Write all the relevant certs/keys/tokens to the master.
function write-pki-config-to-master {
  PKI_SETUP_CMD="sudo mkdir /home/kubernetes/k8s_auth_data -p && \
    sudo bash -c \"echo ${CA_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/ca.crt\" && \
    sudo bash -c \"echo ${MASTER_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/server.cert\" && \
    sudo bash -c \"echo ${MASTER_KEY_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/server.key\" && \
    sudo bash -c \"echo ${ETCD_APISERVER_CA_KEY_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/etcd-apiserver-ca.key\" && \
    sudo bash -c \"echo ${ETCD_APISERVER_CA_CERT_BASE64} | base64 --decode | gunzip > /home/kubernetes/k8s_auth_data/etcd-apiserver-ca.crt\" && \
    sudo bash -c \"echo ${ETCD_APISERVER_SERVER_KEY_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/etcd-apiserver-server.key\" && \
    sudo bash -c \"echo ${ETCD_APISERVER_SERVER_CERT_BASE64} | base64 --decode | gunzip > /home/kubernetes/k8s_auth_data/etcd-apiserver-server.crt\" && \
    sudo bash -c \"echo ${ETCD_APISERVER_CLIENT_KEY_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/etcd-apiserver-client.key\" && \
    sudo bash -c \"echo ${ETCD_APISERVER_CLIENT_CERT_BASE64} | base64 --decode | gunzip > /home/kubernetes/k8s_auth_data/etcd-apiserver-client.crt\" && \
    sudo bash -c \"echo ${REQUESTHEADER_CA_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/aggr_ca.crt\" && \
    sudo bash -c \"echo ${PROXY_CLIENT_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/proxy_client.crt\" && \
    sudo bash -c \"echo ${PROXY_CLIENT_KEY_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/proxy_client.key\" && \
    sudo bash -c \"echo ${KUBECFG_CERT_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/kubecfg.crt\" && \
    sudo bash -c \"echo ${KUBECFG_KEY_BASE64} | base64 --decode > /home/kubernetes/k8s_auth_data/kubecfg.key\" && \
    sudo bash -c \"echo \"${KUBE_BEARER_TOKEN},admin,admin\" > /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
    sudo bash -c \"echo \"${KUBELET_TOKEN},system:node:node-name,uid:kubelet,system:nodes\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
    sudo bash -c \"echo \"${KUBE_PROXY_TOKEN},system:kube-proxy,uid:kube_proxy\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
    sudo bash -c \"echo \"${HEAPSTER_TOKEN},system:heapster,uid:heapster\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
    sudo bash -c \"echo \"${CLUSTER_AUTOSCALER_TOKEN},system:cluster-autoscaler,uid:cluster-autoscaler\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
    sudo bash -c \"echo \"${NODE_PROBLEM_DETECTOR_TOKEN},system:node-problem-detector,uid:system:node-problem-detector\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
    sudo bash -c \"echo \"${KUBE_DNS_TOKEN},system:kube-dns,uid:kube-dns\" >> /home/kubernetes/k8s_auth_data/known_tokens.csv\" && \
    sudo bash -c \"echo ${KUBE_PASSWORD},admin,admin > /home/kubernetes/k8s_auth_data/basic_auth.csv\""
  execute-cmd-on-master-with-retries "${PKI_SETUP_CMD}" 3
  echo "Wrote PKI certs, keys, tokens and admin password to master."
}

# Write kubeconfig to ${RESOURCE_DIRECTORY}/kubeconfig.kubemark in order to
# use kubectl locally.
function write-local-kubeconfig {
  LOCAL_KUBECONFIG="${RESOURCE_DIRECTORY}/kubeconfig.kubemark"
  cat > "${LOCAL_KUBECONFIG}" << EOF
apiVersion: v1
kind: Config
users:
- name: kubecfg
  user:
    client-certificate-data: "${KUBECFG_CERT_BASE64}"
    client-key-data: "${KUBECFG_KEY_BASE64}"
    username: admin
    password: admin
clusters:
- name: kubemark
  cluster:
    certificate-authority-data: "${CA_CERT_BASE64}"
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: kubecfg
  name: kubemark-context
current-context: kubemark-context
EOF
  echo "Kubeconfig file for kubemark master written to ${LOCAL_KUBECONFIG}."
}

# Copy all the necessary resource files (scripts/configs/manifests) to the master.
function copy-resource-files-to-master {
  copy-files \
    "${SERVER_BINARY_TAR}" \
    "${RESOURCE_DIRECTORY}/kubemark-master-env.sh" \
    "${RESOURCE_DIRECTORY}/start-kubemark-master.sh" \
    "${RESOURCE_DIRECTORY}/kubeconfig.kubemark" \
    "${KUBEMARK_DIRECTORY}/configure-kubectl.sh" \
    "${RESOURCE_DIRECTORY}/manifests/etcd.yaml" \
    "${RESOURCE_DIRECTORY}/manifests/etcd-events.yaml" \
    "${RESOURCE_DIRECTORY}/manifests/kube-apiserver.yaml" \
    "${RESOURCE_DIRECTORY}/manifests/kube-scheduler.yaml" \
    "${RESOURCE_DIRECTORY}/manifests/kube-controller-manager.yaml" \
    "${RESOURCE_DIRECTORY}/manifests/kube-addon-manager.yaml" \
    "${RESOURCE_DIRECTORY}/manifests/addons/kubemark-rbac-bindings" \
    "kubernetes@${MASTER_NAME}":/home/kubernetes/
  echo "Copied server binary, master startup scripts, configs and resource manifests to master."
}

# Make startup scripts executable and run start-kubemark-master.sh.
function start-master-components {
  echo ""
  MASTER_STARTUP_CMD="sudo bash /home/kubernetes/start-kubemark-master.sh"
  execute-cmd-on-master-with-retries "${MASTER_STARTUP_CMD}"
  echo "The master has started and is now live."
}

# Create a docker image for hollow-node and upload it to the appropriate docker registry.
function create-and-upload-hollow-node-image {
  authenticate-docker
@@ -243,121 +84,6 @@ function delete-kubemark-image
# manifests of the hollow-node and heapster replication controllers from
# templates, and finally create these resources through kubectl.
function create-kube-hollow-node-resources {
  # Create kubeconfig for Kubelet.
  KUBELET_KUBECONFIG_CONTENTS="apiVersion: v1
kind: Config
users:
- name: kubelet
  user:
    client-certificate-data: ${KUBELET_CERT_BASE64}
    client-key-data: ${KUBELET_KEY_BASE64}
clusters:
- name: kubemark
  cluster:
    certificate-authority-data: ${CA_CERT_BASE64}
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: kubelet
  name: kubemark-context
current-context: kubemark-context"

  # Create kubeconfig for Kubeproxy.
  KUBEPROXY_KUBECONFIG_CONTENTS="apiVersion: v1
kind: Config
users:
- name: kube-proxy
  user:
    token: ${KUBE_PROXY_TOKEN}
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: kube-proxy
  name: kubemark-context
current-context: kubemark-context"

  # Create kubeconfig for Heapster.
  HEAPSTER_KUBECONFIG_CONTENTS="apiVersion: v1
kind: Config
users:
- name: heapster
  user:
    token: ${HEAPSTER_TOKEN}
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: heapster
  name: kubemark-context
current-context: kubemark-context"

  # Create kubeconfig for Cluster Autoscaler.
  CLUSTER_AUTOSCALER_KUBECONFIG_CONTENTS="apiVersion: v1
kind: Config
users:
- name: cluster-autoscaler
  user:
    token: ${CLUSTER_AUTOSCALER_TOKEN}
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: cluster-autoscaler
  name: kubemark-context
current-context: kubemark-context"

  # Create kubeconfig for NodeProblemDetector.
  NPD_KUBECONFIG_CONTENTS="apiVersion: v1
kind: Config
users:
- name: node-problem-detector
  user:
    token: ${NODE_PROBLEM_DETECTOR_TOKEN}
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: node-problem-detector
  name: kubemark-context
current-context: kubemark-context"

  # Create kubeconfig for Kube DNS.
  KUBE_DNS_KUBECONFIG_CONTENTS="apiVersion: v1
kind: Config
users:
- name: kube-dns
  user:
    token: ${KUBE_DNS_TOKEN}
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: kube-dns
  name: kubemark-context
current-context: kubemark-context"

  # Create kubemark namespace.
  "${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/kubemark-ns.json"

@@ -367,13 +93,15 @@ current-context: kubemark-context
    --from-file=kernel.monitor="${RESOURCE_DIRECTORY}/kernel-monitor.json"

  # Create secret for passing kubeconfigs to kubelet, kubeproxy and npd.
  # It's bad that all components share the same kubeconfig.
  # TODO(https://github.com/kubernetes/kubernetes/issues/79883): Migrate all components to separate credentials.
  "${KUBECTL}" create secret generic "kubeconfig" --type=Opaque --namespace="kubemark" \
    --from-literal=kubelet.kubeconfig="${KUBELET_KUBECONFIG_CONTENTS}" \
    --from-literal=kubeproxy.kubeconfig="${KUBEPROXY_KUBECONFIG_CONTENTS}" \
    --from-literal=heapster.kubeconfig="${HEAPSTER_KUBECONFIG_CONTENTS}" \
    --from-literal=cluster_autoscaler.kubeconfig="${CLUSTER_AUTOSCALER_KUBECONFIG_CONTENTS}" \
    --from-literal=npd.kubeconfig="${NPD_KUBECONFIG_CONTENTS}" \
    --from-literal=dns.kubeconfig="${KUBE_DNS_KUBECONFIG_CONTENTS}"
    --from-file=kubelet.kubeconfig="${LOCAL_KUBECONFIG}" \
    --from-file=kubeproxy.kubeconfig="${LOCAL_KUBECONFIG}" \
    --from-file=npd.kubeconfig="${LOCAL_KUBECONFIG}" \
    --from-file=heapster.kubeconfig="${LOCAL_KUBECONFIG}" \
    --from-file=cluster_autoscaler.kubeconfig="${LOCAL_KUBECONFIG}" \
    --from-file=dns.kubeconfig="${LOCAL_KUBECONFIG}"

  # Create addon pods.
  # Heapster.
@@ -474,42 +202,23 @@ function wait-for-hollow-nodes-to-run-or-timeout
}

############################### Main Function ########################################
detect-project &> /dev/null
find-release-tars

# We need the master IP to generate PKI and kubeconfig for the cluster.
get-or-create-master-ip
generate-pki-config
write-local-kubeconfig

# Setup for master.
function start-master {
  # shellcheck disable=SC2154 # Color defined in sourced script
  echo -e "${color_yellow}STARTING SETUP FOR MASTER${color_norm}"
  create-master-environment-file
  create-master-instance-with-resources
  wait-for-master-reachability
  write-pki-config-to-master
  copy-resource-files-to-master
  start-master-components
}
start-master &
start_master_pid=$!

# Setup for hollow-nodes.
function start-hollow-nodes {
  # shellcheck disable=SC2154 # Color defined in sourced script
  echo -e "${color_yellow}STARTING SETUP FOR HOLLOW-NODES${color_norm}"
  create-and-upload-hollow-node-image
  create-kube-hollow-node-resources
  wait-for-hollow-nodes-to-run-or-timeout
}
start-hollow-nodes &
start_hollow_nodes_pid=$!

wait $start_master_pid || { echo "Failed to start kubemark master" ; exit 1 ; }
wait $start_hollow_nodes_pid || { echo "Failed to start hollow nodes" ; exit 1 ; }
detect-project &> /dev/null
create-kubemark-master

MASTER_IP=$(grep server "$LOCAL_KUBECONFIG" | awk -F "/" '{print $3}')
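# (The grep/awk above pulls the host out of the kubeconfig's "server: https://<ip>" line,
#  e.g. "    server: https://203.0.113.10" yields "203.0.113.10".)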

start-hollow-nodes

echo ""
echo "Master IP: ${MASTER_IP}"
echo "Password to kubemark master: ${KUBE_PASSWORD}"
echo "Kubeconfig for kubemark master is written in ${LOCAL_KUBECONFIG}"

@@ -41,7 +41,6 @@ detect-project &> /dev/null

rm -rf "${RESOURCE_DIRECTORY}/addons" \
  "${RESOURCE_DIRECTORY}/kubeconfig.kubemark" \
  "${RESOURCE_DIRECTORY}/hollow-node.yaml" \
  "${RESOURCE_DIRECTORY}/kubemark-master-env.sh" &> /dev/null || true
  "${RESOURCE_DIRECTORY}/hollow-node.yaml" &> /dev/null || true

delete-master-instance-and-resources
delete-kubemark-master