Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-19 09:52:49 +00:00)
Drop unsupported iks configuration for kubemark

Signed-off-by: Davanum Srinivas <davanum@gmail.com>

parent 5fd6b0a980
commit 908ec2a496
@@ -42,15 +42,4 @@ function detect-project() {
 function create-certs {
   execute-cmd-on-pre-existing-master-with-retries 'sudo cat /etc/kubernetes/admin.conf' > /tmp/kubeconfig
-  # CA_CERT_BASE64, KUBELET_CERT_BASE64 and KUBELET_KEY_BASE64 might be used
-  # in test/kubemark/iks/util.sh
-  # If it becomes clear that the variables are not used anywhere, then we can
-  # remove them:
-  CA_CERT_BASE64=$(grep certificate-authority /tmp/kubeconfig | awk '{print $2}' | head -n 1)
-  KUBELET_CERT_BASE64=$(grep client-certificate-data /tmp/kubeconfig | awk '{print $2}' | head -n 1)
-  KUBELET_KEY_BASE64=$(grep client-key-data /tmp/kubeconfig | awk '{print $2}' | head -n 1)
-  export CA_CERT_BASE64
-  export KUBELET_CERT_BASE64
-  export KUBELET_KEY_BASE64
 }
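After this change the retained function does nothing beyond fetching the admin kubeconfig from the pre-existing master. Reconstructed from the context lines above (a sketch; the rest of the file is not shown in this diff), the surviving body is roughly:

    function create-certs {
      execute-cmd-on-pre-existing-master-with-retries 'sudo cat /etc/kubernetes/admin.conf' > /tmp/kubeconfig
    }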
@@ -1,44 +0,0 @@
#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Script that destroys the clusters used, namespace, and deployment.

KUBECTL=kubectl
KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark"
RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources"

# Login to cloud services
complete-login

# Remove resources created for kubemark
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_yellow}REMOVING RESOURCES${color_norm}"
spawn-config
"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/addons" &> /dev/null || true
"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/hollow-node.yaml" &> /dev/null || true
"${KUBECTL}" delete -f "${RESOURCE_DIRECTORY}/kubemark-ns.json" &> /dev/null || true
rm -rf "${RESOURCE_DIRECTORY}/addons" \
  "${RESOURCE_DIRECTORY}/hollow-node.yaml" &> /dev/null || true

# Remove clusters, namespaces, and deployments
delete-clusters
if [[ -f "${RESOURCE_DIRECTORY}/iks-namespacelist.sh" ]] ; then
  bash "${RESOURCE_DIRECTORY}/iks-namespacelist.sh"
  rm -f "${RESOURCE_DIRECTORY}/iks-namespacelist.sh"
fi
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_blue}EXECUTION COMPLETE${color_norm}"
exit 0
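This tear-down script is not self-contained: it assumes KUBE_ROOT, the color_* variables, and the helpers complete-login, spawn-config and delete-clusters have already been sourced (the IKS util.sh in the last hunk defines those helpers). A purely illustrative guard that could precede the cleanup, not part of the original file:

    for helper in complete-login spawn-config delete-clusters; do
      declare -F "${helper}" > /dev/null || { echo "missing helper: ${helper}" >&2; exit 1; }
    done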
@@ -1,320 +0,0 @@
#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Script that creates a Kubemark cluster for IBM cloud.

KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh"
KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark"
RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources"

# Generate secret and configMap for the hollow-node pods to work, prepare
# manifests of the hollow-node and heapster replication controllers from
# templates, and finally create these resources through kubectl.
function create-kube-hollow-node-resources {
  # Create kubeconfig for Kubelet.
  KUBELET_KUBECONFIG_CONTENTS="$(cat <<EOF
apiVersion: v1
kind: Config
users:
- name: kubelet
  user:
    client-certificate-data: "${KUBELET_CERT_BASE64}"
    client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
  cluster:
    certificate-authority-data: "${CA_CERT_BASE64}"
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: kubelet
  name: kubemark-context
current-context: kubemark-context
EOF
)"

  # Create kubeconfig for Kubeproxy.
  KUBEPROXY_KUBECONFIG_CONTENTS="$(cat <<EOF
apiVersion: v1
kind: Config
users:
- name: kube-proxy
  user:
    client-certificate-data: "${KUBELET_CERT_BASE64}"
    client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: kube-proxy
  name: kubemark-context
current-context: kubemark-context
EOF
)"

  # Create kubeconfig for Heapster.
  HEAPSTER_KUBECONFIG_CONTENTS="$(cat <<EOF
apiVersion: v1
kind: Config
users:
- name: heapster
  user:
    client-certificate-data: "${KUBELET_CERT_BASE64}"
    client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: heapster
  name: kubemark-context
current-context: kubemark-context
EOF
)"

  # Create kubeconfig for Cluster Autoscaler.
  CLUSTER_AUTOSCALER_KUBECONFIG_CONTENTS="$(cat <<EOF
apiVersion: v1
kind: Config
users:
- name: cluster-autoscaler
  user:
    client-certificate-data: "${KUBELET_CERT_BASE64}"
    client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: cluster-autoscaler
  name: kubemark-context
current-context: kubemark-context
EOF
)"

  # Create kubeconfig for NodeProblemDetector.
  NPD_KUBECONFIG_CONTENTS="$(cat <<EOF
apiVersion: v1
kind: Config
users:
- name: node-problem-detector
  user:
    client-certificate-data: "${KUBELET_CERT_BASE64}"
    client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: node-problem-detector
  name: kubemark-context
current-context: kubemark-context
EOF
)"

  # Create kubeconfig for Kube DNS.
  KUBE_DNS_KUBECONFIG_CONTENTS="$(cat <<EOF
apiVersion: v1
kind: Config
users:
- name: kube-dns
  user:
    client-certificate-data: "${KUBELET_CERT_BASE64}"
    client-key-data: "${KUBELET_KEY_BASE64}"
clusters:
- name: kubemark
  cluster:
    insecure-skip-tls-verify: true
    server: https://${MASTER_IP}
contexts:
- context:
    cluster: kubemark
    user: kube-dns
  name: kubemark-context
current-context: kubemark-context
EOF
)"

  # Create kubemark namespace.
  spawn-config
  if kubectl get ns | grep -Fq "kubemark"; then
    kubectl delete ns kubemark
    while kubectl get ns | grep -Fq "kubemark"
    do
      sleep 10
    done
  fi
  "${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/kubemark-ns.json"

  # Create configmap for configuring hollow- kubelet, proxy and npd.
  "${KUBECTL}" create configmap "node-configmap" --namespace="kubemark" \
    --from-file=kernel.monitor="${RESOURCE_DIRECTORY}/kernel-monitor.json"

  # Create secret for passing kubeconfigs to kubelet, kubeproxy and npd.
  "${KUBECTL}" create secret generic "kubeconfig" --type=Opaque --namespace="kubemark" \
    --from-literal=kubelet.kubeconfig="${KUBELET_KUBECONFIG_CONTENTS}" \
    --from-literal=kubeproxy.kubeconfig="${KUBEPROXY_KUBECONFIG_CONTENTS}" \
    --from-literal=heapster.kubeconfig="${HEAPSTER_KUBECONFIG_CONTENTS}" \
    --from-literal=cluster_autoscaler.kubeconfig="${CLUSTER_AUTOSCALER_KUBECONFIG_CONTENTS}" \
    --from-literal=npd.kubeconfig="${NPD_KUBECONFIG_CONTENTS}" \
    --from-literal=dns.kubeconfig="${KUBE_DNS_KUBECONFIG_CONTENTS}"

  # Create addon pods.
  # Heapster.
  mkdir -p "${RESOURCE_DIRECTORY}/addons"
  sed "s/{{MASTER_IP}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/heapster_template.json" > "${RESOURCE_DIRECTORY}/addons/heapster.json"
  metrics_mem_per_node=4
  metrics_mem=$((200 + metrics_mem_per_node*NUM_NODES))
  sed -i'' -e "s/{{METRICS_MEM}}/${metrics_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
  metrics_cpu_per_node_numerator=${NUM_NODES}
  metrics_cpu_per_node_denominator=2
  metrics_cpu=$((80 + metrics_cpu_per_node_numerator / metrics_cpu_per_node_denominator))
  sed -i'' -e "s/{{METRICS_CPU}}/${metrics_cpu}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"
  eventer_mem_per_node=500
  eventer_mem=$((200 * 1024 + eventer_mem_per_node*NUM_NODES))
  sed -i'' -e "s/{{EVENTER_MEM}}/${eventer_mem}/g" "${RESOURCE_DIRECTORY}/addons/heapster.json"

  # Cluster Autoscaler.
  if [[ "${ENABLE_KUBEMARK_CLUSTER_AUTOSCALER:-}" == "true" ]]; then
    echo "Setting up Cluster Autoscaler"
    KUBEMARK_AUTOSCALER_MIG_NAME="${KUBEMARK_AUTOSCALER_MIG_NAME:-${NODE_INSTANCE_PREFIX}-group}"
    KUBEMARK_AUTOSCALER_MIN_NODES="${KUBEMARK_AUTOSCALER_MIN_NODES:-0}"
    KUBEMARK_AUTOSCALER_MAX_NODES="${KUBEMARK_AUTOSCALER_MAX_NODES:-${DESIRED_NODES}}"
    NUM_NODES=${KUBEMARK_AUTOSCALER_MAX_NODES}
    echo "Setting maximum cluster size to ${NUM_NODES}."
    KUBEMARK_MIG_CONFIG="autoscaling.k8s.io/nodegroup: ${KUBEMARK_AUTOSCALER_MIG_NAME}"
    sed "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/cluster-autoscaler_template.json" > "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
    sed -i'' -e "s/{{kubemark_autoscaler_mig_name}}/${KUBEMARK_AUTOSCALER_MIG_NAME}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
    sed -i'' -e "s/{{kubemark_autoscaler_min_nodes}}/${KUBEMARK_AUTOSCALER_MIN_NODES}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
    sed -i'' -e "s/{{kubemark_autoscaler_max_nodes}}/${KUBEMARK_AUTOSCALER_MAX_NODES}/g" "${RESOURCE_DIRECTORY}/addons/cluster-autoscaler.json"
  fi

  # Kube DNS.
  if [[ "${ENABLE_KUBEMARK_KUBE_DNS:-}" == "true" ]]; then
    echo "Setting up kube-dns"
    sed "s/{{dns_domain}}/${KUBE_DNS_DOMAIN}/g" "${RESOURCE_DIRECTORY}/kube_dns_template.yaml" > "${RESOURCE_DIRECTORY}/addons/kube_dns.yaml"
  fi

  "${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/addons" --namespace="kubemark"
  set-registry-secrets

  # Create the replication controller for hollow-nodes.
  # We allow to override the NUM_REPLICAS when running Cluster Autoscaler.
  NUM_REPLICAS=${NUM_REPLICAS:-${KUBEMARK_NUM_NODES}}
  sed "s/{{numreplicas}}/${NUM_REPLICAS}/g" "${RESOURCE_DIRECTORY}/hollow-node_template.yaml" > "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  proxy_cpu=20
  if [ "${NUM_NODES}" -gt 1000 ]; then
    proxy_cpu=50
  fi
  proxy_mem_per_node=50
  proxy_mem=$((100 * 1024 + proxy_mem_per_node*NUM_NODES))
  hollow_kubelet_params=$(eval "for param in ${HOLLOW_KUBELET_TEST_ARGS:-}; do echo -n \\\"\$param\\\",; done")
  hollow_kubelet_params=${hollow_kubelet_params%?}
  hollow_proxy_params=$(eval "for param in ${HOLLOW_PROXY_TEST_ARGS:-}; do echo -n \\\"\$param\\\",; done")
  hollow_proxy_params=${hollow_proxy_params%?}

  sed -i'' -e "s/{{HOLLOW_PROXY_CPU}}/${proxy_cpu}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s/{{HOLLOW_PROXY_MEM}}/${proxy_mem}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s'{{kubemark_image_registry}}'${KUBEMARK_IMAGE_REGISTRY}${KUBE_NAMESPACE}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s/{{kubemark_image_tag}}/${KUBEMARK_IMAGE_TAG}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s/{{master_ip}}/${MASTER_IP}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s/{{hollow_kubelet_params}}/${hollow_kubelet_params}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s/{{hollow_proxy_params}}/${hollow_proxy_params}/g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  sed -i'' -e "s'{{kubemark_mig_config}}'${KUBEMARK_MIG_CONFIG:-}'g" "${RESOURCE_DIRECTORY}/hollow-node.yaml"
  "${KUBECTL}" create -f "${RESOURCE_DIRECTORY}/hollow-node.yaml" --namespace="kubemark"

  echo "Created secrets, configMaps, replication-controllers required for hollow-nodes."
}
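Two of the less obvious steps in create-kube-hollow-node-resources above, worked through with illustrative values (NUM_NODES=100 and the flag string are made up; the units for the heapster numbers come from the heapster template, which is not part of this diff):

    NUM_NODES=100
    echo $((200 + 4*NUM_NODES))          # metrics_mem  -> 600
    echo $((80 + NUM_NODES/2))           # metrics_cpu  -> 130
    echo $((200*1024 + 500*NUM_NODES))   # eventer_mem  -> 254800

    # The eval loop turns a space-separated flag string into the quoted,
    # comma-separated list that the hollow-node template expects:
    HOLLOW_KUBELET_TEST_ARGS='--v=2 --max-pods=100'
    params=$(eval "for param in ${HOLLOW_KUBELET_TEST_ARGS:-}; do echo -n \\\"\$param\\\",; done")
    echo "${params%?}"                   # -> "--v=2","--max-pods=100"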

# Wait until all hollow-nodes are running or there is a timeout.
function wait-for-hollow-nodes-to-run-or-timeout {
  echo -n "Waiting for all hollow-nodes to become Running"
  start=$(date +%s)
  # IKS uses a real cluster for hollow master, so need to exclude the real worker nodes
  nodes=$("${KUBECTL}" --kubeconfig="${KUBECONFIG}" get node | grep hollow-node 2> /dev/null) || true
  ready=$(($(echo "${nodes}" | grep -vc "NotReady") - 1))
  until [[ "${ready}" -ge "${NUM_REPLICAS}" ]]; do
    echo -n "."
    sleep 1
    now=$(date +%s)
    # Fail it if it already took more than 30 minutes.
    if [ $((now - start)) -gt 1800 ]; then
      echo ""
      # shellcheck disable=SC2154 # Color defined in sourced script
      echo -e "${color_red} Timeout waiting for all hollow-nodes to become Running. ${color_norm}"
      # Try listing nodes again - if it fails it means that API server is not responding
      if "${KUBECTL}" --kubeconfig="${KUBECONFIG}" get node &> /dev/null; then
        echo "Found only ${ready} ready hollow-nodes while waiting for ${NUM_NODES}."
      else
        echo "Got error while trying to list hollow-nodes. Probably API server is down."
      fi
      spawn-config
      pods=$("${KUBECTL}" get pods -l name=hollow-node --namespace=kubemark) || true
      running=$(($(echo "${pods}" | grep -c "Running")))
      echo "${running} hollow-nodes are reported as 'Running'"
      not_running=$(($(echo "${pods}" | grep -vc "Running") - 1))
      echo "${not_running} hollow-nodes are reported as NOT 'Running'"
      echo "${pods}" | grep -v "Running"
      exit 1
    fi
    nodes=$("${KUBECTL}" --kubeconfig="${KUBECONFIG}" get node 2> /dev/null) || true
    ready=$(($(echo "${nodes}" | grep -vc "NotReady") - 1))
  done
  # shellcheck disable=SC2154 # Color defined in sourced script
  echo -e "${color_green} Done!${color_norm}"
}

############################### Main Function ########################################
# In order for the cluster autoscaler to function, the template file must be changed so that the ":443"
# is removed. This is because the port is already given with the MASTER_IP.

# Create clusters and populate with hollow nodes
complete-login
build-kubemark-image
choose-clusters
generate-values
set-hollow-master
echo "Creating kube hollow node resources"
create-kube-hollow-node-resources
master-config
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_blue}EXECUTION COMPLETE${color_norm}"

# Check status of Kubemark
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_yellow}CHECKING STATUS${color_norm}"
wait-for-hollow-nodes-to-run-or-timeout

# Celebrate
echo ""
# shellcheck disable=SC2154 # Color defined in sourced script
echo -e "${color_blue}SUCCESS${color_norm}"
clean-repo
exit 0
@@ -1,224 +0,0 @@
#!/usr/bin/env bash

# Copyright 2018 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../../..

# Creates a new kube-spawn cluster
function create-clusters {
  # shellcheck disable=SC2154 # Color defined in sourced script
  echo -e "${color_yellow}CHECKING CLUSTERS${color_norm}"
  if ibmcloud ks clusters | grep -Fq 'deleting'; then
    echo -n "Deleting old clusters"
  fi
  while ibmcloud ks clusters | grep -Fq 'deleting'
  do
    echo -n "."
    sleep 10
  done
  echo ""
  ibmcloud ks region-set us-east >/dev/null
  ibmcloud ks vlans wdc06 >/dev/null
  PRIVLAN=$(ibmcloud ks vlans wdc06 --json | jq '. | .[] | select(.type == "private") | .id' | sed -e "s/\"//g")
  PUBVLAN=$(ibmcloud ks vlans wdc06 --json | jq '. | .[] | select(.type == "public") | .id' | sed -e "s/\"//g")
  if ! ibmcloud ks clusters | grep -Fq 'kubeSpawnTester'; then
    echo "Creating spawning cluster"
    # make number and spec of node workers configurable
    # otherwise it can't afford tests like kubemark-5000
    # TODO: dynamically adjust the number and spec
    ibmcloud ks cluster-create --location "${CLUSTER_LOCATION}" --public-vlan "${PUBVLAN}" --private-vlan "${PRIVLAN}" --workers "${NUM_NODES:-2}" --machine-type "${NODE_SIZE}" --name kubeSpawnTester
  fi
  if ! ibmcloud ks clusters | grep -Fq 'kubeMasterTester'; then
    echo "Creating master cluster"
    # if we can't make it a bare master (workers = 0)
    # then make workers = 1 with the smallest machine spec
    ibmcloud ks cluster-create --location "${CLUSTER_LOCATION}" --public-vlan "${PUBVLAN}" --private-vlan "${PRIVLAN}" --workers 1 --machine-type u2c.2x4 --name kubeMasterTester
  fi
  push-image
  if ! ibmcloud ks clusters | grep 'kubeSpawnTester' | grep -Fq 'normal'; then
    # shellcheck disable=SC2154 # Color defined in sourced script
    echo -e "${color_cyan}Warning: new clusters may take up to 60 minutes to be ready${color_norm}"
    echo -n "Clusters loading"
  fi
  while ! ibmcloud ks clusters | grep 'kubeSpawnTester' | grep -Fq 'normal'
  do
    echo -n "."
    sleep 5
  done
  while ! ibmcloud ks clusters | grep 'kubeMasterTester' | grep -Fq 'normal'
  do
    echo -n "."
    sleep 5
  done
  echo -e "${color_yellow}CLUSTER CREATION COMPLETE${color_norm}"
}

# Builds and pushes image to registry
function push-image {
  if [[ "${ISBUILD}" = "y" ]]; then
    if ! ibmcloud cr namespaces | grep -Fq "${KUBE_NAMESPACE}"; then
      echo "Creating registry namespace"
      ibmcloud cr namespace-add "${KUBE_NAMESPACE}"
      echo "ibmcloud cr namespace-rm ${KUBE_NAMESPACE}" >> "${RESOURCE_DIRECTORY}/iks-namespacelist.sh"
    fi
    docker build -t "${KUBEMARK_INIT_TAG}" "${KUBEMARK_IMAGE_LOCATION}"
    docker tag "${KUBEMARK_INIT_TAG}" "${KUBEMARK_IMAGE_REGISTRY}${KUBE_NAMESPACE}/${PROJECT}:${KUBEMARK_IMAGE_TAG}"
    docker push "${KUBEMARK_IMAGE_REGISTRY}${KUBE_NAMESPACE}/${PROJECT}:${KUBEMARK_IMAGE_TAG}"
    echo "Image pushed"
  else
    KUBEMARK_IMAGE_REGISTRY="${KUBEMARK_IMAGE_REGISTRY:-brandondr96}"
    KUBE_NAMESPACE=""
  fi
}

# Allow user to use existing clusters if desired
function choose-clusters {
  echo -n -e "Do you want to use custom clusters? [y/N]${color_cyan}>${color_norm} "
  read -r USE_EXISTING
  if [[ "${USE_EXISTING}" = "y" ]]; then
    echo -e "${color_yellow}Enter path for desired hollow-node spawning cluster kubeconfig file:${color_norm}"
    read -r CUSTOM_SPAWN_CONFIG
    echo -e "${color_yellow}Enter path for desired hollow-node hosting cluster kubeconfig file:${color_norm}"
    read -r CUSTOM_MASTER_CONFIG
    push-image
  elif [[ "${USE_EXISTING}" = "N" ]]; then
    create-clusters
  else
    # shellcheck disable=SC2154 # Color defined in sourced script
    echo -e "${color_red}Invalid response, please try again:${color_norm}"
    choose-clusters
  fi
}

# Ensure secrets are correctly set
function set-registry-secrets {
  spawn-config
  kubectl get secret bluemix-default-secret-regional -o yaml | sed 's/default/kubemark/g' | kubectl -n kubemark create -f -
  kubectl patch serviceaccount -n kubemark default -p '{"imagePullSecrets": [{"name": "bluemix-kubemark-secret"}]}'
  kubectl -n kubemark get serviceaccounts default -o json | jq 'del(.metadata.resourceVersion)' | jq 'setpath(["imagePullSecrets"];[{"name":"bluemix-kubemark-secret-regional"}])' | kubectl -n kubemark replace serviceaccount default -f -
}
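The jq step in set-registry-secrets rewrites the default service account so that its imagePullSecrets points at the copied registry secret. A standalone illustration of that setpath call against a stub object (-c is added here only to keep the output on one line):

    echo '{"metadata":{"name":"default"}}' \
      | jq -c 'setpath(["imagePullSecrets"];[{"name":"bluemix-kubemark-secret-regional"}])'
    # -> {"metadata":{"name":"default"},"imagePullSecrets":[{"name":"bluemix-kubemark-secret-regional"}]}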

# Sets the hollow-node master
# Exported variables:
# MASTER_IP - IP Address of the Kubemark master
function set-hollow-master {
  echo -e "${color_yellow}CONFIGURING MASTER${color_norm}"
  master-config
  MASTER_IP=$(grep server "$KUBECONFIG" | awk -F "/" '{print $3}')
  export MASTER_IP
}
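The grep/awk pair in set-hollow-master pulls host:port out of the kubeconfig's server line, which is why the generated kubeconfigs use https://${MASTER_IP} with no extra port (and why the main section of the start script notes that ":443" must be dropped from the autoscaler template). For example, with a made-up server line:

    echo '    server: https://c100-e.us-east.containers.cloud.ibm.com:30235' | awk -F "/" '{print $3}'
    # -> c100-e.us-east.containers.cloud.ibm.com:30235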

# Set up master cluster environment
# Exported variables:
# KUBECONFIG - Overrides default kube config for the purpose of setting up the Kubemark master components.
function master-config {
  if [[ "${USE_EXISTING}" = "y" ]]; then
    export KUBECONFIG=${CUSTOM_MASTER_CONFIG}
  else
    eval "$(ibmcloud ks cluster-config kubeMasterTester --admin | grep export)"
  fi
}

# Set up spawn cluster environment
# Exported variables:
# KUBECONFIG - Overrides default kube config for the purpose of setting up the hollow-node cluster.
function spawn-config {
  if [[ "${USE_EXISTING}" = "y" ]]; then
    export KUBECONFIG=${CUSTOM_SPAWN_CONFIG}
  else
    eval "$(ibmcloud ks cluster-config kubeSpawnTester --admin | grep export)"
  fi
}

# Deletes existing clusters
function delete-clusters {
  echo "DELETING CLUSTERS"
  ibmcloud ks cluster-rm kubeSpawnTester
  ibmcloud ks cluster-rm kubeMasterTester
  while ! ibmcloud ks clusters | grep 'kubeSpawnTester' | grep -Fq 'deleting'
  do
    sleep 5
  done
  while ! ibmcloud ks clusters | grep 'kubeMasterTester' | grep -Fq 'deleting'
  do
    sleep 5
  done
  kubectl delete ns kubemark
}

# Login to cloud services
function complete-login {
  echo -e "${color_yellow}LOGGING INTO CLOUD SERVICES${color_norm}"
  echo -n -e "Do you have a federated IBM cloud login? [y/N]${color_cyan}>${color_norm} "
  read -r ISFED
  if [[ "${ISFED}" = "y" ]]; then
    ibmcloud login --sso -a "${REGISTRY_LOGIN_URL}"
  elif [[ "${ISFED}" = "N" ]]; then
    ibmcloud login -a "${REGISTRY_LOGIN_URL}"
  else
    echo -e "${color_red}Invalid response, please try again:${color_norm}"
    complete-login
  fi
  ibmcloud cr login
}

# Generate values to fill the hollow-node configuration templates.
# Exported variables:
# KUBECTL - The name or path to the kubernetes client binary.
function generate-values {
  echo "Generating values"
  master-config
  KUBECTL=kubectl
  export KUBECTL
  KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark"
  RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources"
  CONFIGPATH=${KUBECONFIG%/*}
  KUBELET_CERT_BASE64="${KUBELET_CERT_BASE64:-$(base64 "${CONFIGPATH}/admin.pem" | tr -d '\r\n')}"
  KUBELET_KEY_BASE64="${KUBELET_KEY_BASE64:-$(base64 "${CONFIGPATH}/admin-key.pem" | tr -d '\r\n')}"
  CA_CERT_BASE64="${CA_CERT_BASE64:-$( base64 "$(find "${CONFIGPATH}" -name "*ca*" | head -n 1)" | tr -d '\r\n')}"
}
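In generate-values, ${KUBECONFIG%/*} strips the file name from the kubeconfig path, leaving the directory that is expected to also hold admin.pem, admin-key.pem and the CA file. With a hypothetical path:

    KUBECONFIG=/home/user/.bluemix/clusters/kubeMasterTester-admin/kube-config.yml   # made-up example
    echo "${KUBECONFIG%/*}"   # -> /home/user/.bluemix/clusters/kubeMasterTester-admin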

# Build image for kubemark
function build-kubemark-image {
  echo -n -e "Do you want to build the kubemark image? [y/N]${color_cyan}>${color_norm} "
  read -r ISBUILD
  if [[ "${ISBUILD}" = "y" ]]; then
    echo -e "${color_yellow}BUILDING IMAGE${color_norm}"
    "${KUBE_ROOT}/build/run.sh" make kubemark
    cp "${KUBE_ROOT}/_output/dockerized/bin/linux/amd64/kubemark" "${KUBEMARK_IMAGE_LOCATION}"
  elif [[ "${ISBUILD}" = "N" ]]; then
    echo -n ""
  else
    echo -e "${color_red}Invalid response, please try again:${color_norm}"
    build-kubemark-image
  fi
}

# Clean up repository
function clean-repo {
  echo -n -e "Do you want to remove build output and binary? [y/N]${color_cyan}>${color_norm} "
  read -r ISCLEAN
  if [[ "${ISCLEAN}" = "y" ]]; then
    echo -e "${color_yellow}CLEANING REPO${color_norm}"
    rm -rf "${KUBE_ROOT}/_output"
    rm -f "${KUBEMARK_IMAGE_LOCATION}/kubemark"
  elif [[ "${ISCLEAN}" = "N" ]]; then
    echo -n ""
  else
    echo -e "${color_red}Invalid response, please try again:${color_norm}"
    clean-repo
  fi
}