Use private master IP in GCE kubemark tests
Currently hollow nodes communicate with the kubemark master using the public master IP, which means every call goes through Cloud NAT. Cloud NAT limitations then become a performance bottleneck (see kubernetes/perf-tests/issues/874). To mitigate this, this change creates a second kubeconfig, called "internal", which uses the private master IP and is used to set up hollow nodes. Note that we still need the original kubemark kubeconfig (using the public master IP) to communicate with the master from outside the cluster (when setting it up or running tests).

Testing:
- set up a kubemark cluster and verified in the apiserver logs that calls from hollow nodes did not go through NAT
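A quick way to confirm which endpoint each kubeconfig points at is to read its server line; an illustrative check (the IPs shown are made-up example values):

# Public endpoint, used from outside the cluster (setup, tests):
grep server "${RESOURCE_DIRECTORY}/kubeconfig.kubemark"
#     server: https://35.190.x.x:443

# Private endpoint, used by hollow nodes:
grep server "${RESOURCE_DIRECTORY}/kubeconfig-internal.kubemark"
#     server: https://10.x.x.x:443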
This commit is contained in:
parent 0273d43ae9
commit f9c4551a94
@@ -40,6 +40,7 @@ function create-kubemark-master {
     export KUBE_TEMP="${KUBE_TEMP}"

     export KUBECONFIG="${RESOURCE_DIRECTORY}/kubeconfig.kubemark"
+    export KUBECONFIG_INTERNAL="${RESOURCE_DIRECTORY}/kubeconfig-internal.kubemark"
     export CLUSTER_NAME="${CLUSTER_NAME}-kubemark"
     export KUBE_CREATE_NODES=false
     export KUBE_GCE_INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX}-kubemark"
@@ -80,6 +81,33 @@ function create-kubemark-master {
        "${KUBE_ROOT}/hack/e2e-internal/e2e-grow-cluster.sh"
      done
    fi

+   # The e2e-up.sh script is not sourced, so we don't have access to variables that
+   # it sets. Instead, we read data which was written to the KUBE_TEMP directory.
+   # The cluster-location is either ZONE (say us-east1-a) or REGION (say us-east1).
+   # To get REGION from location, only first two parts are matched.
+   REGION=$(grep -o "^[a-z]*-[a-z0-9]*" "${KUBE_TEMP}"/cluster-location.txt)
+   MASTER_NAME="${KUBE_GCE_INSTANCE_PREFIX}"-master
+
+   MASTER_INTERNAL_IP=$(gcloud compute addresses describe "${MASTER_NAME}-internal-ip" \
+     --project "${PROJECT}" --region "${REGION}" -q --format='value(address)')
+   MASTER_IP=$(gcloud compute addresses describe "${MASTER_NAME}-ip" \
+     --project "${PROJECT}" --region "${REGION}" -q --format='value(address)')
+
+   # If cluster uses private master IP, two kubeconfigs are created:
+   # - kubeconfig with public IP, which will be used to connect to the cluster
+   #   from outside of the cluster network
+   # - kubeconfig with private IP (called internal kubeconfig), which will be
+   #   used to create hollow nodes.
+   #
+   # Note that hollow nodes might use either of these kubeconfigs, but
+   # using internal one is better from performance and cost perspective, since
+   # traffic does not need to go through Cloud NAT.
+   if [[ -n "${MASTER_INTERNAL_IP:-}" ]]; then
+     echo "Writing internal kubeconfig to '${KUBECONFIG_INTERNAL}'"
+     ip_regexp=${MASTER_IP//./\\.}  # escape ".", so that sed won't treat it as "any char"
+     sed "s/${ip_regexp}/${MASTER_INTERNAL_IP}/g" "${KUBECONFIG}" > "${KUBECONFIG_INTERNAL}"
+   fi
+
  )
}
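Two details in the hunk above are easy to miss: the grep that collapses a zone into its region, and the parameter expansion that escapes dots before the sed substitution. A standalone sketch, using made-up IPs and locations:

# Region extraction: the pattern matches only the first two dash-separated
# parts, so a zone collapses to its region (a region passes through unchanged).
echo "us-east1-a" | grep -o "^[a-z]*-[a-z0-9]*"   # -> us-east1
echo "us-east1"   | grep -o "^[a-z]*-[a-z0-9]*"   # -> us-east1

# Dot escaping: without it, "." in the public IP would match any character
# in the sed pattern and could rewrite unrelated text.
MASTER_IP="35.190.0.10"           # example public IP
MASTER_INTERNAL_IP="10.40.0.2"    # example private IP
ip_regexp=${MASTER_IP//./\\.}     # -> 35\.190\.0\.10
echo "    server: https://${MASTER_IP}:443" \
  | sed "s/${ip_regexp}/${MASTER_INTERNAL_IP}/g"
# ->     server: https://10.40.0.2:443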
@@ -26,6 +26,10 @@ function authenticate-docker {

 # This function should create kubemark master and write kubeconfig to
 # "${RESOURCE_DIRECTORY}/kubeconfig.kubemark".
+# If a cluster uses private master IP, create-kubemark-master might also write
+# a second kubeconfig to "${RESOURCE_DIRECTORY}/kubeconfig-internal.kubemark".
+# The difference between these two kubeconfigs is that the internal one uses
+# private master IP, which might be better suited for setting up hollow nodes.
 function create-kubemark-master {
   echo "Creating cluster..."
 }
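To make the contract above concrete, here is a rough sketch of a provider-specific implementation; the write_kubeconfig_with_ip helper and the MASTER_PUBLIC_IP / MASTER_INTERNAL_IP variables are hypothetical, only the two output paths come from the comment above:

# Hypothetical provider implementation (illustrative only).
function create-kubemark-master {
  echo "Creating cluster..."
  # ... provider-specific steps that bring up the kubemark master ...

  # Required: kubeconfig with an externally reachable endpoint.
  write_kubeconfig_with_ip "${MASTER_PUBLIC_IP}" > "${RESOURCE_DIRECTORY}/kubeconfig.kubemark"

  # Optional: kubeconfig using the private master IP; it is picked up later
  # for hollow nodes if the file exists.
  if [[ -n "${MASTER_INTERNAL_IP:-}" ]]; then
    write_kubeconfig_with_ip "${MASTER_INTERNAL_IP}" > "${RESOURCE_DIRECTORY}/kubeconfig-internal.kubemark"
  fi
}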
@@ -38,6 +38,7 @@ KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh"
 KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark"
 RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources"
 LOCAL_KUBECONFIG="${RESOURCE_DIRECTORY}/kubeconfig.kubemark"
+INTERNAL_KUBECONFIG="${RESOURCE_DIRECTORY}/kubeconfig-internal.kubemark"

 # Generate a random 6-digit alphanumeric tag for the kubemark image.
 # Used to uniquify image builds across different invocations of this script.
@@ -96,12 +97,12 @@ function create-kube-hollow-node-resources {
   # It's bad that all component shares the same kubeconfig.
   # TODO(https://github.com/kubernetes/kubernetes/issues/79883): Migrate all components to separate credentials.
   "${KUBECTL}" create secret generic "kubeconfig" --type=Opaque --namespace="kubemark" \
-    --from-file=kubelet.kubeconfig="${LOCAL_KUBECONFIG}" \
-    --from-file=kubeproxy.kubeconfig="${LOCAL_KUBECONFIG}" \
-    --from-file=npd.kubeconfig="${LOCAL_KUBECONFIG}" \
-    --from-file=heapster.kubeconfig="${LOCAL_KUBECONFIG}" \
-    --from-file=cluster_autoscaler.kubeconfig="${LOCAL_KUBECONFIG}" \
-    --from-file=dns.kubeconfig="${LOCAL_KUBECONFIG}"
+    --from-file=kubelet.kubeconfig="${HOLLOWNODE_KUBECONFIG}" \
+    --from-file=kubeproxy.kubeconfig="${HOLLOWNODE_KUBECONFIG}" \
+    --from-file=npd.kubeconfig="${HOLLOWNODE_KUBECONFIG}" \
+    --from-file=heapster.kubeconfig="${HOLLOWNODE_KUBECONFIG}" \
+    --from-file=cluster_autoscaler.kubeconfig="${HOLLOWNODE_KUBECONFIG}" \
+    --from-file=dns.kubeconfig="${HOLLOWNODE_KUBECONFIG}"

   # Create addon pods.
   # Heapster.
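To double-check which kubeconfig ended up in the secret, one can decode a key and look at its server line; an illustrative command, not part of the change:

"${KUBECTL}" --namespace="kubemark" get secret "kubeconfig" \
  -o go-template='{{index .data "kubelet.kubeconfig"}}' | base64 --decode | grep server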
@@ -227,7 +228,13 @@ function start-hollow-nodes {
 detect-project &> /dev/null
 create-kubemark-master

-MASTER_IP=$(grep server "$LOCAL_KUBECONFIG" | awk -F "/" '{print $3}')
+if [ -f "${INTERNAL_KUBECONFIG}" ]; then
+  HOLLOWNODE_KUBECONFIG="${INTERNAL_KUBECONFIG}"
+else
+  HOLLOWNODE_KUBECONFIG="${LOCAL_KUBECONFIG}"
+fi
+
+MASTER_IP=$(grep server "${HOLLOWNODE_KUBECONFIG}" | awk -F "/" '{print $3}')

 start-hollow-nodes
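For reference, the grep/awk pipeline above extracts host:port from the kubeconfig's server URL; a minimal illustration with a made-up address:

# A kubeconfig server line looks like:    server: https://10.40.0.2:443
# Splitting on "/" makes the third field the "host:port" part.
echo "    server: https://10.40.0.2:443" | awk -F "/" '{print $3}'
# -> 10.40.0.2:443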