From f9c4551a947fa0ee8dcd1b217e30b117b9f4e6e9 Mon Sep 17 00:00:00 2001 From: Jakub Pierewoj Date: Mon, 16 Dec 2019 18:34:18 +0100 Subject: [PATCH] Use private master IP in GCE kubemark tests Currently hollow nodes communicate with kubemark master using public master IP, which results in each call going through cloud NAT. Cloud NAT limitations become a performance bottleneck (see kubernetes/perf-tests/issues/874). To mitigate this, in this change, a second kubeconfig called "internal" is created. It uses private master IP and is used to set up hollow nodes. Note that we still need the original kubemark kubeconfig (using public master IP) to be able to communicate with the master from outside the cluster (when setting it up or running tests). Testing: - set up kubemark cluster, verified apiserver logs to confirm that the call from hollow nodes did not go through NAT --- test/kubemark/gce/util.sh | 28 ++++++++++++++++++++++++++++ test/kubemark/skeleton/util.sh | 4 ++++ test/kubemark/start-kubemark.sh | 21 ++++++++++++++------- 3 files changed, 46 insertions(+), 7 deletions(-) diff --git a/test/kubemark/gce/util.sh b/test/kubemark/gce/util.sh index f62f523b0f7..76cb7439da4 100644 --- a/test/kubemark/gce/util.sh +++ b/test/kubemark/gce/util.sh @@ -40,6 +40,7 @@ function create-kubemark-master { export KUBE_TEMP="${KUBE_TEMP}" export KUBECONFIG="${RESOURCE_DIRECTORY}/kubeconfig.kubemark" + export KUBECONFIG_INTERNAL="${RESOURCE_DIRECTORY}/kubeconfig-internal.kubemark" export CLUSTER_NAME="${CLUSTER_NAME}-kubemark" export KUBE_CREATE_NODES=false export KUBE_GCE_INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX}-kubemark" @@ -80,6 +81,33 @@ function create-kubemark-master { "${KUBE_ROOT}/hack/e2e-internal/e2e-grow-cluster.sh" done fi + + # The e2e-up.sh script is not sourced, so we don't have access to variables that + # it sets. Instead, we read data which was written to the KUBE_TEMP directory. + # The cluster-location is either ZONE (say us-east1-a) or REGION (say us-east1). 
+ # To get REGION from location, only first two parts are matched. + REGION=$(grep -o "^[a-z]*-[a-z0-9]*" "${KUBE_TEMP}"/cluster-location.txt) + MASTER_NAME="${KUBE_GCE_INSTANCE_PREFIX}"-master + + MASTER_INTERNAL_IP=$(gcloud compute addresses describe "${MASTER_NAME}-internal-ip" \ + --project "${PROJECT}" --region "${REGION}" -q --format='value(address)') + MASTER_IP=$(gcloud compute addresses describe "${MASTER_NAME}-ip" \ + --project "${PROJECT}" --region "${REGION}" -q --format='value(address)') + + # If cluster uses private master IP, two kubeconfigs are created: + # - kubeconfig with public IP, which will be used to connect to the cluster + # from outside of the cluster network + # - kubeconfig with private IP (called internal kubeconfig), which will be + # used to create hollow nodes. + # + # Note that hollow nodes might use either of these kubeconfigs, but + # using internal one is better from performance and cost perspective, since + # traffic does not need to go through Cloud NAT. + if [[ -n "${MASTER_INTERNAL_IP:-}" ]]; then + echo "Writing internal kubeconfig to '${KUBECONFIG_INTERNAL}'" + ip_regexp=${MASTER_IP//./\\.} # escape ".", so that sed won't treat it as "any char" + sed "s/${ip_regexp}/${MASTER_INTERNAL_IP}/g" "${KUBECONFIG}" > "${KUBECONFIG_INTERNAL}" + fi ) } diff --git a/test/kubemark/skeleton/util.sh b/test/kubemark/skeleton/util.sh index 430743bb886..6e8f4e21f2b 100644 --- a/test/kubemark/skeleton/util.sh +++ b/test/kubemark/skeleton/util.sh @@ -26,6 +26,10 @@ function authenticate-docker { # This function should create kubemark master and write kubeconfig to # "${RESOURCE_DIRECTORY}/kubeconfig.kubemark". +# If a cluster uses private master IP, create-kubemark-master might also write +# a second kubeconfig to "${RESOURCE_DIRECTORY}/kubeconfig-internal.kubemark". +# The difference between these two kubeconfigs is that the internal one uses +# private master IP, which might be better suited for setting up hollow nodes. 
function create-kubemark-master { echo "Creating cluster..." } diff --git a/test/kubemark/start-kubemark.sh b/test/kubemark/start-kubemark.sh index a6bf8191d89..87bbbddb89a 100755 --- a/test/kubemark/start-kubemark.sh +++ b/test/kubemark/start-kubemark.sh @@ -38,6 +38,7 @@ KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh" KUBEMARK_DIRECTORY="${KUBE_ROOT}/test/kubemark" RESOURCE_DIRECTORY="${KUBEMARK_DIRECTORY}/resources" LOCAL_KUBECONFIG="${RESOURCE_DIRECTORY}/kubeconfig.kubemark" +INTERNAL_KUBECONFIG="${RESOURCE_DIRECTORY}/kubeconfig-internal.kubemark" # Generate a random 6-digit alphanumeric tag for the kubemark image. # Used to uniquify image builds across different invocations of this script. @@ -96,12 +97,12 @@ function create-kube-hollow-node-resources { # It's bad that all component shares the same kubeconfig. # TODO(https://github.com/kubernetes/kubernetes/issues/79883): Migrate all components to separate credentials. "${KUBECTL}" create secret generic "kubeconfig" --type=Opaque --namespace="kubemark" \ - --from-file=kubelet.kubeconfig="${LOCAL_KUBECONFIG}" \ - --from-file=kubeproxy.kubeconfig="${LOCAL_KUBECONFIG}" \ - --from-file=npd.kubeconfig="${LOCAL_KUBECONFIG}" \ - --from-file=heapster.kubeconfig="${LOCAL_KUBECONFIG}" \ - --from-file=cluster_autoscaler.kubeconfig="${LOCAL_KUBECONFIG}" \ - --from-file=dns.kubeconfig="${LOCAL_KUBECONFIG}" + --from-file=kubelet.kubeconfig="${HOLLOWNODE_KUBECONFIG}" \ + --from-file=kubeproxy.kubeconfig="${HOLLOWNODE_KUBECONFIG}" \ + --from-file=npd.kubeconfig="${HOLLOWNODE_KUBECONFIG}" \ + --from-file=heapster.kubeconfig="${HOLLOWNODE_KUBECONFIG}" \ + --from-file=cluster_autoscaler.kubeconfig="${HOLLOWNODE_KUBECONFIG}" \ + --from-file=dns.kubeconfig="${HOLLOWNODE_KUBECONFIG}" # Create addon pods. # Heapster. 
@@ -227,7 +228,13 @@ function start-hollow-nodes { detect-project &> /dev/null create-kubemark-master -MASTER_IP=$(grep server "$LOCAL_KUBECONFIG" | awk -F "/" '{print $3}') +if [ -f "${INTERNAL_KUBECONFIG}" ]; then + HOLLOWNODE_KUBECONFIG="${INTERNAL_KUBECONFIG}" +else + HOLLOWNODE_KUBECONFIG="${LOCAL_KUBECONFIG}" +fi + +MASTER_IP=$(grep server "${HOLLOWNODE_KUBECONFIG}" | awk -F "/" '{print $3}') start-hollow-nodes