Create kubemark cluster as private
This removes the KUBE_GCE_PRIVATE_CLUSTER=false override that was set when creating the kubemark master. As a result, the detect-master function in util.sh detects both the private and the public master IP. The comment about cloud NAT no longer applies after https://github.com/kubernetes/kubernetes/pull/81073/files was merged (see the comments in that PR's discussion).

This is the first PR towards switching kubemark clusters to private master IPs: https://github.com/kubernetes/perf-tests/issues/874. Note that the kubemark kubeconfig will still contain the public master IP; this will be addressed in follow-up PRs.

Testing:
* set up a kubemark cluster
* verified that both the private and the public kubemark master IP are logged
* ran tests on the kubemark cluster using cluster loader
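As a quick sanity check of the testing note above, the private (networkIP) and public (natIP) addresses of the kubemark master VM can be read straight from GCE. This is only an illustrative sketch, not part of the change: the instance name, zone, and project below are placeholder assumptions, and it assumes a single NIC with one external access config.

#!/usr/bin/env bash
# Hedged sketch (not part of this commit): print the kubemark master's
# internal and external IPs to confirm both are available.
set -o errexit -o nounset -o pipefail

MASTER_NAME="${MASTER_NAME:-kubemark-master}"   # placeholder instance name
ZONE="${ZONE:-us-east1-b}"                      # placeholder zone
PROJECT="${PROJECT:-my-gce-project}"            # placeholder project

# networkIP is the private address; accessConfigs[0].natIP is the public one.
gcloud compute instances describe "${MASTER_NAME}" \
  --project "${PROJECT}" \
  --zone "${ZONE}" \
  --format='value(networkInterfaces[0].networkIP, networkInterfaces[0].accessConfigs[0].natIP)'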
This commit is contained in:
parent c8b123e8e1
commit 6f0eec4dff
@@ -44,9 +44,6 @@ function create-kubemark-master {
   export KUBE_CREATE_NODES=false
   export KUBE_GCE_INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX}-kubemark"
 
-  # Even if the "real cluster" is private, we shouldn't manage cloud nat.
-  export KUBE_GCE_PRIVATE_CLUSTER=false
-
   # Quite tricky cidr setup: we set KUBE_GCE_ENABLE_IP_ALIASES=true to avoid creating
   # cloud routes and RangeAllocator to assign cidrs by kube-controller-manager.
   export KUBE_GCE_ENABLE_IP_ALIASES=true
@@ -94,8 +91,6 @@ function delete-kubemark-master {
   export KUBE_GCE_INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX}-kubemark"
 
   export KUBE_DELETE_NETWORK=false
-  # Even if the "real cluster" is private, we shouldn't manage cloud nat.
-  export KUBE_GCE_PRIVATE_CLUSTER=false
 
   if [[ "${KUBEMARK_HA_MASTER:-}" == "true" && -n "${KUBEMARK_MASTER_ADDITIONAL_ZONES:-}" ]]; then
     for KUBE_GCE_ZONE in ${KUBEMARK_MASTER_ADDITIONAL_ZONES}; do
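For reference, after this change the touched part of create-kubemark-master reduces to the context lines above; KUBE_GCE_PRIVATE_CLUSTER is no longer forced to false, so the function inherits whatever value the surrounding environment sets for the "real" cluster. The snippet below is reconstructed from the diff context, not copied from the repository:

  export KUBE_CREATE_NODES=false
  export KUBE_GCE_INSTANCE_PREFIX="${KUBE_GCE_INSTANCE_PREFIX}-kubemark"

  # KUBE_GCE_PRIVATE_CLUSTER is no longer overridden here; it is inherited
  # from the caller's environment.

  # Quite tricky cidr setup: we set KUBE_GCE_ENABLE_IP_ALIASES=true to avoid creating
  # cloud routes and RangeAllocator to assign cidrs by kube-controller-manager.
  export KUBE_GCE_ENABLE_IP_ALIASES=true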