switch to e2 machine types

This commit is contained in:
upodroid 2023-06-12 22:51:47 +01:00 committed by upodroid
parent 3631efd85c
commit e2227a24fb
6 changed files with 17 additions and 27 deletions

View File

@@ -27,10 +27,7 @@ function get-num-nodes {
# NUM_NODES # NUM_NODES
# NUM_WINDOWS_NODES # NUM_WINDOWS_NODES
function get-master-size { function get-master-size {
local suggested_master_size=1 local suggested_master_size=2
if [[ "$(get-num-nodes)" -gt "5" ]]; then
suggested_master_size=2
fi
if [[ "$(get-num-nodes)" -gt "10" ]]; then if [[ "$(get-num-nodes)" -gt "10" ]]; then
suggested_master_size=4 suggested_master_size=4
fi fi
@@ -43,9 +40,6 @@ function get-master-size {
if [[ "$(get-num-nodes)" -gt "500" ]]; then if [[ "$(get-num-nodes)" -gt "500" ]]; then
suggested_master_size=32 suggested_master_size=32
fi fi
if [[ "$(get-num-nodes)" -gt "2000" ]]; then
suggested_master_size=64
fi
echo "${suggested_master_size}" echo "${suggested_master_size}"
} }

View File

@@ -27,12 +27,10 @@ ZONE=${KUBE_GCE_ZONE:-us-central1-b}
export REGION=${ZONE%-*} export REGION=${ZONE%-*}
RELEASE_REGION_FALLBACK=${RELEASE_REGION_FALLBACK:-false} RELEASE_REGION_FALLBACK=${RELEASE_REGION_FALLBACK:-false}
REGIONAL_KUBE_ADDONS=${REGIONAL_KUBE_ADDONS:-true} REGIONAL_KUBE_ADDONS=${REGIONAL_KUBE_ADDONS:-true}
# TODO: Migrate to e2-standard machine family. NODE_SIZE=${NODE_SIZE:-e2-standard-2}
NODE_SIZE=${NODE_SIZE:-n1-standard-2}
NUM_NODES=${NUM_NODES:-3} NUM_NODES=${NUM_NODES:-3}
NUM_WINDOWS_NODES=${NUM_WINDOWS_NODES:-0} NUM_WINDOWS_NODES=${NUM_WINDOWS_NODES:-0}
# TODO: Migrate to e2-standard machine family. MASTER_SIZE=${MASTER_SIZE:-e2-standard-$(get-master-size)}
MASTER_SIZE=${MASTER_SIZE:-n1-standard-$(get-master-size)}
MASTER_MIN_CPU_ARCHITECTURE=${MASTER_MIN_CPU_ARCHITECTURE:-} # To allow choosing better architectures. MASTER_MIN_CPU_ARCHITECTURE=${MASTER_MIN_CPU_ARCHITECTURE:-} # To allow choosing better architectures.
export MASTER_DISK_TYPE=pd-ssd export MASTER_DISK_TYPE=pd-ssd
MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)} MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)}

View File

@@ -26,12 +26,10 @@ ZONE=${KUBE_GCE_ZONE:-us-central1-b}
export REGION=${ZONE%-*} export REGION=${ZONE%-*}
RELEASE_REGION_FALLBACK=${RELEASE_REGION_FALLBACK:-false} RELEASE_REGION_FALLBACK=${RELEASE_REGION_FALLBACK:-false}
REGIONAL_KUBE_ADDONS=${REGIONAL_KUBE_ADDONS:-true} REGIONAL_KUBE_ADDONS=${REGIONAL_KUBE_ADDONS:-true}
# TODO: Migrate to e2-standard machine family. NODE_SIZE=${NODE_SIZE:-e2-standard-2}
NODE_SIZE=${NODE_SIZE:-n1-standard-2}
NUM_NODES=${NUM_NODES:-3} NUM_NODES=${NUM_NODES:-3}
NUM_WINDOWS_NODES=${NUM_WINDOWS_NODES:-0} NUM_WINDOWS_NODES=${NUM_WINDOWS_NODES:-0}
# TODO: Migrate to e2-standard machine family. MASTER_SIZE=${MASTER_SIZE:-e2-standard-$(get-master-size)}
MASTER_SIZE=${MASTER_SIZE:-n1-standard-$(get-master-size)}
MASTER_MIN_CPU_ARCHITECTURE=${MASTER_MIN_CPU_ARCHITECTURE:-} # To allow choosing better architectures. MASTER_MIN_CPU_ARCHITECTURE=${MASTER_MIN_CPU_ARCHITECTURE:-} # To allow choosing better architectures.
export MASTER_DISK_TYPE=pd-ssd export MASTER_DISK_TYPE=pd-ssd
MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)} MASTER_DISK_SIZE=${MASTER_DISK_SIZE:-$(get-master-disk-size)}

View File

@@ -33,7 +33,7 @@ skip=${SKIP-"\[Flaky\]|\[Slow\]|\[Serial\]"}
# The number of tests that can run in parallel depends on what tests # The number of tests that can run in parallel depends on what tests
# are running and on the size of the node. Too many, and tests will # are running and on the size of the node. Too many, and tests will
# fail due to resource contention. 8 is a reasonable default for a # fail due to resource contention. 8 is a reasonable default for a
# n1-standard-1 node. # e2-standard-2 node.
# Currently, parallelism only affects when REMOTE=true. For local test, # Currently, parallelism only affects when REMOTE=true. For local test,
# ginkgo default parallelism (cores - 1) is used. # ginkgo default parallelism (cores - 1) is used.
parallelism=${PARALLELISM:-8} parallelism=${PARALLELISM:-8}

View File

@@ -375,9 +375,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { ginkgo.It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gke") e2eskipper.SkipUnlessProviderIs("gke")
ginkgo.By("Creating new node-pool with n1-standard-4 machines") ginkgo.By("Creating new node-pool with e2-standard-4 machines")
const extraPoolName = "extra-pool" const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1) addNodePool(extraPoolName, "e2-standard-4", 1)
defer deleteNodePool(extraPoolName) defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName) extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+extraNodes, resizeTimeout)) framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+extraNodes, resizeTimeout))
@@ -409,9 +409,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { ginkgo.It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gke") e2eskipper.SkipUnlessProviderIs("gke")
ginkgo.By("Creating new node-pool with n1-standard-4 machines") ginkgo.By("Creating new node-pool with e2-standard-4 machines")
const extraPoolName = "extra-pool" const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1) addNodePool(extraPoolName, "e2-standard-4", 1)
defer deleteNodePool(extraPoolName) defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName) extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+extraNodes, resizeTimeout)) framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+extraNodes, resizeTimeout))
@@ -641,9 +641,9 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
ginkgo.It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) { ginkgo.It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gke") e2eskipper.SkipUnlessProviderIs("gke")
ginkgo.By("Creating new node-pool with n1-standard-4 machines") ginkgo.By("Creating new node-pool with e2-standard-4 machines")
const extraPoolName = "extra-pool" const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1) addNodePool(extraPoolName, "e2-standard-4", 1)
defer deleteNodePool(extraPoolName) defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName) extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+extraNodes, resizeTimeout)) framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+extraNodes, resizeTimeout))
@@ -697,7 +697,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
increasedSize := manuallyIncreaseClusterSize(ctx, f, originalSizes) increasedSize := manuallyIncreaseClusterSize(ctx, f, originalSizes)
const extraPoolName = "extra-pool" const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-1", 3) addNodePool(extraPoolName, "e2-standard-2", 3)
defer deleteNodePool(extraPoolName) defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName) extraNodes := getPoolInitialSize(extraPoolName)
@@ -753,7 +753,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
// GKE-specific setup // GKE-specific setup
ginkgo.By("Add a new node pool with 0 nodes and min size 0") ginkgo.By("Add a new node pool with 0 nodes and min size 0")
const extraPoolName = "extra-pool" const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 0) addNodePool(extraPoolName, "e2-standard-4", 0)
defer deleteNodePool(extraPoolName) defer deleteNodePool(extraPoolName)
framework.ExpectNoError(enableAutoscaler(extraPoolName, 0, 1)) framework.ExpectNoError(enableAutoscaler(extraPoolName, 0, 1))
defer disableAutoscaler(extraPoolName, 0, 1) defer disableAutoscaler(extraPoolName, 0, 1)
@@ -813,7 +813,7 @@ var _ = SIGDescribe("Cluster size autoscaling [Slow]", func() {
// GKE-specific setup // GKE-specific setup
ginkgo.By("Add a new node pool with size 1 and min size 0") ginkgo.By("Add a new node pool with size 1 and min size 0")
const extraPoolName = "extra-pool" const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1) addNodePool(extraPoolName, "e2-standard-4", 1)
defer deleteNodePool(extraPoolName) defer deleteNodePool(extraPoolName)
extraNodes := getPoolInitialSize(extraPoolName) extraNodes := getPoolInitialSize(extraPoolName)
framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+extraNodes, resizeTimeout)) framework.ExpectNoError(e2enode.WaitForReadyNodes(ctx, c, nodeCount+extraNodes, resizeTimeout))

View File

@@ -85,8 +85,8 @@ func init() {
} }
const ( const (
defaultGCEMachine = "n1-standard-1" defaultGCEMachine = "e2-standard-2"
acceleratorTypeResourceFormat = "https://www.googleapis.com/compute/v1/projects/%s/zones/%s/acceleratorTypes/%s" acceleratorTypeResourceFormat = "https://www.googleapis.com/compute/beta/projects/%s/zones/%s/acceleratorTypes/%s"
) )
type GCERunner struct { type GCERunner struct {