From aa09cb097b2bb7d76a0d2a023eff335accebb5cd Mon Sep 17 00:00:00 2001 From: gmarek Date: Wed, 16 Mar 2016 11:18:04 +0100 Subject: [PATCH] Change scalability test configs --- .../kubernetes-e2e-gce-enormous-startup.yaml | 56 ------------------- .../kubernetes-jenkins/kubernetes-e2e.yaml | 11 +++- .../kubernetes-kubemark.yaml | 6 +- 3 files changed, 12 insertions(+), 61 deletions(-) delete mode 100644 hack/jenkins/job-configs/kubernetes-jenkins/kubernetes-e2e-gce-enormous-startup.yaml diff --git a/hack/jenkins/job-configs/kubernetes-jenkins/kubernetes-e2e-gce-enormous-startup.yaml b/hack/jenkins/job-configs/kubernetes-jenkins/kubernetes-e2e-gce-enormous-startup.yaml deleted file mode 100644 index a4b6fca64a4..00000000000 --- a/hack/jenkins/job-configs/kubernetes-jenkins/kubernetes-e2e-gce-enormous-startup.yaml +++ /dev/null @@ -1,56 +0,0 @@ -# FIXME: desnowflake this -- job-template: - name: 'kubernetes-e2e-gce-enormous-startup' - description: 'Starts and deletes empty 1000 node cluster. Does not allow Node failures. Test owner: gmarek' - logrotate: - daysToKeep: 7 - builders: - - shell: | - {gce-provider-env} - # XXX Not a unique project - # TODO: increase a quota for networks in kubernetes-scale and move this test to its own network - export E2E_NAME="e2e-enormous-startup" - export E2E_TEST="false" - export PROJECT="kubernetes-scale" - export FAIL_ON_GCP_RESOURCE_LEAK="false" - # Override GCE defaults. - export NETWORK_PROVIDER="flannel" - # Temporarily switch of Heapster, as this will not schedule anywhere. - # TODO: Think of a solution to enable it. 
- export KUBE_ENABLE_CLUSTER_MONITORING="none" - export KUBE_GCE_ZONE="asia-east1-a" - export MASTER_SIZE="n1-standard-32" - export NODE_SIZE="n1-standard-1" - export NODE_DISK_SIZE="50GB" - export NUM_NODES="1000" - # Reduce logs verbosity - export TEST_CLUSTER_LOG_LEVEL="--v=1" - # Increase resync period to simulate production - export TEST_CLUSTER_RESYNC_PERIOD="--min-resync-period=12h" - {post-env} - export KUBE_GCE_NETWORK="e2e-enormous-cluster" - timeout -k {kill-timeout}m 480m {runner} && rc=$? || rc=$? - {report-rc} - properties: - - mail-watcher - publishers: - - claim-build - - gcs-uploader - - log-parser - - email-ext: - recipients: "gmarek@google.com" - triggers: - - timed: 'H 20 * * *' - wrappers: - - ansicolor: - colormap: xterm - - timeout: - timeout: '{jenkins-timeout}' - fail: true - - timestamps - - workspace-cleanup - -- project: - name: 'kubernetes-e2e-gce-enormous-startup' - jobs: - - 'kubernetes-e2e-gce-enormous-startup' diff --git a/hack/jenkins/job-configs/kubernetes-jenkins/kubernetes-e2e.yaml b/hack/jenkins/job-configs/kubernetes-jenkins/kubernetes-e2e.yaml index dc42475dbb0..0a8389e3202 100644 --- a/hack/jenkins/job-configs/kubernetes-jenkins/kubernetes-e2e.yaml +++ b/hack/jenkins/job-configs/kubernetes-jenkins/kubernetes-e2e.yaml @@ -111,6 +111,8 @@ - 'gce-scalability': description: 'Run the performance/scalability tests on GCE. A larger cluster is used.' timeout: 120 + # TODO: Run this twice a day after we make kubemark-500 a blocking suite. + # cron-string: 'H H/12 * * *' job-env: | export E2E_NAME="e2e-scalability" export GINKGO_TEST_ARGS="--ginkgo.focus=\[Feature:Performance\] \ @@ -118,6 +120,7 @@ --gather-metrics-at-teardown=true \ --gather-logs-sizes=true \ --output-print-type=json" + # Create a project k8s-jenkins-scalability-head and move this test there export PROJECT="google.com:k8s-jenkins-scalability" export FAIL_ON_GCP_RESOURCE_LEAK="false" # Override GCE defaults. 
@@ -256,6 +259,8 @@ - 'gce-scalability-release-1.2': timeout: 120 description: 'Run scalability E2E tests on GCE from the release-1.2 branch.' + # Run on Saturday 8 am + cron-string: 'H 8 * * 6' job-env: | export E2E_NAME="e2e-scalability-1-2" export GINKGO_TEST_ARGS="--ginkgo.focus=\[Feature:Performance\] \ @@ -264,12 +269,13 @@ --gather-logs-sizes=true \ --output-print-type=json" # Use the 1.1 project for now, since it has quota. + # TODO: create a project k8s-e2e-gce-scalability-release and move this test there export PROJECT="k8s-e2e-gce-scalability-1-1" export FAIL_ON_GCP_RESOURCE_LEAK="false" # Override GCE defaults. export KUBE_GCE_ZONE="us-east1-b" export MASTER_SIZE="n1-standard-4" - export NODE_SIZE="n1-standard-2" + export NODE_SIZE="n1-standard-1" export NODE_DISK_SIZE="50GB" export NUM_NODES="100" export REGISTER_MASTER="true" @@ -518,7 +524,8 @@ name: kubernetes-e2e-gce-enormous-cluster test-owner: 'gmarek' emails: 'gmarek@google.com' - cron-string: 'H 8 * * *' + # Run only on Sunday and Thursday at 8 am MTV time. + cron-string: 'H 8 * * 0,4' trigger-job: '' description: 'Starts and deletes empty 1000 node cluster and runs Density 30 test on it. Does allow few Nodes to fail during startup.' timeout: 480 diff --git a/hack/jenkins/job-configs/kubernetes-jenkins/kubernetes-kubemark.yaml b/hack/jenkins/job-configs/kubernetes-jenkins/kubernetes-kubemark.yaml index 6e6de6f4a34..bfdc937d31d 100644 --- a/hack/jenkins/job-configs/kubernetes-jenkins/kubernetes-kubemark.yaml +++ b/hack/jenkins/job-configs/kubernetes-jenkins/kubernetes-kubemark.yaml @@ -73,7 +73,7 @@ - 'high-density-100-gce': description: 'Run Kubemark high-density (100 pods/node) test on a fake 100 node cluster.' 
timeout: 160 - cron-string: '@hourly' + cron-string: 'H 20 * * 6' job-env: | export E2E_NAME="kubemark-100pods" export PROJECT="k8s-jenkins-kubemark" @@ -107,10 +107,10 @@ export KUBEMARK_MASTER_SIZE="n1-standard-16" export KUBEMARK_NUM_NODES="500" - 'gce-scale': - description: 'Run Density test on Kubemark in very large cluster. Currently only scheduled to run every 6 hours so as not to waste too many resources.' + description: 'Run Density test on Kubemark in very large cluster. Currently only scheduled to run every 12 hours so as not to waste too many resources.' # 12h - load tests take really, really, really long time. timeout: 720 - cron-string: 'H H/8 * * *' + cron-string: 'H H/12 * * *' job-env: | # XXX Not a unique project export E2E_NAME="kubemark-1000"