Merge pull request #24949 from spxtr/remove-jjb

Remove JJB scripts and configs.
This commit is contained in:
Jeff Grafton 2016-05-02 15:04:18 -07:00
commit 3a4f179c75
19 changed files with 5 additions and 2492 deletions

View File

@ -82,9 +82,11 @@ or not the job is healthy.
New jobs should be specified as YAML files to be processed by [Jenkins Job
Builder](http://docs.openstack.org/infra/jenkins-job-builder/). The YAML files
live in `job-configs` and its subfolders. Jenkins runs Jenkins Job Builder
in a Docker container defined in `job-builder-image`, and triggers it using
`update-jobs.sh`. Jenkins Job Builder uses a config file called
live in `jenkins/job-configs` and its subfolders **in the
[kubernetes/test-infra repository](https://github.com/kubernetes/test-infra)**.
Jenkins runs Jenkins Job Builder in a Docker container defined in
`job-builder-image`, and triggers it using `update-jobs.sh`. Jenkins Job Builder
uses a config file called
[jenkins_jobs.ini](http://docs.openstack.org/infra/jenkins-job-builder/execution.html)
which contains the location and credentials of the Jenkins server.

View File

@ -1,75 +0,0 @@
#!/bin/bash

# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Uses the kubekins-job-builder Docker image to compute the differences in
# the Jenkins job config XML, comparing the current git branch against upstream
# master. The filename containing the diff is printed at the end, assuming
# everything parsed successfully.
#
# Note: anecdotal evidence suggests this doesn't work correctly on OS X.
# If you find that there is no diff being generated, you may want to try setting
# OUTPUT_DIR to some directory in your home directory.
#
# When running this script from inside Docker, you must set REPO_ROOT to point
# to the path to the repository on the host, and DOCKER_VOLUME_OUTPUT_DIR must
# point to the path of $OUTPUT_DIR on the host. This is due to the way volume
# mounts work in Docker-in-Docker.

set -o errexit
set -o nounset
set -o pipefail

readonly JOB_CONFIGS_ROOT="hack/jenkins/job-configs"
readonly JOB_BUILDER_IMAGE='gcr.io/google_containers/kubekins-job-builder:1'

# Quote the nested command substitution so paths with spaces survive, and use
# BASH_SOURCE[0] explicitly (BASH_SOURCE is an array).
KUBE_ROOT=$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)
REPO_DIR=${REPO_DIR:-"${KUBE_ROOT}"}

# ':-' (substitute) rather than ':=' (assign): OUTPUT_DIR is never read again,
# so mutating the caller's environment variable is unnecessary.
readonly output_dir=${OUTPUT_DIR:-$(mktemp -d -t JJB-XXXXX)}
readonly docker_volume_output_dir=${DOCKER_VOLUME_OUTPUT_DIR:-"${output_dir}"}
readonly diff_file="${output_dir}/diff.txt"
mkdir -p "${output_dir}/upstream" "${output_dir}/patch"
echo "Saving output in ${output_dir}"

# Commands run inside the job-builder container for both trees: record the git
# version/branch, then expand the JJB YAML into job XML under /output.
readonly common_commands="\
git describe --long --tags --abbrev=14 >/output/gitversion.txt && \
git rev-parse --abbrev-ref HEAD >/output/gitbranch.txt && \
jenkins-jobs test \
  '${JOB_CONFIGS_ROOT}:${JOB_CONFIGS_ROOT}/kubernetes-jenkins' \
  -o /output/kubernetes-jenkins && \
jenkins-jobs test \
  '${JOB_CONFIGS_ROOT}:${JOB_CONFIGS_ROOT}/kubernetes-jenkins-pull' \
  -o /output/kubernetes-jenkins-pull"

# Expand upstream master's job configs.
docker run --rm=true -i \
  -v "${docker_volume_output_dir}/upstream:/output" \
  "${JOB_BUILDER_IMAGE}" \
  bash -c "git checkout master && git pull && ${common_commands}"

# Expand the local branch's job configs (repo mounted read-only).
docker run --rm=true -i \
  -v "${docker_volume_output_dir}/patch:/output" \
  -v "${REPO_DIR}:/kubernetes:ro" \
  "${JOB_BUILDER_IMAGE}" \
  bash -c "${common_commands}"

# diff exits non-zero when the trees differ; that is the expected outcome here,
# not an error, so suppress it deliberately.
diff -ruN "${output_dir}/upstream" "${output_dir}/patch" >"${diff_file}" || true
if [[ -t 1 ]]; then  # Attached to a terminal?
  less "${diff_file}"
fi

echo
echo " *** Diff saved in ${diff_file} ***"

View File

@ -1,47 +0,0 @@
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This docker image runs Jenkins Job Builder (JJB) for automatic job reconciliation.
FROM ubuntu:14.04
MAINTAINER Joe Finney <spxtr@google.com>
# Build-time work (tarball extraction, source checkouts) happens in /build.
RUN mkdir /build
WORKDIR /build
# Dependencies for JJB
RUN apt-get update && apt-get install -y \
wget \
git \
python-dev \
python-pip \
libyaml-dev \
python-yaml
RUN pip install PyYAML python-jenkins
# Required since JJB supports python 2.6, which doesn't have ordereddict built-in. We have 2.7.
RUN wget https://pypi.python.org/packages/source/o/ordereddict/ordereddict-1.1.tar.gz \
&& tar -xvf ordereddict-1.1.tar.gz \
&& cd ordereddict-1.1 \
&& python setup.py install
# Install JJB from source (tip of upstream master at image-build time).
RUN git clone https://git.openstack.org/openstack-infra/jenkins-job-builder \
&& cd jenkins-job-builder \
&& python setup.py install
# JJB configuration lives in /etc/jenkins_jobs/jenkins_jobs.ini
RUN mkdir -p /etc/jenkins_jobs
WORKDIR /
# Clone the kubernetes repo so the job configs are available at run time;
# the container's entrypoint operates from within this checkout.
RUN git clone https://github.com/kubernetes/kubernetes.git
WORKDIR kubernetes

View File

@ -1,19 +0,0 @@
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Builds and pushes the kubekins-job-builder image used to run Jenkins Job
# Builder. Override TAG on the command line (make TAG=2) to cut a new version.
TAG = 1

# 'all' produces no file named 'all'; mark it phony so make always runs it.
# Recipe lines below are tab-indented, as required by make.
.PHONY: all
all:
	docker build -t gcr.io/google_containers/kubekins-job-builder:$(TAG) .
	gcloud docker push gcr.io/google_containers/kubekins-job-builder:$(TAG)

View File

@ -1,197 +0,0 @@
# Mail Watcher Plugin alerts the specified address whenever a job config is updated or deleted.
- property:
    name: mail-watcher
    properties:
        - raw:
            xml: |
                <org.jenkinsci.plugins.mailwatcher.WatcherJobProperty plugin="mail-watcher-plugin@1.13">
                    <watcherAddresses>cloud-kubernetes-team@google.com</watcherAddresses>
                </org.jenkinsci.plugins.mailwatcher.WatcherJobProperty>
# Uploads build results to GCS whatever the final build status was, then
# uploads the build log itself via the google-cloud-storage plugin.
- publisher:
    name: gcs-uploader
    publishers:
        - postbuildscript:
            builders:
                - shell: |
                    mkdir -p _tmp
                    curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/upload-to-gcs.sh" > ./_tmp/upload-to-gcs.sh
                    chmod +x ./_tmp/upload-to-gcs.sh
                # One conditional step per terminal status so the uploader is
                # told exactly how the build finished.
                - conditional-step:
                    condition-kind: current-status
                    condition-worst: SUCCESS
                    condition-best: SUCCESS
                    steps:
                        - shell: 'JENKINS_BUILD_FINISHED=SUCCESS ./_tmp/upload-to-gcs.sh'
                - conditional-step:
                    condition-kind: current-status
                    condition-worst: UNSTABLE
                    condition-best: UNSTABLE
                    steps:
                        - shell: 'JENKINS_BUILD_FINISHED=UNSTABLE ./_tmp/upload-to-gcs.sh'
                - conditional-step:
                    condition-kind: current-status
                    condition-worst: FAILURE
                    condition-best: FAILURE
                    steps:
                        - shell: 'JENKINS_BUILD_FINISHED=FAILURE ./_tmp/upload-to-gcs.sh'
                - conditional-step:
                    condition-kind: current-status
                    condition-worst: ABORTED
                    condition-best: ABORTED
                    steps:
                        - shell: 'JENKINS_BUILD_FINISHED=ABORTED ./_tmp/upload-to-gcs.sh'
            script-only-if-succeeded: False
            script-only-if-failed: False
        # Use the plugin for the build log, since it isn't available on Jenkins slaves.
        - google-cloud-storage:
            credentials-id: kubernetes-jenkins
            uploads:
                - build-log:
                    log-name: build-log.txt
                    storage-location: gs://kubernetes-jenkins/logs/$JOB_NAME/$BUILD_NUMBER
                    share-publicly: true
                    upload-for-failed-jobs: true
# Default log parser rules.
- publisher:
    name: log-parser
    publishers:
        - logparser:
            parse-rules: /jenkins-master-data/log_parser_rules.txt
            unstable-on-warning: false
            fail-on-error: false
# There is a junit JJB tag, but it doesn't handle the flaky-test-handler plugin.
- publisher:
    name: junit-publisher
    publishers:
        - raw:
            xml: |
                <hudson.tasks.junit.JUnitResultArchiver plugin="junit@1.9">
                    <testResults>_artifacts/junit*.xml</testResults>
                    <keepLongStdio>true</keepLongStdio>
                    <testDataPublishers>
                        <de.esailors.jenkins.teststability.StabilityTestDataPublisher plugin="test-stability@1.0"/>
                        <com.google.jenkins.flakyTestHandler.plugin.JUnitFlakyTestDataPublisher plugin="flaky-test-handler@1.0.1"/>
                        <hudson.plugins.claim.ClaimTestDataPublisher plugin="claim@2.7"/>
                    </testDataPublishers>
                    <healthScaleFactor>100.0</healthScaleFactor>
                </hudson.tasks.junit.JUnitResultArchiver>
# Implements Docker Build and Publish Plugin
# https://wiki.jenkins-ci.org/display/JENKINS/CloudBees+Docker+Build+and+Publish+plugin
# From the docs: 'This plugin provides the ability to build projects with a Dockerfile, and publish the resultant
# tagged image (repo) to the docker registry.'
#
# repoName: docker registry repo name to publish the image
# dockerfilePath: workspace relative path to the 'Dockerfile' to build
# tag: Tag to apply to the built image
# credentialsId: Jenkins 'Username with password' credentials ID (hex string) to authenticate to docker registry.
#     Found under http:<jenkinsurl>/credentials -> "Advanced"
- builder:
    name: docker-build-publish
    builders:
        - raw:
            xml: |
                <com.cloudbees.dockerpublish.DockerBuilder plugin="docker-build-publish@1.1">
                    <server plugin="docker-commons@1.2"/>
                    <registry plugin="docker-commons@1.2">
                        <credentialsId>{credentialsId}</credentialsId>
                    </registry>
                    <repoName>{repoName}</repoName>
                    <noCache>true</noCache>
                    <forcePull>true</forcePull>
                    <dockerfilePath>{dockerfilePath}</dockerfilePath>
                    <skipBuild>false</skipBuild>
                    <skipDecorate>false</skipDecorate>
                    <repoTag>{tag}</repoTag>
                    <skipPush>false</skipPush>
                    <createFingerprint>true</createFingerprint>
                    <skipTagLatest>true</skipTagLatest>
                </com.cloudbees.dockerpublish.DockerBuilder>
        # Remove the image we just pushed so disk space on the builder
        # doesn't leak between runs.
        - shell: |
            #!/bin/bash
            set -e
            set -x
            docker rmi -f {repoName}:{tag}
# Default email recipients are set in Jenkins global config
- defaults:
    name: global
    disable_job: false
    emails: '$DEFAULT_RECIPIENTS'
    cron-string: 'H/30 * * * *'  # Set a 30m floor to start jobs.
    sq-cron-string: 'H/5 * * * *'  # Lower floor to 5m for blocking jobs.
    # How long to wait after sending TERM to send KILL (minutes)
    kill-timeout: 15
    # Just to be safe, use the Jenkins timeout after a long time.
    jenkins-timeout: 600
    # report-rc assumes that $rc is set to the exit status of the runner.
    # Braces are doubled ({{rc}}) so JJB template expansion leaves them alone.
    report-rc: |
        if [[ ${{rc}} -eq 124 || ${{rc}} -eq 137 ]]; then
            echo "Build timed out" >&2
        elif [[ ${{rc}} -ne 0 ]]; then
            echo "Build failed" >&2
        fi
        echo "Exiting with code: ${{rc}}"
        exit ${{rc}}
    branch: 'master'
    job-env: ''
    runner: bash <(curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/dockerized-e2e-runner.sh")
    legacy-runner: bash <(curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/e2e-runner.sh")
    old-runner-1-1: bash <(curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.1/hack/jenkins/e2e.sh")
    # XXX This is a hack to run only the tests we care about, without importing all of the skip list vars from the v1.1 e2e.sh.
    default-skip-list-1-1: Autoscaling\sSuite|resource\susage\stracking|Nodes|Etcd\sFailure|MasterCerts|experimental\sresource\susage\stracking|ServiceLoadBalancer|Shell|Daemon\sset|Deployment|Skipped|Restart\sshould\srestart\sall\snodes|Example|Reboot|ServiceLoadBalancer|DaemonRestart\sController\sManager|Daemon\sset\sshould\srun\sand\sstop\scomplex\sdaemon|Resource\susage\sof\ssystem\scontainers|allows\sscheduling\sof\spods\son\sa\sminion\safter\sit\srejoins\sthe\scluster
    old-runner-1-0: bash <(curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.0/hack/jenkins/e2e.sh")
    # XXX This is a hack to run only the tests we care about, without importing all of the skip list vars from the v1.0 e2e.sh.
    default-skip-list-1-0: Skipped|Restart\sshould\srestart\sall\snodes|Example|Reboot|ServiceLoadBalancer|DaemonRestart\sController\sManager|Daemon\sset\sshould\srun\sand\sstop\scomplex\sdaemon|Resource\susage\sof\ssystem\scontainers|allows\sscheduling\sof\spods\son\sa\sminion\safter\sit\srejoins\sthe\scluster
    provider-env: ''
    gce-provider-env: |
        export KUBERNETES_PROVIDER="gce"
        export E2E_MIN_STARTUP_PODS="1"
        export KUBE_GCE_ZONE="us-central1-f"
        export FAIL_ON_GCP_RESOURCE_LEAK="true"
        export CLOUDSDK_CORE_PRINT_UNHANDLED_TRACEBACKS="1"
    gke-provider-env: |
        export KUBERNETES_PROVIDER="gke"
        export ZONE="us-central1-f"
        # By default, GKE tests run against the GKE test endpoint using CI Cloud SDK.
        # Release jobs (e.g. prod, staging, and test) override these two variables.
        export CLOUDSDK_BUCKET="gs://cloud-sdk-testing/ci/staging"
        export CLOUDSDK_API_ENDPOINT_OVERRIDES_CONTAINER="https://test-container.sandbox.googleapis.com/"
        export FAIL_ON_GCP_RESOURCE_LEAK="true"
    aws-provider-env: |
        export KUBERNETES_PROVIDER="aws"
        export E2E_MIN_STARTUP_PODS="1"
        export KUBE_AWS_ZONE="us-west-2a"
        export MASTER_SIZE="m3.medium"
        export NODE_SIZE="m3.medium"
        export NUM_NODES="3"
    post-env: |
        # Nothing should want Jenkins $HOME
        export HOME=${{WORKSPACE}}
        # Assume we're upping, testing, and downing a cluster
        export E2E_UP="${{E2E_UP:-true}}"
        export E2E_TEST="${{E2E_TEST:-true}}"
        export E2E_DOWN="${{E2E_DOWN:-true}}"
        # Skip gcloud update checking
        export CLOUDSDK_COMPONENT_MANAGER_DISABLE_UPDATE_CHECK=true
        # AWS variables
        export KUBE_AWS_INSTANCE_PREFIX="${{E2E_NAME:-jenkins-e2e}}"
        # GCE variables
        export INSTANCE_PREFIX="${{E2E_NAME:-jenkins-e2e}}"
        export KUBE_GCE_NETWORK="${{E2E_NAME:-jenkins-e2e}}"
        export KUBE_GCE_INSTANCE_PREFIX="${{E2E_NAME:-jenkins-e2e}}"
        export GCE_SERVICE_ACCOUNT=$(gcloud auth list 2> /dev/null | grep active | cut -f3 -d' ')
        # GKE variables
        export CLUSTER_NAME="${{E2E_NAME:-jenkins-e2e}}"
        export KUBE_GKE_NETWORK="${{E2E_NAME:-jenkins-e2e}}"
        # Get golang into our PATH so we can run e2e.go
        export PATH="${{PATH}}:/usr/local/go/bin"

View File

@ -1,64 +0,0 @@
- job:
    name: 'jenkins-daily-maintenance'
    description: 'Run gcloud components update and clean Docker images. Test owner: spxtr.'
    logrotate:
        numToKeep: 200
    builders:
        - shell: |
            sudo chown -R jenkins:jenkins /usr/local/share/google
            gcloud components update
            gcloud components update alpha
            gcloud components update beta
            # Select only exited or dead containers that weren't created today.
            # A sort of work-around for https://github.com/kubernetes/kubernetes/issues/24661
            containers_to_remove=($(docker ps -a -f 'status=exited' -f 'status=dead' --format='{{.CreatedAt}}\t{{.ID}}' | grep -v ^$(date +%Y-%m-%d) | cut -f2 || true))
            # Copied from http://blog.yohanliyanage.com/2015/05/docker-clean-up-after-yourself/
            docker rm -v "${containers_to_remove[@]:+${containers_to_remove[@]}}" || true
            docker rmi $(docker images -q -f 'dangling=true') || true
            docker run -v /var/run/docker.sock:/var/run/docker.sock -v /var/lib/docker:/var/lib/docker --rm martin/docker-cleanup-volumes

- job:
    name: 'jenkins-daily-maintenance-all'
    description: 'Run jenkins-daily-maintenance on all nodes. Test owner: spxtr.'
    logrotate:
        numToKeep: 200
    builders:
        # Run jenkins-daily-maintenance on all nodes.
        - raw:
            xml: |
                <hudson.plugins.parameterizedtrigger.TriggerBuilder plugin="parameterized-trigger@2.29">
                    <configs>
                        <hudson.plugins.parameterizedtrigger.BlockableBuildTriggerConfig>
                            <configs class="empty-list"/>
                            <configFactories>
                                <org.jvnet.jenkins.plugins.nodelabelparameter.parameterizedtrigger.AllNodesBuildParameterFactory plugin="nodelabelparameter@1.7"/>
                            </configFactories>
                            <projects>jenkins-daily-maintenance</projects>
                            <condition>ALWAYS</condition>
                            <triggerWithNoParameters>false</triggerWithNoParameters>
                            <block>
                                <buildStepFailureThreshold>
                                    <name>FAILURE</name>
                                    <ordinal>2</ordinal>
                                    <color>RED</color>
                                    <completeBuild>true</completeBuild>
                                </buildStepFailureThreshold>
                                <unstableThreshold>
                                    <name>UNSTABLE</name>
                                    <ordinal>1</ordinal>
                                    <color>YELLOW</color>
                                    <completeBuild>true</completeBuild>
                                </unstableThreshold>
                                <failureThreshold>
                                    <name>FAILURE</name>
                                    <ordinal>2</ordinal>
                                    <color>RED</color>
                                    <completeBuild>true</completeBuild>
                                </failureThreshold>
                            </block>
                            <buildAllNodesWithLabel>false</buildAllNodesWithLabel>
                        </hudson.plugins.parameterizedtrigger.BlockableBuildTriggerConfig>
                    </configs>
                </hudson.plugins.parameterizedtrigger.TriggerBuilder>
    triggers:
        - timed: '@daily'

View File

@ -1,17 +0,0 @@
- job:
    name: kubernetes-update-jenkins-jobs
    description: 'Update Jenkins jobs based on configs in https://github.com/kubernetes/kubernetes/tree/master/hack/jenkins/job-configs. Test owner: spxtr.'
    logrotate:
        daysToKeep: 7
    node: master
    triggers:
        - timed: 'H/15 * * * *'
    builders:
        - shell: |
            curl -fsS https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/update-jobs.sh > update-jobs.sh
            chmod +x update-jobs.sh
            ./update-jobs.sh "hack/jenkins/job-configs:hack/jenkins/job-configs/kubernetes-jenkins-pull"
    publishers:
        - email-ext:
            recipients: spxtr@google.com

View File

@ -1,60 +0,0 @@
- job-template:
    name: 'kubernetes-{build}'
    description: 'Grab the latest from GitHub, build. Test owner: Build Cop.'
    node: 'build'
    logrotate:
        numToKeep: 200
    builders:
        - shell: 'JENKINS_BUILD_STARTED=true bash <(curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/upload-to-gcs.sh")'
        - shell: |
            timeout -k {kill-timeout}m {timeout}m ./hack/jenkins/build.sh && rc=$? || rc=$?
            {report-rc}
    properties:
        - mail-watcher
    publishers:
        - claim-build
        - gcs-uploader
        - log-parser
        - email-ext:
            recipients: $DEFAULT_RECIPIENTS, cloud-kubernetes-team@google.com
            presend-script: $DEFAULT_PRESEND_SCRIPT
            fail: true
            fixed: true
            send-to:
                - culprits
                - recipients
    scm:
        - git:
            url: https://github.com/kubernetes/kubernetes
            branches:
                - '{branch}'
            browser: githubweb
            browser-url: https://github.com/kubernetes/kubernetes
            wipe-workspace: false
            skip-tag: true
    triggers:
        - pollscm:
            cron: 'H/2 * * * *'
    wrappers:
        - timeout:
            timeout: '{jenkins-timeout}'
            fail: true
        - timestamps

- project:
    name: kubernetes-builds
    build:
        - 'build':
            branch: 'master'
            timeout: 50
        - 'build-1.0':
            branch: 'release-1.0'
            timeout: 30
        - 'build-1.1':
            branch: 'release-1.1'
            timeout: 30
        - 'build-1.2':
            branch: 'release-1.2'
            timeout: 30
    jobs:
        - 'kubernetes-{build}'

File diff suppressed because it is too large Load Diff

View File

@ -1,150 +0,0 @@
- job-template:
    name: 'kubernetes-kubemark-{suffix}'
    description: '{description} Test owner: gmarek'
    logrotate:
        daysToKeep: 7
    node: 'e2e'
    builders:
        - shell: |
            {provider-env}
            {job-env}
            {post-env}
            timeout -k {kill-timeout}m {timeout}m {runner} && rc=$? || rc=$?
            {report-rc}
    properties:
        - mail-watcher
    publishers:
        - claim-build
        - junit-publisher
        - gcs-uploader
        - log-parser
        - email-ext:
            recipients: "gmarek@google.com"
    triggers:
        - reverse:
            jobs: 'kubernetes-build'
            result: success
        - timed: '{cron-string}'
    wrappers:
        - ansicolor:
            colormap: xterm
        - timeout:
            timeout: '{jenkins-timeout}'
            fail: true
        - timestamps
        - workspace-cleanup

- project:
    name: kubernetes-kubemark
    provider-env: '{gce-provider-env}'
    suffix:
        - '5-gce':
            description: 'Run minimal Kubemark to make sure it is not broken.'
            timeout: 60
            cron-string: '{sq-cron-string}'
            job-env: |
                export E2E_NAME="kubemark-5"
                export PROJECT="k8s-jenkins-kubemark"
                export E2E_TEST="false"
                export USE_KUBEMARK="true"
                export KUBEMARK_TESTS="starting\s30\spods\sper\snode"
                export KUBEMARK_TEST_ARGS="--gather-resource-usage=true"
                # Override defaults to be independent from GCE defaults and set kubemark parameters
                export NUM_NODES="1"
                export MASTER_SIZE="n1-standard-1"
                export NODE_SIZE="n1-standard-2"
                export KUBE_GCE_ZONE="us-central1-f"
                export KUBEMARK_MASTER_SIZE="n1-standard-1"
                export KUBEMARK_NUM_NODES="5"
                # The kubemark scripts build a Docker image
                export JENKINS_ENABLE_DOCKER_IN_DOCKER="y"
        - '100-gce':
            description: 'Run small-ish kubemark cluster to continuously run performance experiments'
            timeout: 240
            cron-string: 'H H/6 * * *'
            job-env: |
                export E2E_NAME="kubemark-100"
                export PROJECT="k8s-jenkins-kubemark"
                export E2E_TEST="false"
                export USE_KUBEMARK="true"
                export KUBEMARK_TESTS="\[Feature:Performance\]"
                export CREATE_SERVICES="true"
                # Override defaults to be independent from GCE defaults and set kubemark parameters
                export NUM_NODES="3"
                export MASTER_SIZE="n1-standard-2"
                export NODE_SIZE="n1-standard-4"
                export KUBE_GCE_ZONE="us-central1-f"
                export KUBEMARK_MASTER_SIZE="n1-standard-4"
                export KUBEMARK_NUM_NODES="100"
                # The kubemark scripts build a Docker image
                export JENKINS_ENABLE_DOCKER_IN_DOCKER="y"
        - 'high-density-100-gce':
            description: 'Run Kubemark high-density (100 pods/node) test on a fake 100 node cluster.'
            timeout: 160
            cron-string: 'H 20 * * 6'
            job-env: |
                export E2E_NAME="kubemark-100pods"
                export PROJECT="k8s-jenkins-kubemark"
                export E2E_TEST="false"
                export USE_KUBEMARK="true"
                export KUBEMARK_TESTS="\[Feature:HighDensityPerformance\]"
                export KUBEMARK_TEST_ARGS="--gather-resource-usage=true"
                # Override defaults to be independent from GCE defaults and set kubemark parameters
                export NUM_NODES="3"
                export MASTER_SIZE="n1-standard-2"
                export NODE_SIZE="n1-standard-4"
                export KUBE_GCE_ZONE="us-east1-d"
                export KUBEMARK_MASTER_SIZE="n1-standard-4"
                export KUBEMARK_NUM_NODES="100"
                # The kubemark scripts build a Docker image
                export JENKINS_ENABLE_DOCKER_IN_DOCKER="y"
        - '500-gce':
            description: 'Run Kubemark test on a fake 500 node cluster to test for regressions on bigger clusters'
            timeout: 300
            cron-string: '@hourly'
            job-env: |
                # XXX Not a unique project
                export E2E_NAME="kubemark-500"
                export PROJECT="kubernetes-scale"
                export E2E_TEST="false"
                export USE_KUBEMARK="true"
                export KUBEMARK_TESTS="\[Feature:Performance\]"
                export KUBEMARK_TEST_ARGS="--gather-resource-usage=true"
                export FAIL_ON_GCP_RESOURCE_LEAK="false"
                # Override defaults to be independent from GCE defaults and set kubemark parameters
                export NUM_NODES="6"
                export MASTER_SIZE="n1-standard-4"
                export NODE_SIZE="n1-standard-8"
                export KUBE_GCE_ZONE="us-central1-f"
                export KUBEMARK_MASTER_SIZE="n1-standard-16"
                export KUBEMARK_NUM_NODES="500"
                # The kubemark scripts build a Docker image
                export JENKINS_ENABLE_DOCKER_IN_DOCKER="y"
        - 'gce-scale':
            description: 'Run Density test on Kubemark in very large cluster. Currently only scheduled to run every 12 hours so as not to waste too many resources.'
            # 12h - load tests take really, really, really long time.
            timeout: 720
            cron-string: 'H H/12 * * *'
            job-env: |
                # XXX Not a unique project
                export E2E_NAME="kubemark-1000"
                export PROJECT="kubernetes-scale"
                export E2E_TEST="false"
                export USE_KUBEMARK="true"
                export KUBEMARK_TESTS="\[Feature:Performance\]"
                export KUBEMARK_TEST_ARGS="--gather-resource-usage=true"
                export FAIL_ON_GCP_RESOURCE_LEAK="false"
                # Override defaults to be independent from GCE defaults and set kubemark parameters
                # We need 11 so that we won't hit max-pods limit (set to 100). TODO: do it in a nicer way.
                export NUM_NODES="11"
                export MASTER_SIZE="n1-standard-4"
                # Note: can fit about 17 hollow nodes per core so NUM_NODES x
                # cores_per_node should be set accordingly.
                export NODE_SIZE="n1-standard-8"
                export KUBEMARK_MASTER_SIZE="n1-standard-16"
                export KUBEMARK_NUM_NODES="1000"
                export KUBE_GCE_ZONE="us-central1-f"
                # The kubemark scripts build a Docker image
                export JENKINS_ENABLE_DOCKER_IN_DOCKER="y"
    jobs:
        - 'kubernetes-kubemark-{suffix}'

View File

@ -1,160 +0,0 @@
- job-template:
    name: 'kubernetes-soak-weekly-deploy-{suffix}'
    description: '{deploy-description} Test owner: {test-owner}'
    logrotate:
        daysToKeep: 14
    node: 'master'
    builders:
        - shell: |
            {provider-env}
            {soak-deploy}
            {job-env}
            {post-env}
            timeout -k {kill-timeout}m 90m {legacy-runner} && rc=$? || rc=$?
            {report-rc}
    properties:
        - build-blocker:
            use-build-blocker: true
            blocking-jobs:
                - 'kubernetes-soak-continuous-e2e-{suffix}'
    publishers:
        - email-ext
        - gcs-uploader
    triggers:
        - timed: 'H 0 * * 2'
    wrappers:
        - ansicolor:
            colormap: xterm
        - timeout:
            timeout: '{jenkins-timeout}'
            fail: true
        - timestamps
        - workspace-cleanup

- job-template:
    name: 'kubernetes-soak-continuous-e2e-{suffix}'
    description: '{e2e-description} Test Owner: {test-owner}'
    workspace: '/var/lib/jenkins/jobs/kubernetes-soak-weekly-deploy-{suffix}/workspace'
    logrotate:
        daysToKeep: 7
    node: 'master'
    builders:
        - shell: |
            {provider-env}
            {soak-continuous}
            {job-env}
            {post-env}
            timeout -k {kill-timeout}m 360m {legacy-runner} && rc=$? || rc=$?
            {report-rc}
    properties:
        - build-blocker:
            use-build-blocker: true
            blocking-jobs:
                - 'kubernetes-soak-weekly-deploy-{suffix}'
            queue-scanning: ALL
    publishers:
        - claim-build
        - email-ext
        - gcs-uploader
        - junit-publisher
        - log-parser
    triggers:
        - timed: '{cron-string}'
    wrappers:
        - ansicolor:
            colormap: xterm
        - timeout:
            timeout: '{jenkins-timeout}'
            fail: true
        - timestamps

- project:
    name: soak
    test-owner: 'Build Cop'
    soak-deploy: |
        export FAIL_ON_GCP_RESOURCE_LEAK="false"
        export E2E_TEST="false"
        export E2E_DOWN="false"
    soak-continuous: |
        export JENKINS_USE_EXISTING_BINARIES="y"
        export FAIL_ON_GCP_RESOURCE_LEAK="false"
        export E2E_UP="false"
        export E2E_DOWN="false"
        # Clear out any orphaned namespaces in case previous run was interrupted.
        export E2E_CLEAN_START="true"
        # TODO: Remove when we figure out #22166 and other docker potential slowness.
        export DOCKER_TEST_LOG_LEVEL="--log-level=warn"
        # We should be testing the reliability of a long-running cluster. The
        # [Disruptive] tests kill/restart components or nodes in the cluster,
        # defeating the purpose of a soak cluster. (#15722)
        export GINKGO_TEST_ARGS="--ginkgo.skip=\[Disruptive\]|\[Flaky\]|\[Feature:.+\]"
    suffix:
        - 'gce':
            deploy-description: |
                Deploy Kubernetes to soak cluster using the latest successful
                Kubernetes build every week.<br>
                If a kubernetes-soak-continuous-e2e-gce build is running,
                this deployment build will be blocked and remain in the queue
                until the test run is complete.<br>
            e2e-description: |
                Assumes Kubernetes soak cluster is already deployed.<br>
                If a kubernetes-soak-weekly-deploy-gce build is enqueued,
                builds will be blocked and remain in the queue until the
                deployment is complete.<br>
            branch: 'master'
            provider-env: '{gce-provider-env}'
            job-env: |
                export PROJECT="k8s-jkns-gce-soak"
        - 'gce-2':
            deploy-description: Clone of kubernetes-soak-weekly-deploy-gce.
            e2e-description: Clone of kubernetes-soak-continuous-e2e-gce.
            branch: 'master'
            provider-env: '{gce-provider-env}'
            job-env: |
                export HAIRPIN_MODE="hairpin-veth"
                export PROJECT="k8s-jkns-gce-soak-2"
        - 'gce-1.2':
            deploy-description: |
                Deploy Kubernetes to soak cluster using the latest successful
                release-1.2 Kubernetes build every week.<br>
                If a kubernetes-soak-continuous-e2e-gce-1.2 build is running,
                this deployment build will be blocked and remain in the queue
                until the test run is complete.<br>
            e2e-description: |
                Assumes Kubernetes soak cluster is already deployed.<br>
                If a kubernetes-soak-weekly-deploy-gce-1.2 build is enqueued,
                builds will be blocked and remain in the queue until the
                deployment is complete.<br>
            provider-env: '{gce-provider-env}'
            job-env: |
                export PROJECT="k8s-jkns-gce-soak-1-2"
                export JENKINS_PUBLISHED_VERSION="ci/latest-1.2"
        - 'gke':
            deploy-description: |
                Deploy Kubernetes to a GKE soak cluster using the staging GKE
                Kubernetes build every week.<br>
                If a kubernetes-soak-continuous-e2e-gke build is running, this
                deployment build will be blocked and remain in the queue until
                the test run is complete.<br>
                Current Settings:<br>
                - provider: GKE<br>
                - apiary: staging<br>
                - borg job: staging<br>
                - client (kubectl): release/stable.txt<br>
                - cluster (k8s): release/stable.txt<br>
                - tests: release/stable.txt<br>
            e2e-description: |
                Assumes Kubernetes GKE soak cluster is already deployed.<br>
                If a kubernetes-soak-weekly-deploy-gke build is enqueued,
                builds will be blocked and remain in the queue until the
                deployment is complete.<br>
            branch: 'master'
            provider-env: '{gke-provider-env}'
            job-env: |
                export PROJECT="k8s-jkns-gke-soak"
                # Need at least n1-standard-2 nodes to run kubelet_perf tests
                export MACHINE_TYPE="n1-standard-2"
                export E2E_OPT="--check_version_skew=false"
    jobs:
        - 'kubernetes-soak-weekly-deploy-{suffix}'
        - 'kubernetes-soak-continuous-e2e-{suffix}'

View File

@ -1,89 +0,0 @@
- job-template:
    name: 'kubernetes-test-{suffix}'
    description: |
        Grab the latest from GitHub, then run unit and integration tests.<br>
        Test Owner: Build Cop
    logrotate:
        numToKeep: 200
    node: unittest
    builders:
        - shell: 'JENKINS_BUILD_STARTED=true bash <(curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/upload-to-gcs.sh")'
        - shell: |
            export KUBE_FORCE_VERIFY_CHECKS='y'
            export KUBE_VERIFY_GIT_BRANCH='{branch}'
            timeout -k {kill-timeout}m {timeout}m ./hack/jenkins/gotest-dockerized.sh && rc=$? || rc=$?
            {report-rc}
    publishers:
        - claim-build
        - gcs-uploader
        - log-parser
        - email-ext
        - xunit:
            thresholds:
                - failed:
                    unstable: 0
                    unstablenew: 0
                    failure:
                    failurenew:
                - skipped:
                    unstable:
                    unstablenew:
                    failure:
                    failurenew:
            types:
                - junit:
                    pattern: '_artifacts/**.xml'
                    deleteoutput: false
    scm:
        - git:
            url: https://github.com/kubernetes/kubernetes
            branches:
                - '{branch}'
            browser: githubweb
            browser-url: https://github.com/kubernetes/kubernetes
            wipe-workspace: false
            skip-tag: true
    triggers:
        - pollscm:
            cron: 'H/2 * * * *'
        - timed: '{cron-string}'
    wrappers:
        - ansicolor:
            colormap: xterm
        - timeout:
            timeout: '{jenkins-timeout}'
            fail: true
        - timestamps
        # Pre-build cleanup via sudo rm, since workspace files may be
        # root-owned after dockerized test runs.
        - raw:
            xml: |
                <hudson.plugins.ws__cleanup.PreBuildCleanup plugin="ws-cleanup@0.28">
                    <patterns>
                        <hudson.plugins.ws__cleanup.Pattern>
                            <pattern>*</pattern>
                            <type>INCLUDE</type>
                        </hudson.plugins.ws__cleanup.Pattern>
                    </patterns>
                    <deleteDirs>true</deleteDirs>
                    <cleanupParameter/>
                    <externalDelete>sudo rm -rf %s</externalDelete>
                </hudson.plugins.ws__cleanup.PreBuildCleanup>

- project:
    name: kubernetes-test-go
    suffix:
        - 'go':
            branch: 'master'
            cron-string: '{sq-cron-string}'
            timeout: 100
        - 'go-release-1.2':
            branch: 'release-1.2'
            # Every 3 hours
            cron-string: 'H H/3 * * *'
            timeout: 100
        - 'go-release-1.1':
            branch: 'release-1.1'
            # Every 6 hours
            cron-string: 'H H/12 * * *'
            timeout: 60
    jobs:
        - 'kubernetes-test-{suffix}'

View File

@ -1,21 +0,0 @@
- job:
    name: kubernetes-test-summary
    description: 'Create a daily test summary and upload to GCS. Test owner: spxtr.'
    triggers:
        # Run every night at midnight at a random minute.
        - timed: 'H 0 * * *'
    scm:
        - git:
            url: https://www.github.com/kubernetes/kubernetes
            branches:
                - master
            browser: githubweb
            browser-url: https://github.com/kubernetes/kubernetes
            skip-tag: true
    builders:
        - shell: |
            cd hack/jenkins/test-history
            ./gen_history http://jenkins-master:8080/
    publishers:
        - email-ext:
            recipients: spxtr@google.com

View File

@ -1,16 +0,0 @@
- job:
    name: kubernetes-update-jenkins-jobs
    description: 'Update Jenkins jobs based on configs in https://github.com/kubernetes/kubernetes/tree/master/hack/jenkins/job-configs. Test owner: spxtr.'
    logrotate:
        daysToKeep: 7
    triggers:
        - timed: 'H/15 * * * *'
    builders:
        - shell: |
            curl -fsS https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/update-jobs.sh > update-jobs.sh
            chmod +x update-jobs.sh
            ./update-jobs.sh "hack/jenkins/job-configs:hack/jenkins/job-configs/kubernetes-jenkins"
    publishers:
        - email-ext:
            recipients: spxtr@google.com

View File

@ -1,283 +0,0 @@
# These configs rely on the fact that previous branches will overwrite any
# environment variables that they need. Currently, 1.0 and 1.1 jobs use the
# old e2e.sh script on their branches. To turn up new upgrade jobs from these
# releases, you will need to fiddle with e2e.sh on their branches. For all
# branches after those, you will simply need to add a new entry in one of the
# projects below.

# Orchestrating multijob that drives a full upgrade-test flow: deploy the old
# version, smoke-test it, upgrade the master, test, upgrade the nodes, then
# run both old- and new-version e2e suites.  Each phase triggers one of the
# step jobs defined by the templates below.
- job-template:
    name: 'kubernetes-upgrade-{provider}-{version-old}-{version-new}'
    disabled: false
    description: 'Upgrade multijob from {version-old} to {version-new}. Test owner: ihmccreery.'
    project-type: multijob
    triggers:
        - timed: '@hourly'
    node: 'master'
    builders:
        # TODO(ihmccreery) In theory, we could get ourselves into trouble by
        # editing these things in the middle of a run. Jenkins Job Builder
        # would delete jobs, and they'd leave resources lying around. We
        # should either (1) make this not a multi-job, or (2) add a script here
        # to update these jobs only at the beginning of a multijob run.
        #
        # This pain would also pretty much disappear with #18119, too.
        #
        # Only the Deploy phase gates the rest (SUCCESSFUL); every later phase
        # runs regardless of earlier test failures (COMPLETED).
        - multijob:
            name: Deploy
            condition: SUCCESSFUL
            projects:
                - name: 'kubernetes-upgrade-{provider}-{version-old}-{version-new}-step1-deploy'
        - multijob:
            name: Kubectl Test New
            condition: COMPLETED
            projects:
                - name: 'kubernetes-upgrade-{provider}-{version-old}-{version-new}-step2-kubectl-e2e-new'
        - multijob:
            name: Upgrade Master
            condition: COMPLETED
            projects:
                - name: 'kubernetes-upgrade-{provider}-{version-old}-{version-new}-step3-upgrade-master'
        - multijob:
            name: Test Old
            condition: COMPLETED
            projects:
                - name: 'kubernetes-upgrade-{provider}-{version-old}-{version-new}-step4-e2e-old'
        - multijob:
            name: Upgrade Cluster
            condition: COMPLETED
            projects:
                - name: 'kubernetes-upgrade-{provider}-{version-old}-{version-new}-step5-upgrade-cluster'
        - multijob:
            # NOTE(review): phase name duplicates step4's "Test Old"; the
            # triggered project names differ, so this is cosmetic only.
            name: Test Old
            condition: COMPLETED
            projects:
                - name: 'kubernetes-upgrade-{provider}-{version-old}-{version-new}-step6-e2e-old'
        - multijob:
            name: Test New
            condition: COMPLETED
            projects:
                - name: 'kubernetes-upgrade-{provider}-{version-old}-{version-new}-step7-e2e-new'
# Step 1: bring up a cluster at {version-old}.  Later steps reuse this job's
# workspace, so anything the deploy writes to disk (cluster config, kubeconfig)
# stays available across the multijob run.
- job-template:
    name: 'kubernetes-upgrade-{provider}-{version-old}-{version-new}-step1-deploy'
    description: 'Deploy a cluster at {version-old} to be tested and upgraded to {version-new}. Test owner: ihmccreery.'
    logrotate:
        daysToKeep: 7
    node: 'master'
    builders:
        - shell: |
            # per-provider variables
            {provider-env}
            # per-upgrade-flow variables, such as project name
            {project-env}
            # per-step variables, such as whether to run tests
            {job-env}
            {post-env}
            timeout -k {kill-timeout}m 60m {runner} && rc=$? || rc=$?
            {report-rc}
    properties:
        - mail-watcher
    publishers:
        - claim-build
        # No junit-publisher, since we're not running any tests
        - gcs-uploader
        - log-parser
        - email-ext:
            recipients: 'ihmccreery@google.com'
    wrappers:
        - ansicolor:
            colormap: xterm
        - timeout:
            timeout: '{jenkins-timeout}'
            fail: true
        - timestamps
        # step1 may clean its workspace; steps 2-7 must not (see the other template).
        - workspace-cleanup
# Shared template for steps 2-7 of the upgrade flow.  Each instantiation in
# the job-group below supplies {step}, {runner}, {description}, and {job-env}.
- job-template:
    name: 'kubernetes-upgrade-{provider}-{version-old}-{version-new}-{step}'
    description: '{description} Test owner: ihmccreery.'
    # Use the same workspace as step1
    workspace: /var/lib/jenkins/jobs/kubernetes-upgrade-{provider}-{version-old}-{version-new}-step1-deploy/workspace/
    logrotate:
        daysToKeep: 7
    node: 'master'
    builders:
        - shell: |
            # per-provider variables
            {provider-env}
            # per-upgrade-flow variables, such as project name
            {project-env}
            # per-step variables, such as whether to run tests
            {job-env}
            {post-env}
            timeout -k {kill-timeout}m 300m {runner} && rc=$? || rc=$?
            {report-rc}
    properties:
        - mail-watcher
    publishers:
        - claim-build
        - junit-publisher
        - gcs-uploader
        - log-parser
        - email-ext:
            recipients: 'ihmccreery@google.com'
    wrappers:
        - ansicolor:
            colormap: xterm
        - timeout:
            timeout: '{jenkins-timeout}'
            fail: true
        - timestamps
        # Don't clean the workspace; we want to keep configs intact across steps in the multijob
# Instantiates the orchestrating multijob plus all seven step jobs for one
# provider/version pair.  Each step picks which e2e runner to fetch
# ({runner-old} = old release's script, {runner-new} = master's) and sets the
# step-specific environment via {job-env}.
- job-group:
    name: '{provider}-{version-old}-{version-new}-upgrades'
    jobs:
        - 'kubernetes-upgrade-{provider}-{version-old}-{version-new}'
        - 'kubernetes-upgrade-{provider}-{version-old}-{version-new}-step1-deploy':
            runner: '{runner-old}'
            job-env: |
                export E2E_TEST="false"
                export E2E_DOWN="false"
                export JENKINS_PUBLISHED_VERSION="ci/latest-{version-old}"
        - 'kubernetes-upgrade-{provider}-{version-old}-{version-new}-{step}':
            step: 'step2-kubectl-e2e-new'
            runner: '{runner-new}'
            description: 'Run {version-new} kubectl tests against the cluster running {version-old}.'
            job-env: |
                export E2E_OPT="--check_version_skew=false"
                export E2E_UP="false"
                export E2E_DOWN="false"
                export JENKINS_PUBLISHED_VERSION="ci/latest-{version-new}"
                export GINKGO_TEST_ARGS="--ginkgo.focus=Kubectl"
        - 'kubernetes-upgrade-{provider}-{version-old}-{version-new}-{step}':
            step: 'step3-upgrade-master'
            runner: '{runner-new}'
            description: 'Upgrade the master from {version-old} to {version-new}.'
            job-env: |
                export E2E_OPT="--check_version_skew=false"
                export E2E_UP="false"
                export E2E_DOWN="false"
                export JENKINS_PUBLISHED_VERSION="ci/latest-{version-new}"
                export GINKGO_TEST_ARGS="--ginkgo.focus=\[Feature:MasterUpgrade\] --upgrade-target=ci/latest-{version-new}"
        - 'kubernetes-upgrade-{provider}-{version-old}-{version-new}-{step}':
            step: 'step4-e2e-old'
            runner: '{runner-old}'
            description: 'Run {version-old} e2e tests against the cluster with master at {version-new} and nodes still at {version-old}.'
            job-env: |
                export E2E_OPT="--check_version_skew=false"
                export E2E_UP="false"
                export E2E_DOWN="false"
                export JENKINS_PUBLISHED_VERSION="ci/latest-{version-old}"
        - 'kubernetes-upgrade-{provider}-{version-old}-{version-new}-{step}':
            step: 'step5-upgrade-cluster'
            runner: '{runner-new}'
            description: 'Upgrade the nodes from {version-old} to {version-new}.'
            job-env: |
                export E2E_OPT="--check_version_skew=false"
                export E2E_UP="false"
                export E2E_DOWN="false"
                export JENKINS_PUBLISHED_VERSION="ci/latest-{version-new}"
                export GINKGO_TEST_ARGS="--ginkgo.focus=\[Feature:NodeUpgrade\] --upgrade-target=ci/latest-{version-new}"
        - 'kubernetes-upgrade-{provider}-{version-old}-{version-new}-{step}':
            step: 'step6-e2e-old'
            runner: '{runner-old}'
            description: 'Run {version-old} e2e tests against the cluster with master and nodes at {version-new}.'
            job-env: |
                export E2E_OPT="--check_version_skew=false"
                export E2E_UP="false"
                export E2E_DOWN="false"
                export JENKINS_PUBLISHED_VERSION="ci/latest-{version-old}"
        - 'kubernetes-upgrade-{provider}-{version-old}-{version-new}-{step}':
            step: 'step7-e2e-new'
            runner: '{runner-new}'
            description: 'Run {version-new} e2e tests against the cluster with master and nodes at {version-new}.'
            job-env: |
                # TODO(15011): these really shouldn't be (very) version skewed, but
                # because we have to get ci/latest again, it could get slightly out of
                # whack.
                export E2E_OPT="--check_version_skew=false"
                export E2E_UP="false"
                export JENKINS_PUBLISHED_VERSION="ci/latest-{version-new}"
# GKE upgrade flows.  Releases <= 1.1 use the old e2e.sh from their release
# branch as {runner-old}; everything newer uses e2e-runner.sh from master
# (see the note at the top of this file).
- project:
    name: 'upgrade-gke'
    provider: 'gke'
    provider-env: |
        {gke-provider-env}
        export JENKINS_TOLERATE_DIRTY_WORKSPACE="y"
        export FAIL_ON_GCP_RESOURCE_LEAK="false"
    jobs:
        - '{provider}-{version-old}-{version-new}-upgrades':
            version-old: '1.0'
            version-new: '1.2'
            runner-old: curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.0/hack/jenkins/e2e.sh" | bash -
            runner-new: curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/e2e-runner.sh" | bash -
            project-env: |
                export E2E_NAME="upgrade-gke-1-0-1-2"
                export PROJECT="kubernetes-jenkins-gke-upgrade"
        - '{provider}-{version-old}-{version-new}-upgrades':
            version-old: '1.1'
            version-new: '1.2'
            runner-old: curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.1/hack/jenkins/e2e.sh" | bash -
            runner-new: curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/e2e-runner.sh" | bash -
            project-env: |
                export E2E_NAME="upgrade-gke-1-1-1-2"
                export PROJECT="kubernetes-jenkins-gke-upgrade"
        - '{provider}-{version-old}-{version-new}-upgrades':
            version-old: '1.1'
            version-new: '1.3'
            runner-old: curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.1/hack/jenkins/e2e.sh" | bash -
            runner-new: curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/e2e-runner.sh" | bash -
            project-env: |
                export E2E_NAME="upgrade-gke-1-1-1-3"
                export PROJECT="kubernetes-jenkins-gke-upgrade"
        - '{provider}-{version-old}-{version-new}-upgrades':
            version-old: '1.2'
            version-new: '1.3'
            runner-old: curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/e2e-runner.sh" | bash -
            runner-new: curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/e2e-runner.sh" | bash -
            project-env: |
                export E2E_NAME="upgrade-gke-1-2-1-3"
                export PROJECT="kubernetes-jenkins-gke-upgrade"
# GCE upgrade flows; mirrors the GKE project above, plus a fixed node count.
- project:
    name: 'upgrade-gce'
    provider: 'gce'
    provider-env: |
        {gce-provider-env}
        export NUM_NODES=5
        export JENKINS_TOLERATE_DIRTY_WORKSPACE="y"
        export FAIL_ON_GCP_RESOURCE_LEAK="false"
    jobs:
        - '{provider}-{version-old}-{version-new}-upgrades':
            version-old: '1.0'
            version-new: '1.2'
            runner-old: curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.0/hack/jenkins/e2e.sh" | bash -
            runner-new: curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/e2e-runner.sh" | bash -
            project-env: |
                export E2E_NAME="upgrade-gce-1-0-1-2"
                export PROJECT="kubernetes-jenkins-gce-upgrade"
        - '{provider}-{version-old}-{version-new}-upgrades':
            version-old: '1.1'
            version-new: '1.2'
            runner-old: curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.1/hack/jenkins/e2e.sh" | bash -
            runner-new: curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/e2e-runner.sh" | bash -
            project-env: |
                export E2E_NAME="upgrade-gce-1-1-1-2"
                export PROJECT="kubernetes-jenkins-gce-upgrade"
        - '{provider}-{version-old}-{version-new}-upgrades':
            version-old: '1.1'
            version-new: '1.3'
            runner-old: curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/release-1.1/hack/jenkins/e2e.sh" | bash -
            runner-new: curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/e2e-runner.sh" | bash -
            project-env: |
                export E2E_NAME="upgrade-gce-1-1-1-3"
                export PROJECT="kubernetes-jenkins-gce-upgrade"
        - '{provider}-{version-old}-{version-new}-upgrades':
            version-old: '1.2'
            version-new: '1.3'
            runner-old: curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/e2e-runner.sh" | bash -
            runner-new: curl -fsS --retry 3 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/e2e-runner.sh" | bash -
            project-env: |
                export E2E_NAME="upgrade-gce-1-2-1-3"
                export PROJECT="kubernetes-jenkins-gce-upgrade"

View File

@ -1,160 +0,0 @@
# Job template to checkout source from github to $WORKSPACE/go/src/{gitbasedir} and build + publish a docker image.
# - Tags the docker image with 'canary'.
# - Job is executed daily and when changes are found polling github (every 5m).
# - Console output is published to google cloud storage.
#
# gitproject: name of the project - for display purposes only
# owner: owner to be notified for job failures. test results are published to owner email
# repoName: github repo to checkout e.g. kubernetes/kubernetes or google/cadvisor. Must match the docker image repo name where the image will be published.
# gitbasedir: directory under $WORKSPACE/go/src to checkout source repo to - e.g. k8s.io/kubernetes or github.com/google/cadvisor
# dockerfilePath: workspace relative path to the 'Dockerfile' to build
- job-template:
    name: '{gitproject}-dockercanarybuild-ci'
    disabled: '{obj:disable_job}'
    description: 'Build and push {gitproject} docker image.<br>Test Owner: {owner}.'
    node: 'node'
    logrotate:
        numToKeep: 200
    builders:
        - docker-build-publish:
            repoName: '{repoName}'
            dockerfilePath: 'go/src/{gitbasedir}/{dockerfilePath}'
            tag: 'canary'
            # Jenkins credential ID used to push the image to the registry.
            credentialsId: '1f361efb-5b85-4f61-91a7-4ec7fb2a5c23'
    publishers:
        - claim-build
        - gcs-uploader
        - log-parser
        - email-ext:
            recipients: '{owner}'
    scm:
        - git:
            url: 'https://github.com/{repoName}'
            browser: githubweb
            browser-url: 'https://github.com/{repoName}'
            branches:
                - 'master'
            skip-tag: true
            # Checkout into the Go workspace layout the build expects.
            basedir: 'go/src/{gitbasedir}'
    triggers:
        - pollscm:
            cron: 'H/5 * * * *'
        - timed: '@daily'
    wrappers:
        - ansicolor:
            colormap: xterm
        - timeout:
            timeout: 30
            fail: true
        - timestamps
# Job template to checkout source from github to $WORKSPACE/go/src/{gitbasedir}, setup go environment variables, and execute a
# shell script.
# - Job is executed every 30 minutes and when changes are found polling github (every 5m).
# - Console output is published to google cloud storage.
#
# gitproject: name of the project - for display purposes only
# owner: owner to be notified for job failures. test results are published to owner email
# repoName: github repo to checkout e.g. kubernetes/kubernetes or google/cadvisor
# gitbasedir: directory under $WORKSPACE/go/src to checkout source repo to - e.g. k8s.io/kubernetes or github.com/google/cadvisor
# shell: bash command to execute from gitbasedir. should be a single script such as {gitproject}-jenkins.sh
- job-template:
    name: '{gitproject}-gce-e2e-ci'
    description: '{gitproject} continuous e2e tests.<br>Test Owner: {owner}.'
    disabled: '{obj:disable_job}'
    node: 'node'
    logrotate:
        numToKeep: 200
    builders:
        - shell: |
            #!/bin/bash
            set -e
            set -x
            cd go/src/{gitbasedir}
            {shell}
    publishers:
        - claim-build
        - gcs-uploader
        - log-parser
        - email-ext:
            recipients: '{owner}'
    scm:
        - git:
            url: 'https://github.com/{repoName}'
            browser: githubweb
            browser-url: 'https://github.com/{repoName}'
            branches:
                - 'master'
            skip-tag: true
            basedir: 'go/src/{gitbasedir}'
    triggers:
        - pollscm:
            cron: 'H/5 * * * *'
        - timed: 'H/30 * * * *'
    wrappers:
        - ansicolor:
            colormap: xterm
        - timeout:
            timeout: 90
            fail: true
        - timestamps
        # Inject a Go build environment rooted at the job workspace.
        - inject:
            properties-content: |
                GOROOT=/usr/local/go
                GOPATH=$WORKSPACE/go
                PATH=$PATH:$GOROOT/bin:$WORKSPACE/go/bin
        - workspace-cleanup
# Canary image builds for node-related projects (cadvisor, heapster).
- project:
    name: node-docker-canary-build
    # NOTE(review): this project-level repoName is overridden by the
    # per-entry repoName values below -- confirm it is still needed.
    repoName: 'google/cadvisor'
    gitproject:
        - 'cadvisor':
            repoName: 'google/cadvisor'
            gitbasedir: 'github.com/google/cadvisor'
            dockerfilePath: 'deploy/canary/Dockerfile'
            owner: 'stclair@google.com'
        - 'heapster':
            repoName: 'kubernetes/heapster'
            gitbasedir: 'k8s.io/heapster'
            dockerfilePath: 'deploy/docker/canary/Dockerfile'
            owner: 'pszczesniak@google.com'
    jobs:
        - '{gitproject}-dockercanarybuild-ci'
    # NOTE(review): at project level 'wrappers' is only a template variable,
    # and the job template defines its own wrappers; this entry appears to
    # have no effect -- confirm.
    wrappers:
        - workspace-cleanup
# Continuous GCE e2e runs for node-related projects; each entry supplies the
# shell command run from its gitbasedir by the '{gitproject}-gce-e2e-ci'
# template above.
- project:
    name: node-gce-e2e
    # NOTE(review): project-level repoName is overridden by every entry
    # below -- confirm it is still needed.
    repoName: 'google/cadvisor'
    gitproject:
        - 'cadvisor':
            repoName: 'google/cadvisor'
            gitbasedir: 'github.com/google/cadvisor'
            owner: 'stclair@google.com'
            shell: |
                go get -u github.com/tools/godep
                ./build/presubmit.sh
                godep go build -tags test github.com/google/cadvisor/integration/runner
                ./runner --logtostderr --test-retry-count=8 --test-retry-whitelist=integration/runner/retrywhitelist.txt \
                --ssh-options "-i /home/jenkins/.ssh/google_compute_engine -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=yes -o CheckHostIP=no -o StrictHostKeyChecking=no" \
                e2e-cadvisor-ubuntu-trusty \
                e2e-cadvisor-coreos-beta \
                e2e-cadvisor-container-vm-v20151215 \
                e2e-cadvisor-container-vm-v20160127 \
                e2e-cadvisor-rhel-7
        - 'heapster':
            repoName: 'kubernetes/heapster'
            gitbasedir: 'k8s.io/heapster'
            owner: 'pszczesniak@google.com'
            shell: 'make test-unit test-integration'
            # Consumed by the template via '{obj:disable_job}'.
            disable_job: true # Issue #23538
        - 'kubelet':
            # NOTE(review): cron-string is not referenced by the
            # '{gitproject}-gce-e2e-ci' template -- confirm it is still used.
            cron-string: '{sq-cron-string}'
            repoName: 'kubernetes/kubernetes'
            gitbasedir: 'k8s.io/kubernetes'
            owner: 'pwittroc@google.com'
            shell: 'test/e2e_node/jenkins/e2e-node-jenkins.sh test/e2e_node/jenkins/jenkins-ci.properties'
    jobs:
        - '{gitproject}-gce-e2e-ci'

View File

@ -1,36 +0,0 @@
# Daily job: checkout kubernetes master and run hack/verify-linkcheck.sh.
- job:
    name: 'kubernetes-test-linkchecker'
    description: |
        Grab the latest from GitHub, then run hack/verify-linkcheck.sh.<br>
        Test Owner: Build Cop
    # 'disabled' is the actual Jenkins Job Builder keyword for a plain job;
    # the previous 'disable_job' key is only a template variable (consumed
    # elsewhere via '{obj:disable_job}') and had no effect here, so the job
    # was never really disabled.
    disabled: true # Issue #23162
    logrotate:
        numToKeep: 200
    builders:
        - shell: |
            export PATH=${PATH}:/usr/local/go/bin
            ./hack/verify-linkcheck.sh
    publishers:
        - claim-build
        - gcs-uploader
        - email-ext:
            recipients: 'xuchao@google.com'
    scm:
        - git:
            url: https://github.com/kubernetes/kubernetes
            branches:
                - 'master'
            browser: githubweb
            browser-url: https://github.com/kubernetes/kubernetes
            wipe-workspace: false
            skip-tag: true
    triggers:
        - timed: '@daily'
    wrappers:
        - ansicolor:
            colormap: xterm
        - timeout:
            timeout: 60
            fail: true
        - timestamps
        - workspace-cleanup

View File

@ -1,46 +0,0 @@
#!/bin/bash

# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Update all Jenkins jobs in a folder specified in $1. It can be the union of
# multiple folders separated with a colon, like with the PATH variable.

# Fail fast: without this, a failed 'git pull' below would still run
# 'jenkins-jobs update' against stale configs.
set -o errexit
set -o nounset
set -o pipefail

if [[ $# -eq 1 ]]; then
  config_dir="$1"
else
  echo "Usage: $0 <dir>" >&2
  exit 1
fi

# Run the container if it isn't present.
if ! docker inspect job-builder &> /dev/null; then
  # jenkins_jobs.ini contains administrative credentials for Jenkins.
  # Store it in the workspace of the Jenkins job that calls this script.
  if [[ -e jenkins_jobs.ini ]]; then
    docker run -idt \
      --net host \
      --name job-builder \
      --restart always \
      gcr.io/google_containers/kubekins-job-builder:1
    docker cp jenkins_jobs.ini job-builder:/etc/jenkins_jobs
  else
    echo "jenkins_jobs.ini not found in workspace" >&2
    exit 1
  fi
fi

# Refresh the configs inside the container, then apply them.
docker exec job-builder git checkout master
docker exec job-builder git pull
# Quote the path list: it is passed to jenkins-jobs as a single
# colon-separated argument and must not be word-split (SC2086).
docker exec job-builder jenkins-jobs update "${config_dir}"

View File

@ -1,42 +0,0 @@
#!/bin/bash

# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Tests the Jenkins job configs and computes a diff of any changes when there
# have been local changes of the configs.

set -o errexit
set -o nounset
set -o pipefail

# Use ${BASH_SOURCE[0]} explicitly: bare ${BASH_SOURCE} relies on implicit
# first-element array expansion (ShellCheck SC2128).
KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
source "${KUBE_ROOT}/hack/lib/init.sh"

# Branch to diff against; arg 1 overrides KUBE_VERIFY_GIT_BRANCH, default master.
readonly branch=${1:-${KUBE_VERIFY_GIT_BRANCH:-master}}

# Skip entirely when nothing under the job configs changed, unless forced.
if ! [[ ${KUBE_FORCE_VERIFY_CHECKS:-} =~ ^[yY]$ ]] && \
  ! kube::util::has_changes_against_upstream_branch "${branch}" 'hack/jenkins/job-configs/'; then
  exit 0
fi

# By using ARTIFACTS_DIR, we can write the diff out to the artifacts directory
# (and then up to GCS) when running on Jenkins.
export OUTPUT_DIR="${ARTIFACTS_DIR:+${ARTIFACTS_DIR}/jjb}"

# When running inside Docker (e.g. on Jenkins) we'll need to reference the
# host's artifacts directory for the Docker-in-Docker volume mount to work.
export DOCKER_VOLUME_OUTPUT_DIR="${HOST_ARTIFACTS_DIR:+${HOST_ARTIFACTS_DIR}/jjb}"

# This script should pass, assuming the configs are not broken. Diffs won't
# cause failures.
"${KUBE_ROOT}/hack/jenkins/diff-job-config-patch.sh"