Test storage upgrade to protobufs

Wojciech Tyczynski 2016-05-17 14:44:30 +02:00
parent 1738bbfe5f
commit 03268e44c0
2 changed files with 15 additions and 10 deletions

hack/test-update-storage-objects.sh

@@ -32,6 +32,9 @@ KUBE_NEW_API_VERSION=${KUBE_NEW_API_VERSION:-"v1"}
KUBE_OLD_STORAGE_VERSIONS=${KUBE_OLD_STORAGE_VERSIONS:-""}
KUBE_NEW_STORAGE_VERSIONS=${KUBE_NEW_STORAGE_VERSIONS:-""}
KUBE_STORAGE_MEDIA_TYPE_JSON="application/json"
KUBE_STORAGE_MEDIA_TYPE_PROTOBUF="application/vnd.kubernetes.protobuf"
ETCD_HOST=${ETCD_HOST:-127.0.0.1}
ETCD_PORT=${ETCD_PORT:-4001}
API_PORT=${API_PORT:-8080}
@@ -44,7 +47,9 @@ UPDATE_ETCD_OBJECTS_SCRIPT="${KUBE_ROOT}/cluster/update-storage-objects.sh"
function startApiServer() {
local storage_versions=${1:-""}
local storage_media_type=${2:-""}
kube::log::status "Starting kube-apiserver with KUBE_API_VERSIONS: ${KUBE_API_VERSIONS}"
kube::log::status " and storage-media-type: ${storage_media_type}"
kube::log::status " and runtime-config: ${RUNTIME_CONFIG}"
kube::log::status " and storage-version overrides: ${storage_versions}"
@@ -57,7 +62,8 @@ function startApiServer() {
--runtime-config="${RUNTIME_CONFIG}" \
--cert-dir="${TMPDIR:-/tmp/}" \
--service-cluster-ip-range="10.0.0.0/24" \
--storage-versions="${storage_versions}" 1>&2 &
--storage-versions="${storage_versions}" \
--storage-media-type="${storage_media_type}" 1>&2 &
APISERVER_PID=$!
# url, prefix, wait, times
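The "url, prefix, wait, times" comment documents the argument order of the readiness helper invoked right after it. As an illustration only (the wait and retry values below are assumptions, not taken from this diff), the call typically looks like:

kube::util::wait_for_url "http://127.0.0.1:${API_PORT}/healthz" "apiserver: " 1 30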
@@ -109,10 +115,11 @@ KUBE_NEW_STORAGE_VERSIONS="batch/v1,autoscaling/v1"
#######################################################
# Step 1: Start a server which supports both the old and new api versions,
# but KUBE_OLD_API_VERSION is the latest (storage) version.
# Additionally use KUBE_STORAGE_MEDIA_TYPE_JSON for storage encoding.
#######################################################
KUBE_API_VERSIONS="${KUBE_OLD_API_VERSION},${KUBE_NEW_API_VERSION}"
RUNTIME_CONFIG="api/all=false,api/${KUBE_OLD_API_VERSION}=true,api/${KUBE_NEW_API_VERSION}=true"
startApiServer ${KUBE_OLD_STORAGE_VERSIONS}
startApiServer ${KUBE_OLD_STORAGE_VERSIONS} ${KUBE_STORAGE_MEDIA_TYPE_JSON}
# Create object(s)
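For context, "Create object(s)" is driven by the test tuples described later in this diff (source_file,resource,namespace,name,old_version,new_version). A hedged sketch of that step, with ${KUBECTL} and the server flag assumed rather than taken from this diff:

"${KUBECTL}" create -f "${source_file}" --namespace="${namespace}" -s "http://127.0.0.1:${API_PORT}"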
@@ -139,11 +146,12 @@ killApiServer
#######################################################
# Step 2: Start a server which supports both the old and new api versions,
# but KUBE_NEW_API_VERSION is the latest (storage) version.
# Still use KUBE_STORAGE_MEDIA_TYPE_JSON for storage encoding.
#######################################################
KUBE_API_VERSIONS="${KUBE_NEW_API_VERSION},${KUBE_OLD_API_VERSION}"
RUNTIME_CONFIG="api/all=false,api/${KUBE_OLD_API_VERSION}=true,api/${KUBE_NEW_API_VERSION}=true"
startApiServer ${KUBE_NEW_STORAGE_VERSIONS}
startApiServer ${KUBE_NEW_STORAGE_VERSIONS} ${KUBE_STORAGE_MEDIA_TYPE_JSON}
# Update etcd objects, so that they will now be stored in the new api version.
kube::log::status "Updating storage versions in etcd"
@@ -166,6 +174,7 @@ killApiServer
#######################################################
# Step 3: Start a server which supports only the new api version.
# However, change storage encoding to KUBE_STORAGE_MEDIA_TYPE_PROTOBUF.
#######################################################
KUBE_API_VERSIONS="${KUBE_NEW_API_VERSION}"
@@ -173,7 +182,7 @@ RUNTIME_CONFIG="api/all=false,api/${KUBE_NEW_API_VERSION}=true"
# This seems to reduce flakiness.
sleep 1
startApiServer ${KUBE_NEW_STORAGE_VERSIONS}
startApiServer ${KUBE_NEW_STORAGE_VERSIONS} ${KUBE_STORAGE_MEDIA_TYPE_PROTOBUF}
for test in ${tests[@]}; do
IFS=',' read -ra test_data <<<"$test"

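Once step 3 restarts the apiserver with KUBE_STORAGE_MEDIA_TYPE_PROTOBUF, newly written objects land in etcd in binary form. One hedged way to spot-check the encoding (the etcd2-era etcdctl flags and the key path here are assumptions): JSON-encoded values begin with '{', while protobuf-encoded values begin with the magic prefix "k8s" followed by a zero byte.

etcdctl --peers "http://${ETCD_HOST}:${ETCD_PORT}" get /registry/jobs/default/pi | head -c 4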
hack/verify-flags/exceptions.txt

@@ -64,13 +64,9 @@ docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js: va
docs/getting-started-guides/coreos/azure/lib/deployment_logic/kubernetes.js:var cloud_config = require('../cloud_config.js');
examples/cluster-dns/images/frontend/client.py: service_address = socket.gethostbyname(hostname)
examples/vitess/env.sh: node_ip=$(get_node_ip)
hack/jenkins/job-builder-image/Dockerfile:# JJB configuration lives in /etc/jenkins_jobs/jenkins_jobs.ini
hack/jenkins/update-jobs.sh: docker cp jenkins_jobs.ini job-builder:/etc/jenkins_jobs
hack/jenkins/update-jobs.sh: echo "jenkins_jobs.ini not found in workspace" >&2
hack/jenkins/update-jobs.sh: # jenkins_jobs.ini contains administrative credentials for Jenkins.
hack/jenkins/update-jobs.sh: if [[ -e jenkins_jobs.ini ]]; then
hack/local-up-cluster.sh: runtime_config="--runtime-config=${RUNTIME_CONFIG}"
hack/local-up-cluster.sh: runtime_config=""
hack/test-update-storage-objects.sh: local storage_media_type=${2:-""}
hack/test-update-storage-objects.sh: local storage_versions=${1:-""}
hack/test-update-storage-objects.sh: source_file=${test_data[0]}
hack/test-update-storage-objects.sh:# source_file,resource,namespace,name,old_version,new_version
@@ -88,8 +84,8 @@ test/e2e/es_cluster_logging.go: framework.Failf("No cluster_name field in Elast
test/e2e/es_cluster_logging.go: // Check to see if have a cluster_name field.
test/e2e/es_cluster_logging.go: clusterName, ok := esResponse["cluster_name"]
test/e2e/host_path.go: fmt.Sprintf("--file_content_in_loop=%v", filePath),
test/e2e/host_path.go: fmt.Sprintf("--file_content_in_loop=%v", filePathInReader),
test/e2e/host_path.go: fmt.Sprintf("--retry_time=%d", retryDuration),
test/images/mount-tester/mt.go: flag.BoolVar(&breakOnExpectedContent, "break_on_expected_content", true, "Break out of loop on expected content, (use with --file_content_in_loop flag only)")
test/images/mount-tester/mt.go: flag.IntVar(&retryDuration, "retry_time", 180, "Retry time during the loop")
test/images/mount-tester/mt.go: flag.StringVar(&readFileContentInLoopPath, "file_content_in_loop", "", "Path to read the file content in loop from")