Merge pull request #1453 from jbeda/hostIP-fix

Grab images as part of update e2e test

commit 3e6859564a

@@ -131,26 +131,23 @@ function kube-up {
   # Detect the project into $PROJECT if it isn't set
   detect-project
 
+  # This will take us up to the git repo root
+  local base_dir=$(dirname "${BASH_SOURCE}")/../..
 
   # Build up start up script for master
   KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)
-  trap "rm -rf ${KUBE_TEMP}" EXIT
+  trap 'rm -rf "${KUBE_TEMP}"' EXIT
 
   get-password
   echo "Using password: $user:$passwd"
-  python $(dirname $0)/../third_party/htpasswd/htpasswd.py -b -c ${KUBE_TEMP}/htpasswd $user $passwd
-  HTPASSWD=$(cat ${KUBE_TEMP}/htpasswd)
-  (
-    echo "#! /bin/bash"
-    echo "MASTER_NAME=${MASTER_NAME}"
-    echo "MASTER_RELEASE_TAR=${RELEASE_NORMALIZED}/master-release.tgz"
-    echo "MASTER_HTPASSWD='${HTPASSWD}'"
-    grep -v "^#" $(dirname $0)/templates/download-release.sh
-    grep -v "^#" $(dirname $0)/templates/salt-master.sh
-  ) > ${KUBE_TEMP}/master-start.sh
+  python "${base_dir}/third_party/htpasswd/htpasswd.py" -b \
+    -c "${KUBE_TEMP}/htpasswd" $user $passwd
+  HTPASSWD=$(cat "${KUBE_TEMP}/htpasswd")
 
   if ! gcutil getnetwork "${NETWORK}"; then
     echo "Creating new network for: ${NETWORK}"
+    # The network needs to be created synchronously or we have a race. The
+    # firewalls can be added concurrent with instance creation.
    gcutil addnetwork "${NETWORK}" --range "10.240.0.0/16"
     gcutil addfirewall "${NETWORK}-default-internal" \
       --norespect_terminal_width \
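
The trap change above is the subtle one: with double quotes, ${KUBE_TEMP} is expanded when the trap is set and the resulting path is unquoted when it fires. A minimal sketch of the difference (illustrative only, not part of this diff):

    KUBE_TEMP=$(mktemp -d -t kubernetes.XXXXXX)

    # Expanded immediately; the path is unquoted when the trap fires,
    # so it word-splits if it ever contains spaces.
    trap "rm -rf ${KUBE_TEMP}" EXIT

    # Expansion deferred until the trap fires; the inner double quotes
    # keep the path a single word.
    trap 'rm -rf "${KUBE_TEMP}"' EXIT
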
@@ -174,6 +171,15 @@ function kube-up {
     --target_tags ${MASTER_TAG} \
     --allowed tcp:443 &
 
+  (
+    echo "#! /bin/bash"
+    echo "MASTER_NAME='${MASTER_NAME}'"
+    echo "MASTER_RELEASE_TAR=${RELEASE_NORMALIZED}/master-release.tgz"
+    echo "MASTER_HTPASSWD='${HTPASSWD}'"
+    grep -v "^#" "${base_dir}/cluster/templates/download-release.sh"
+    grep -v "^#" "${base_dir}/cluster/templates/salt-master.sh"
+  ) > "${KUBE_TEMP}/master-start.sh"
+
   gcutil addinstance ${MASTER_NAME}\
     --norespect_terminal_width \
     --project ${PROJECT} \
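
The relocated block now resolves template paths through ${base_dir} rather than $(dirname $0). A quick sketch of why BASH_SOURCE is the safer anchor when a script may be sourced (illustrative, not from this diff):

    # $0 names whatever invoked the current shell, so it points at the
    # caller when this file is sourced; BASH_SOURCE names this file.
    echo "dirname of \$0:         $(dirname $0)"
    echo "dirname of BASH_SOURCE: $(dirname "${BASH_SOURCE}")"
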
@@ -184,14 +190,14 @@ function kube-up {
     --network ${NETWORK} \
     --service_account_scopes="storage-ro,compute-rw" \
     --automatic_restart \
-    --metadata_from_file startup-script:${KUBE_TEMP}/master-start.sh &
+    --metadata_from_file "startup-script:${KUBE_TEMP}/master-start.sh" &
 
   for (( i=0; i<${#MINION_NAMES[@]}; i++)); do
     (
       echo "#! /bin/bash"
-      echo "MASTER_NAME=${MASTER_NAME}"
+      echo "MASTER_NAME='${MASTER_NAME}'"
       echo "MINION_IP_RANGE=${MINION_IP_RANGES[$i]}"
-      grep -v "^#" $(dirname $0)/templates/salt-minion.sh
+      grep -v "^#" "${base_dir}/cluster/templates/salt-minion.sh"
     ) > ${KUBE_TEMP}/minion-start-${i}.sh
 
     gcutil addfirewall ${MINION_NAMES[$i]}-all \
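
MASTER_NAME gains single quotes here because the echoed line is evaluated later, on the VM, where an unquoted value would word-split. A small sketch with a hypothetical value:

    MASTER_NAME="k8s master"              # hypothetical value with a space
    echo "MASTER_NAME=${MASTER_NAME}"     # emits MASTER_NAME=k8s master  (breaks when sourced)
    echo "MASTER_NAME='${MASTER_NAME}'"   # emits MASTER_NAME='k8s master' (parses as one word)
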
@@ -212,7 +218,7 @@ function kube-up {
       --service_account_scopes=${MINION_SCOPES} \
       --automatic_restart \
       --can_ip_forward \
-      --metadata_from_file startup-script:${KUBE_TEMP}/minion-start-${i}.sh &
+      --metadata_from_file "startup-script:${KUBE_TEMP}/minion-start-${i}.sh" &
 
     gcutil addroute ${MINION_NAMES[$i]} ${MINION_IP_RANGES[$i]} \
       --norespect_terminal_width \
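
Both startup-script flags (master and minion) gain quotes for the same reason: the value embeds ${KUBE_TEMP}, and an unquoted expansion word-splits. A sketch using printf to show each resulting argument, with a hypothetical path:

    KUBE_TEMP="/tmp/kubernetes 1234"      # hypothetical temp dir with a space
    printf '<%s>\n' startup-script:${KUBE_TEMP}/master-start.sh     # two arguments
    printf '<%s>\n' "startup-script:${KUBE_TEMP}/master-start.sh"   # one argument
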
@@ -19,7 +19,6 @@
 set -o errexit
 set -o nounset
 set -o pipefail
-set -x
 
 source "${KUBE_REPO_ROOT}/cluster/kube-env.sh"
 source "${KUBE_REPO_ROOT}/cluster/$KUBERNETES_PROVIDER/util.sh"
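
Dropping set -x quiets the test output while errexit, nounset, and pipefail stay on. If tracing is still wanted on demand, a common pattern (hypothetical here, not in this diff) is to gate it on an environment variable:

    set -o errexit
    set -o nounset
    set -o pipefail
    [[ -n "${DEBUG:-}" ]] && set -x   # opt back in via DEBUG=1
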
@@ -28,42 +27,53 @@ source "${KUBE_REPO_ROOT}/cluster/$KUBERNETES_PROVIDER/util.sh"
 CONTROLLER_NAME=update-demo
 
 function validate() {
-  NUM_REPLICAS=$1
-  CONTAINER_IMAGE_VERSION=$2
-  POD_ID_LIST=$($KUBECFG '-template={{range.Items}}{{.ID}} {{end}}' -l simpleService=${CONTROLLER_NAME} list pods)
-  POD_ARR=($POD_ID_LIST)
-  while [ ${#POD_ARR[@]} -ne $NUM_REPLICAS ]; do
-    echo "Waiting for the right number of containers"
-    sleep 5
-    POD_ID_LIST=$($KUBECFG '-template={{range.Items}}{{.ID}} {{end}}' -l simpleService=${CONTROLLER_NAME} list pods)
-    POD_ARR=($POD_ID_LIST)
-  done
+  local num_replicas=$1
+  local container_image_version=$2
 
   # Container turn up on a clean cluster can take a while for the docker image pull.
-  ALL_RUNNING=0
-  while [ $ALL_RUNNING -ne 1 ]; do
-    echo "Waiting for all containers in pod to come up."
-    sleep 5
-    ALL_RUNNING=1
-    for id in $POD_ID_LIST; do
-      TEMPLATE_STRING="{{and ((index .CurrentState.Info \"${CONTROLLER_NAME}\").State.Running) .CurrentState.Info.net.State.Running}}"
-      CURRENT_STATUS=$($KUBECFG -template "${TEMPLATE_STRING}" get pods/$id)
-      if [ "$CURRENT_STATUS" != "{}" ]; then
-        ALL_RUNNING=0
-      else
-        CURRENT_IMAGE=$($KUBECFG -template "{{(index .CurrentState.Info \"${CONTROLLER_NAME}\").DetailInfo.Config.Image}}" get pods/$id)
-        if [ "$CURRENT_IMAGE" != "${DOCKER_HUB_USER}/update-demo:${CONTAINER_IMAGE_VERSION}" ]; then
-          ALL_RUNNING=0
-        fi
+  local num_running=0
+  while [[ $num_running -ne $num_replicas ]]; do
+    echo "Waiting for all containers in pod to come up. Currently: ${num_running}/${num_replicas}"
+    sleep 2
+
+    local pod_id_list
+    pod_id_list=($($KUBECFG -template='{{range.Items}}{{.ID}} {{end}}' -l simpleService="${CONTROLLER_NAME}" list pods))
+    echo " ${#pod_id_list[@]} out of ${num_replicas} created"
+
+    local id
+    num_running=0
+    for id in "${pod_id_list[@]}"; do
+      local template_string current_status current_image host_ip
+      template_string="{{and ((index .CurrentState.Info \"${CONTROLLER_NAME}\").State.Running) .CurrentState.Info.net.State.Running}}"
+      current_status=$($KUBECFG -template="${template_string}" get "pods/$id")
+      if [[ "$current_status" != "{}" ]]; then
+        echo " $id is created but not running"
+        continue
       fi
+
+      template_string="{{(index .CurrentState.Info \"${CONTROLLER_NAME}\").DetailInfo.Config.Image}}"
+      current_image=$($KUBECFG -template="${template_string}" get "pods/$id")
+      if [[ "$current_image" != "${DOCKER_HUB_USER}/update-demo:${container_image_version}" ]]; then
+        echo " ${id} is created but running wrong image"
+        continue
+      fi
+
+      host_ip=$($KUBECFG -template='{{.CurrentState.HostIP}}' get pods/$id)
+      curl -s --max-time 5 --fail http://${host_ip}:8080/data.json \
+        | grep -q ${container_image_version} || {
+        echo " ${id} is running the right image but curl to contents failed or returned wrong info"
+        continue
+      }
+
+      echo " ${id} is verified up and running"
+      ((num_running++)) || true
     done
   done
-
-  ids=($POD_ID_LIST)
-  if [ ${#ids[@]} -ne $NUM_REPLICAS ]; then
-    echo "Unexpected number of pods: ${#ids[@]}. Expected $NUM_REPLICAS"
-    exit 1
-  fi
+  return 0
 }
 
 export DOCKER_HUB_USER=jbeda
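
This is the heart of the PR: instead of counting pods once and only spot-checking state, validate() now repolls the pod list on every pass and, per pod, checks the running state, the container image, and (via the pod's HostIP) that the served data.json actually mentions the expected version. A stripped-down sketch of the counting pattern, with a hypothetical check_pod standing in for those three checks:

    num_replicas=2
    check_pod() { true; }              # hypothetical: status, image, and HTTP checks

    num_running=0
    while [[ $num_running -ne $num_replicas ]]; do
      sleep 2
      num_running=0                    # recount from scratch each sweep
      for id in pod-a pod-b; do        # hypothetical pod IDs
        check_pod "$id" || continue    # any failed check skips this pod
        ((num_running++)) || true      # ((...)) exits 1 when the value before
                                       # the increment is 0; || true keeps
                                       # errexit from killing the script
      done
    done
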
@@ -20,6 +20,9 @@
 # Use testing config
 export KUBE_CONFIG_FILE="config-test.sh"
 export KUBE_REPO_ROOT="$(dirname $0)/.."
+
+# TODO(jbeda): This will break on usage if there is a space in
+# ${KUBE_REPO_ROOT}. Covert to an array? Or an exported function?
 export KUBECFG="${KUBE_REPO_ROOT}/cluster/kubecfg.sh -expect_version_match"
 
 source $(dirname $0)/../cluster/kube-env.sh
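
The TODO added above can be resolved the way it suggests: store the command as an array so the script path stays one word even if it contains spaces. A sketch (hypothetical, not part of this diff):

    KUBECFG=("${KUBE_REPO_ROOT}/cluster/kubecfg.sh" -expect_version_match)
    "${KUBECFG[@]}" list pods   # expands to the path and flag as separate, intact words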