Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 19:56:01 +00:00)

Fix up e2e tests.

parent 2d1b279311
commit bea37d5b17
@@ -15,6 +15,8 @@
# limitations under the License.

# TODO(jbeda): Provide a way to override project
# gcloud multiplexing for shared GCE/GKE tests.
GCLOUD=gcloud
ZONE=us-central1-b
MASTER_SIZE=n1-standard-1
MINION_SIZE=n1-standard-1
@@ -15,6 +15,8 @@
# limitations under the License.

# TODO(jbeda): Provide a way to override project
# gcloud multiplexing for shared GCE/GKE tests.
GCLOUD=gcloud
ZONE=us-central1-b
MASTER_SIZE=g1-small
MINION_SIZE=g1-small
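Note on the GCLOUD knob added above: "gcloud multiplexing" means the shared e2e scripts shell out through "${GCLOUD}" instead of calling gcloud directly, so a provider config can point the variable at a different binary or wrapper. A minimal sketch of the pattern, not part of this commit (the wrapper path is hypothetical; the ssh invocation mirrors the certs.sh change later in this diff):

#!/usr/bin/env bash
# Sketch only: how a shared test consumes GCLOUD from the provider config.
set -o errexit

# Defaults to the stock CLI; a GKE config could export GCLOUD=/path/to/gke-gcloud-wrapper.sh
GCLOUD="${GCLOUD:-gcloud}"
ZONE="${ZONE:-us-central1-b}"

# Every cloud CLI call goes through the variable:
"${GCLOUD}" compute ssh --zone="${ZONE}" "${KUBE_MASTER:-kubernetes-master}" \
  --command "ls /srv/kubernetes/ca.crt"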
@@ -25,10 +25,6 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/kube-env.sh"
source "${KUBE_ROOT}/cluster/$KUBERNETES_PROVIDER/util.sh"

# Launch some pods.
num_pods=2
$KUBECFG -p 8080:9376 run kubernetes/serve_hostname ${num_pods} my-hostname

function teardown() {
  echo "Cleaning up test artifacts"
  $KUBECFG stop my-hostname
@@ -37,7 +33,21 @@ function teardown() {

trap "teardown" EXIT

pod_id_list=$($KUBECFG '-template={{range.items}}{{.id}} {{end}}' -l replicationController=my-hostname list pods)
# Determine which pod image to launch (e.g. private.sh launches a different one).
pod_img_srv="${POD_IMG_SRV:-kubernetes/serve_hostname}"

# Launch some pods.
num_pods=2
$KUBECFG -p 8080:9376 run "${pod_img_srv}" ${num_pods} my-hostname

# List the pods.
pod_id_list=$($KUBECFG '-template={{range.items}}{{.id}} {{end}}' -l name=my-hostname list pods)
echo "pod_id_list: ${pod_id_list}"
if [[ -z "${pod_id_list:-}" ]]; then
  echo "Pod ID list is empty. It should have a set of pods to verify."
  exit 1
fi

# Pod turn up on a clean cluster can take a while for the docker image pull.
all_running=0
for i in $(seq 1 24); do
@@ -45,7 +55,7 @@ for i in $(seq 1 24); do
  sleep 5
  all_running=1
  for id in $pod_id_list; do
    current_status=$($KUBECFG -template '{{.currentState.status}}' get pods/$id) || true
    current_status=$($KUBECFG '-template={{.currentState.status}}' get pods/$id) || true
    if [[ "$current_status" != "Running" ]]; then
      all_running=0
      break
@@ -60,22 +70,28 @@ if [[ "${all_running}" == 0 ]]; then
  exit 1
fi

# Get minion IP addresses
detect-minions

# let images stabilize
echo "Letting images stabilize"
sleep 5

# Verify that something is listening.
for id in ${pod_id_list}; do
  ip=$($KUBECFG -template '{{.currentState.hostIP}}' get pods/$id)
  ip=$($KUBECFG '-template={{.currentState.hostIP}}' get pods/$id)
  echo "Trying to reach server that should be running at ${ip}:8080..."
  ok=0
  server_running=0
  for i in $(seq 1 5); do
    curl --connect-timeout 1 "http://${ip}:8080" >/dev/null 2>&1 && ok=1 && break
    echo "--- trial ${i}"
    output=$(curl -s -connect-timeout 1 "http://${ip}:8080" || true)
    if echo $output | grep "${id}" &> /dev/null; then
      server_running=1
      break
    fi
    sleep 2
  done
  if [[ "${server_running}" -ne 1 ]]; then
    echo "Server never running at ${ip}:8080..."
    exit 1
  fi
done

exit 0
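The POD_IMG_SRV hook introduced above is what lets other suites reuse basic.sh with a different serve_hostname image; private.sh (rewritten later in this diff) does exactly that. A minimal usage sketch, with a placeholder image name rather than the one any real suite uses:

#!/usr/bin/env bash
# Sketch: reuse basic.sh against a different serve_hostname image.
set -o errexit

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..

# Placeholder image; basic.sh falls back to kubernetes/serve_hostname when unset.
export POD_IMG_SRV="example.gcr.io/my-project/serve_hostname"

# basic.sh launches the pods, waits for them to be Running, and curls each host for its hostname.
source "${KUBE_ROOT}/hack/e2e-suite/basic.sh"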
@@ -25,14 +25,18 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/kube-env.sh"
source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"

if [[ "${KUBERNETES_PROVIDER}" != "gce" ]]; then
  echo "Skipping certs test on non-gce provider."
if [[ "${KUBERNETES_PROVIDER}" != "gce" ]] && [[ "${KUBERNETES_PROVIDER}" != "gke" ]]; then
  echo "WARNING: Skipping certs.sh for cloud provider: ${KUBERNETES_PROVIDER}."
  exit 0
fi

# Set KUBE_MASTER
detect-master

# IMPORTANT: there are upstream things that rely on these files.
# Do *not* fix this test by changing this path, unless you _really_ know
# what you are doing.
for file in kubecfg.key kubecfg.crt ca.crt; do
  gcloud compute ssh --zone="${ZONE}" "${MASTER_NAME}" --command "ls /srv/kubernetes/${file}"
  echo "Checking for ${file}"
  "${GCLOUD}" compute ssh --zone="${ZONE}" "${KUBE_MASTER}" --command "ls /srv/kubernetes/${file}"
done
@@ -26,22 +26,29 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/kube-env.sh"
source "${KUBE_ROOT}/cluster/$KUBERNETES_PROVIDER/util.sh"

if [[ "${KUBERNETES_PROVIDER}" != "gce" ]] && [[ "${KUBERNETES_PROVIDER}" != "gke" ]]; then
  echo "WARNING: Skipping monitoring.sh for cloud provider: ${KUBERNETES_PROVIDER}."
  exit 0
fi

MONITORING="${KUBE_ROOT}/examples/monitoring"
KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh"
MONITORING_FIREWALL_RULE="monitoring-test"

function setup {
  detect-project
  if ! gcloud compute firewall-rules describe $MONITORING_FIREWALL_RULE &>/dev/null; then
    if ! gcloud compute firewall-rules create $MONITORING_FIREWALL_RULE \

  if ! "${GCLOUD}" compute firewall-rules describe $MONITORING_FIREWALL_RULE &> /dev/null; then
    if ! "${GCLOUD}" compute firewall-rules create $MONITORING_FIREWALL_RULE \
      --project "${PROJECT}" \
      --network "e2e" \
      --network "${NETWORK}" \
      --quiet \
      --allow tcp:80 tcp:8083 tcp:8086 tcp:9200; then
      echo "Failed to set up firewall for monitoring" && false
    fi
  fi
  "${KUBECTL}" create -f "${MONITORING}/influx-grafana-pod.json"

  "${KUBECTL}" create -f "${MONITORING}/influx-grafana-pod.json"
  "${KUBECTL}" create -f "${MONITORING}/influx-grafana-service.json"
  "${KUBECTL}" create -f "${MONITORING}/heapster-pod.json"
}
@@ -51,8 +58,8 @@ function cleanup {
  "${KUBECTL}" delete -f "${MONITORING}/influx-grafana-pod.json" || true
  "${KUBECTL}" delete -f "${MONITORING}/influx-grafana-service.json" || true
  "${KUBECTL}" delete -f "${MONITORING}/heapster-pod.json" || true
  if gcloud compute firewall-rules describe $MONITORING_FIREWALL_RULE &> /dev/null; then
    gcloud compute firewall-rules delete \
  if "${GCLOUD}" compute firewall-rules describe $MONITORING_FIREWALL_RULE &> /dev/null; then
    "${GCLOUD}" compute firewall-rules delete \
      --project "${PROJECT}" \
      --quiet \
      $MONITORING_FIREWALL_RULE || true
@@ -71,7 +78,7 @@ function influx-data-exists {

function wait-for-pods {
  local running=false
  for i in `seq 1 20`; do
  for i in `seq 1 20`; do
    sleep 20
    if "${KUBECTL}" get pods influx-grafana | grep Running &> /dev/null \
      && "${KUBECTL}" get pods heapster | grep Running &> /dev/null; then
@@ -25,9 +25,9 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/kube-env.sh"
source "${KUBE_ROOT}/cluster/$KUBERNETES_PROVIDER/util.sh"

if [[ "$KUBERNETES_PROVIDER" != "gce" ]]; then
  echo "PD test is only run for GCE"
  return 0
if [[ "$KUBERNETES_PROVIDER" != "gce" ]] && [[ "$KUBERNETES_PROVIDER" != "gke" ]]; then
  echo "WARNING: Skipping pd.sh for cloud provider: ${KUBERNETES_PROVIDER}."
  exit 0
fi

disk_name="e2e-$(date +%H-%M-%s)"
@@ -21,66 +21,16 @@ set -o errexit
set -o nounset
set -o pipefail

if [[ "${KUBERNETES_PROVIDER:-gce}" != "gce" ]]; then
  echo WARNING: Skipping private.sh for cloud provider: $KUBERNETES_PROVIDER.
  exit 0
fi

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
source "${KUBE_ROOT}/cluster/kube-env.sh"
source "${KUBE_ROOT}/cluster/$KUBERNETES_PROVIDER/util.sh"

# Launch some pods.
num_pods=2
$KUBECFG -p 8080:9376 run container.cloud.google.com/_b_k8s_test/serve_hostname ${num_pods} my-hostname

function teardown() {
  echo "Cleaning up test artifacts"
  $KUBECFG stop my-hostname
  $KUBECFG rm my-hostname
}

trap "teardown" EXIT

pod_id_list=$($KUBECFG '-template={{range.items}}{{.id}} {{end}}' -l replicationController=my-hostname list pods)
# Pod turn up on a clean cluster can take a while for the docker image pull.
all_running=0
for i in $(seq 1 24); do
  echo "Waiting for pods to come up."
  sleep 5
  all_running=1
  for id in $pod_id_list; do
    current_status=$($KUBECFG -template '{{.currentState.status}}' get pods/$id) || true
    if [[ "$current_status" != "Running" ]]; then
      all_running=0
      break
    fi
  done
  if [[ "${all_running}" == 1 ]]; then
    break
  fi
done
if [[ "${all_running}" == 0 ]]; then
  echo "Pods did not come up in time"
  exit 1
# Private image works only on GCE and GKE.
if [[ "${KUBERNETES_PROVIDER}" != "gce" ]] && [[ "${KUBERNETES_PROVIDER}" != "gke" ]]; then
  echo "WARNING: Skipping private.sh for cloud provider: ${KUBERNETES_PROVIDER}."
  exit 0
fi

# Get minion IP addresses
detect-minions

# let images stabilize
echo "Letting images stabilize"
sleep 5

# Verify that something is listening.
for id in ${pod_id_list}; do
  ip=$($KUBECFG -template '{{.currentState.hostIP}}' get pods/$id)
  echo "Trying to reach server that should be running at ${ip}:8080..."
  ok=0
  for i in $(seq 1 5); do
    curl --connect-timeout 1 "http://${ip}:8080" >/dev/null 2>&1 && ok=1 && break
    sleep 2
  done
done

exit 0
# Run the basic.sh test, but using this image.
export POD_IMG_SRV="container.cloud.google.com/_b_k8s_test/serve_hostname"
source "${KUBE_ROOT}/hack/e2e-suite/basic.sh"
@@ -200,6 +200,7 @@ function wait_for_pods() {
# $5: pod IDs
function wait_for_service_up() {
  local i
  local found_pods
  for i in $(seq 1 20); do
    results=($(ssh-to-node "${test_node}" "
        set -e;
@@ -207,7 +208,9 @@ function wait_for_service_up() {
          curl -s --connect-timeout 1 http://$2:$3;
        done | sort | uniq
        "))

    found_pods=$(sort_args "${results[@]:+${results[@]}}")
    echo "Checking if ${found_pods} == ${5}"
    if [[ "${found_pods}" == "$5" ]]; then
      break
    fi
@@ -299,6 +302,7 @@ fi
#
# Test 1: Prove that the service portal is alive.
#
echo "Test 1: Prove that the service portal is alive."
echo "Verifying the portals from the host"
wait_for_service_up "${svc1_name}" "${svc1_ip}" "${svc1_port}" \
    "${svc1_count}" "${svc1_pods}"
@@ -321,6 +325,7 @@ verify_from_container "${svc2_name}" "${svc2_ip}" "${svc2_port}" \
#
# Test 2: Bounce the proxy and make sure the portal comes back.
#
echo "Test 2: Bounce the proxy and make sure the portal comes back."
echo "Restarting kube-proxy"
restart-kube-proxy "${test_node}"
echo "Verifying the portals from the host"
@@ -337,6 +342,7 @@ verify_from_container "${svc2_name}" "${svc2_ip}" "${svc2_port}" \
#
# Test 3: Stop one service and make sure it is gone.
#
echo "Test 3: Stop one service and make sure it is gone."
stop_service "${svc1_name}"
wait_for_service_down "${svc1_name}" "${svc1_ip}" "${svc1_port}"

@@ -344,6 +350,7 @@ wait_for_service_down "${svc1_name}" "${svc1_ip}" "${svc1_port}"
# Test 4: Bring up another service.
# TODO: Actually add a test to force re-use.
#
echo "Test 4: Bring up another service."
svc3_name="service3"
svc3_port=80
svc3_count=3
@@ -369,6 +376,7 @@ verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
#
# Test 5: Remove the iptables rules, make sure they come back.
#
echo "Test 5: Remove the iptables rules, make sure they come back."
echo "Manually removing iptables rules"
ssh-to-node "${test_node}" "sudo iptables -t nat -F KUBE-PROXY"
echo "Verifying the portals from the host"
@@ -381,6 +389,7 @@ verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
#
# Test 6: Restart the master, make sure portals come back.
#
echo "Test 6: Restart the master, make sure portals come back."
echo "Restarting the master"
ssh-to-node "${master}" "sudo /etc/init.d/kube-apiserver restart"
sleep 5
@@ -394,6 +403,7 @@ verify_from_container "${svc3_name}" "${svc3_ip}" "${svc3_port}" \
#
# Test 7: Bring up another service, make sure it does not re-use Portal IPs.
#
echo "Test 7: Bring up another service, make sure it does not re-use Portal IPs."
svc4_name="service4"
svc4_port=80
svc4_count=3
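For context on the found_pods comparison in the wait_for_service_up fragments above: the function repeatedly curls the service portal from a node, collects the distinct hostnames that come back, and only succeeds once the sorted set matches the expected pod list. A simplified, self-contained sketch of that pattern, not the real services.sh code (names like hit_service and the direct positional arguments are illustrative):

#!/usr/bin/env bash
# Illustrative sketch of the "poll until the portal answers from every pod" pattern.
set -o errexit

svc_ip="$1"; svc_port="$2"; expected_pods="$3"   # expected_pods: space-separated, sorted

hit_service() {
  # Hit the portal a number of times and report the distinct hostnames seen.
  for i in $(seq 1 10); do
    curl -s --connect-timeout 1 "http://${svc_ip}:${svc_port}" || true
    echo
  done | sort | uniq | xargs
}

for attempt in $(seq 1 20); do
  found_pods="$(hit_service)"
  echo "Checking if '${found_pods}' == '${expected_pods}'"
  if [[ "${found_pods}" == "${expected_pods}" ]]; then
    echo "Service is answering from all expected pods."
    exit 0
  fi
  sleep 5
done

echo "Service never converged on the expected pod set." >&2
exit 1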
@@ -165,7 +165,7 @@ func Up() bool {

// Is the e2e cluster up?
func IsUp() bool {
	return runBash("get status", `$KUBECFG -server_version`)
	return runBash("get status", `$KUBECTL version`)
}

func tryUp() bool {