Merge pull request #4066 from jlowdermilk/e2e-kubectl

Replace kubecfg with kubectl in e2e tests
Zach Loafman 2015-02-03 12:19:05 -08:00
commit 04cf8c9eed
7 changed files with 39 additions and 60 deletions
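As a reading aid, the recurring kubecfg-to-kubectl substitutions applied across the scripts below are (summary drawn from this diff, with FILE, NAME, and T as placeholders; not an exhaustive kubectl reference):

  $KUBECFG -c FILE create /pods              ->  ${KUBECTL} create -f FILE
  $KUBECFG '-template={{T}}' list pods       ->  ${KUBECTL} get pods -o template '--template={{T}}'
  $KUBECFG -template '{{T}}' get pods/NAME   ->  ${KUBECTL} get pods NAME -o template '--template={{T}}'
  $KUBECFG stop NAME; $KUBECFG rm NAME       ->  ${KUBECTL} stop rc NAME
  $KUBECFG delete pods/NAME                  ->  ${KUBECTL} delete pods NAME
  $KUBECFG delete services/NAME              ->  ${KUBECTL} delete services NAME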


@@ -29,23 +29,21 @@ source "${KUBE_ROOT}/cluster/$KUBERNETES_PROVIDER/util.sh"
GUESTBOOK="${KUBE_ROOT}/examples/guestbook"
# Launch the guestbook example
$KUBECFG -c "${GUESTBOOK}/redis-master.json" create /pods
$KUBECFG -c "${GUESTBOOK}/redis-master-service.json" create /services
$KUBECFG -c "${GUESTBOOK}/redis-slave-controller.json" create /replicationControllers
${KUBECTL} create -f "${GUESTBOOK}"
sleep 5
sleep 15
POD_LIST_1=$($KUBECFG '-template={{range.items}}{{.id}} {{end}}' list pods)
POD_LIST_1=$(${KUBECTL} get pods -o template '--template={{range.items}}{{.id}} {{end}}')
echo "Pods running: ${POD_LIST_1}"
$KUBECFG stop redis-slave-controller
# Needed until issue #103 gets fixed
sleep 25
$KUBECFG rm redis-slave-controller
$KUBECFG delete services/redis-master
$KUBECFG delete pods/redis-master
# TODO make this an actual test. Open up a firewall and use curl to post and
# read a message via the frontend
POD_LIST_2=$($KUBECFG '-template={{range.items}}{{.id}} {{end}}' list pods)
${KUBECTL} stop rc redis-slave-controller
${KUBECTL} delete services redis-master
${KUBECTL} delete pods redis-master
POD_LIST_2=$(${KUBECTL} get pods -o template '--template={{range.items}}{{.id}} {{end}}')
echo "Pods running after shutdown: ${POD_LIST_2}"
exit 0
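For reference, the template above iterates .items and prints each pod's .id followed by a space, so POD_LIST_1 and POD_LIST_2 come back as a single space-separated line; a hypothetical run (pod names invented for illustration) looks like:

  $ ${KUBECTL} get pods -o template '--template={{range.items}}{{.id}} {{end}}'
  redis-master frontend-controller-ab12c redis-slave-controller-xy98z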


@@ -37,12 +37,12 @@ fi
function teardown() {
echo "Cleaning up test artifacts"
for test in ${liveness_tests}; do
${KUBECFG} delete pods/liveness-${test}
${KUBECTL} delete pods liveness-${test}
done
}
function waitForNotPending() {
pod_id_list=$($KUBECFG '-template={{range.items}}{{.id}} {{end}}' -l test=liveness list pods)
pod_id_list=$(${KUBECTL} get pods -o template '--template={{range.items}}{{.id}} {{end}}' -l test=liveness)
# Pod turnup on a clean cluster can take a while for the docker image pull.
all_running=0
for i in $(seq 1 24); do
@@ -50,7 +50,7 @@ function waitForNotPending() {
sleep 5
all_running=1
for id in $pod_id_list; do
current_status=$($KUBECFG -template '{{.currentState.status}}' get pods/$id) || true
current_status=$(${KUBECTL} get pods $id -o template '--template={{.currentState.status}}') || true
if [[ "$current_status" == "Pending" ]]; then
all_running=0
break
@@ -70,18 +70,18 @@ trap "teardown" EXIT
for test in ${liveness_tests}; do
echo "Liveness test: ${test}"
${KUBECFG} -c ${KUBE_ROOT}/examples/liveness/${test}-liveness.yaml create pods
${KUBECTL} create -f ${KUBE_ROOT}/examples/liveness/${test}-liveness.yaml
waitForNotPending
before=$(${KUBECFG} '-template={{.currentState.info.liveness.restartCount}}' get pods/liveness-${test})
before=$(${KUBECTL} get pods "liveness-${test}" -o template '--template={{.currentState.info.liveness.restartCount}}')
while [[ "${before}" == "<no value>" ]]; do
before=$(${KUBECFG} '-template={{.currentState.info.liveness.restartCount}}' get pods/liveness-${test})
before=$(${KUBECTL} get pods "liveness-${test}" -o template '--template={{.currentState.info.liveness.restartCount}}')
done
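# Note: Go templates render a field that does not exist yet as the literal
# string "<no value>", so the loop above is just waiting for restartCount to
# be populated before taking the baseline reading.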
echo "Waiting for restarts."
for i in $(seq 1 24); do
sleep 10
after=$(${KUBECFG} '-template={{.currentState.info.liveness.restartCount}}' get pods/liveness-${test})
sleep 10
after=$(${KUBECTL} get pods "liveness-${test}" -o template '--template={{.currentState.info.liveness.restartCount}}')
echo "Restarts: ${after} > ${before}"
if [[ "${after}" == "<no value>" ]]; then
continue


@@ -28,7 +28,6 @@ source "${KUBE_ROOT}/cluster/$KUBERNETES_PROVIDER/util.sh"
MONITORING="${KUBE_ROOT}/cluster/addons/cluster-monitoring"
KUBECTL="${KUBE_ROOT}/cluster/kubectl.sh"
KUBECFG="${KUBE_ROOT}/cluster/kubecfg.sh"
BIGRAND=$(printf "%x\n" $(( $RANDOM << 16 | $RANDOM ))) # random 2^32 in hex
MONITORING_FIREWALL_RULE="monitoring-test-${BIGRAND}"
@@ -49,13 +48,9 @@ function setup {
}
function cleanup {
"${KUBECFG}" resize monitoring-influx-grafana-controller 0 &> /dev/null || true
"${KUBECFG}" resize monitoring-heapster-controller 0 &> /dev/null || true
while "${KUBECTL}" get pods -l "name=influxGrafana" -o template -t {{range.items}}{{.id}}:{{end}} | grep -c . &> /dev/null \
|| "${KUBECTL}" get pods -l "name=heapster" -o template -t {{range.items}}{{.id}}:{{end}} | grep -c . &> /dev/null; do
sleep 2
done
"${KUBECTL}" delete -f "${MONITORING}/" &> /dev/null || true
"${KUBECTL}" stop rc monitoring-influx-grafana-controller &> /dev/null || true
"${KUBECTL}" stop rc monitoring-heapster-controller &> /dev/null || true
"${KUBECTL}" delete -f "${MONITORING}/" &> /dev/null || true
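# Note: "kubectl stop rc" is expected to scale the controller down to zero and
# then delete it, which is what the removed resize-to-zero-and-poll sequence
# above did by hand.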
# This only has work to do on gce and gke
if [[ "${KUBERNETES_PROVIDER}" == "gce" ]] || [[ "${KUBERNETES_PROVIDER}" == "gke" ]]; then


@@ -35,13 +35,13 @@ config="/tmp/${disk_name}.yaml"
function delete_pd_pod() {
# Delete the pod; this should unmount the PD
${KUBECFG} delete pods/testpd
${KUBECTL} delete pods testpd
for i in $(seq 1 30); do
echo "Waiting for pod to be deleted."
sleep 5
all_running=0
for id in $pod_id_list; do
current_status=$($KUBECFG -template '{{.currentState.status}}' get pods/$id) || true
current_status=$(${KUBECTL} get pods $id -o template '--template={{.currentState.status}}') || true
if [[ "$current_status" == "Running" ]]; then
all_running=1
break
@@ -99,17 +99,17 @@ perl -p -e "s/%.*%/${disk_name}/g" ${KUBE_ROOT}/examples/gce-pd/testpd.yaml > ${
# Create and format the disk.
"${GCLOUD}" compute disks create --zone="${ZONE}" --size=10GB "${disk_name}"
"${GCLOUD}" compute instances attach-disk --zone="${ZONE}" --disk="${disk_name}" \
--device-name temp-data "${MASTER_NAME}"
--device-name tempdata "${MASTER_NAME}"
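# Note: GCE exposes a disk attached with --device-name NAME on the instance as
# /dev/disk/by-id/google-NAME, which is why the device name and the
# safe_format_and_mount path below change together to google-tempdata.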
"${GCLOUD}" compute ssh --zone="${ZONE}" "${MASTER_NAME}" --command "sudo rm -rf /mnt/tmp"
"${GCLOUD}" compute ssh --zone="${ZONE}" "${MASTER_NAME}" --command "sudo mkdir -p /mnt/tmp"
"${GCLOUD}" compute ssh --zone="${ZONE}" "${MASTER_NAME}" --command "sudo /usr/share/google/safe_format_and_mount /dev/disk/by-id/google-temp-data /mnt/tmp"
"${GCLOUD}" compute ssh --zone="${ZONE}" "${MASTER_NAME}" --command "sudo /usr/share/google/safe_format_and_mount /dev/disk/by-id/google-tempdata /mnt/tmp"
"${GCLOUD}" compute ssh --zone="${ZONE}" "${MASTER_NAME}" --command "sudo umount /mnt/tmp"
"${GCLOUD}" compute instances detach-disk --zone="${ZONE}" --disk "${disk_name}" "${MASTER_NAME}"
# Create a pod that uses the PD
${KUBECFG} -c ${config} create pods
${KUBECTL} create -f ${config}
pod_id_list=$($KUBECFG '-template={{range.items}}{{.id}} {{end}}' -l test=testpd list pods)
pod_id_list=$(${KUBECTL} get pods -o template '--template={{range.items}}{{.id}} {{end}}' -l test=testpd)
# Pod turnup on a clean cluster can take a while for the docker image
# pull, and even longer if the PD mount takes a bit.
all_running=0
@@ -118,7 +118,7 @@ for i in $(seq 1 30); do
sleep 5
all_running=1
for id in $pod_id_list; do
current_status=$($KUBECFG -template '{{.currentState.status}}' get pods/$id) || true
current_status=$(${KUBECTL} get pods $id -o template '--template={{.currentState.status}}') || true
if [[ "$current_status" != "Running" ]]; then
all_running=0
break
@@ -141,9 +141,9 @@ sleep 20
# Recreate the pod, this should re-mount the PD
${KUBECFG} -c ${config} create pods
${KUBECTL} create -f ${config}
pod_id_list=$($KUBECFG '-template={{range.items}}{{.id}} {{end}}' -l test=testpd list pods)
pod_id_list=$(${KUBECTL} get pods -o template '--template={{range.items}}{{.id}} {{end}}' -l test=testpd)
# Pod turnup on a clean cluster can take a while for the docker image pull.
all_running=0
for i in $(seq 1 30); do
@@ -151,7 +151,7 @@ for i in $(seq 1 30); do
sleep 5
all_running=1
for id in $pod_id_list; do
current_status=$($KUBECFG -template '{{.currentState.status}}' get pods/$id) || true
current_status=$(${KUBECTL} get pods $id -o template '--template={{.currentState.status}}') || true
if [[ "$current_status" != "Running" ]]; then
all_running=0
break


@@ -135,9 +135,8 @@ __EOF__
# $1: service name
function stop_service() {
echo "Stopping service '$1'"
${KUBECFG} stop "$1" || true
${KUBECFG} delete "/replicationControllers/$1" || true
${KUBECFG} delete "/services/$1" || true
${KUBECTL} stop rc "$1" || true
${KUBECTL} delete services "$1" || true
}
# Args:
@@ -309,9 +308,9 @@ svc1_pods=$(query_pods "${svc1_name}" "${svc1_count}")
svc2_pods=$(query_pods "${svc2_name}" "${svc2_count}")
# Get the portal IPs.
svc1_ip=$(${KUBECFG} -template '{{.portalIP}}' get "services/${svc1_name}")
svc1_ip=$(${KUBECTL} get services -o template '--template={{.portalIP}}' "${svc1_name}")
test -n "${svc1_ip}" || error "Service1 IP is blank"
svc2_ip=$(${KUBECFG} -template '{{.portalIP}}' get "services/${svc2_name}")
svc2_ip=$(${KUBECTL} get services -o template '--template={{.portalIP}}' "${svc2_name}")
test -n "${svc2_ip}" || error "Service2 IP is blank"
if [[ "${svc1_ip}" == "${svc2_ip}" ]]; then
error "Portal IPs conflict: ${svc1_ip}"
@@ -381,7 +380,7 @@ wait_for_pods "${svc3_name}" "${svc3_count}"
svc3_pods=$(query_pods "${svc3_name}" "${svc3_count}")
# Get the portal IP.
svc3_ip=$(${KUBECFG} -template '{{.portalIP}}' get "services/${svc3_name}")
svc3_ip=$(${KUBECTL} get services -o template '--template={{.portalIP}}' "${svc3_name}")
test -n "${svc3_ip}" || error "Service3 IP is blank"
echo "Verifying the portals from the host"
@@ -437,7 +436,7 @@ wait_for_pods "${svc4_name}" "${svc4_count}"
svc4_pods=$(query_pods "${svc4_name}" "${svc4_count}")
# Get the portal IP.
svc4_ip=$(${KUBECFG} -template '{{.portalIP}}' get "services/${svc4_name}")
svc4_ip=$(${KUBECTL} get services -o template '--template={{.portalIP}}' "${svc4_name}")
test -n "${svc4_ip}" || error "Service4 IP is blank"
if [[ "${svc4_ip}" == "${svc2_ip}" || "${svc4_ip}" == "${svc3_ip}" ]]; then
error "Portal IPs conflict: ${svc4_ip}"


@@ -53,7 +53,7 @@ function validate() {
for id in "${pod_id_list[@]+${pod_id_list[@]}}"; do
local template_string current_status current_image host_ip
# NB: kubectl & kubecfg add the "exists" function to the standard template functions.
# NB: kubectl adds the "exists" function to the standard template functions.
# This lets us check to see if the "running" entry exists for each of the containers
# we care about. Exists will never return an error and it's safe to check a chain of
# things, any one of which may not exist. In the below template, all of info,
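Based on this comment, a template check using the exists helper looks roughly like the following (a sketch only; the container name is hypothetical and the full template is not shown in this hunk):

  --template='{{exists . "currentState" "info" "mycontainer" "state" "running"}}'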


@@ -57,7 +57,6 @@ var (
"You can explicitly set to false if you're, e.g., testing client changes "+
"for which the server version doesn't make a difference.")
cfgCmd = flag.String("cfg", "", "If nonempty, pass this as an argument, and call kubecfg. Implies -v.")
ctlCmd = flag.String("ctl", "", "If nonempty, pass this as an argument, and call kubectl. Implies -v. (-test, -cfg, -ctl are mutually exclusive)")
)
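With the -cfg flag removed, one-off commands against the test cluster go through -ctl only; a hypothetical invocation (assuming the driver is run as hack/e2e.go, a path this diff does not show) would be:

  go run hack/e2e.go -v -ctl='get pods'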
@@ -164,8 +163,6 @@ func main() {
failure := false
switch {
case *cfgCmd != "":
failure = !runBash("'kubecfg "+*cfgCmd+"'", "$KUBECFG "+*cfgCmd)
case *ctlCmd != "":
failure = !runBash("'kubectl "+*ctlCmd+"'", "$KUBECTL "+*ctlCmd)
case *tests != "":
@@ -536,15 +533,6 @@ func printPrefixedLines(prefix, s string) {
}
}
// returns either "", or a list of args intended for appending with the
// kubecfg or kubectl commands (beginning with a space).
func kubecfgArgs() string {
if *checkVersionSkew {
return " -expect_version_match"
}
return ""
}
// returns either "", or a list of args intended for appending with the
// kubectl command (beginning with a space).
func kubectlArgs() string {
@@ -564,7 +552,6 @@ export KUBE_CONFIG_FILE="config-test.sh"
# TODO(jbeda): This will break on usage if there is a space in
# ${KUBE_ROOT}. Convert to an array? Or an exported function?
export KUBECFG="` + versionRoot + `/cluster/kubecfg.sh` + kubecfgArgs() + `"
export KUBECTL="` + versionRoot + `/cluster/kubectl.sh` + kubectlArgs() + `"
source "` + *root + `/cluster/kube-env.sh"