Merge pull request #3656 from jbeda/vagrant-e2e

Fix up a bunch of vagrant stuff to enable e2e
Merged by Brian Grant on 2015-01-21 07:26:13 -08:00
commit 91533095e5
6 changed files with 88 additions and 28 deletions


@@ -98,11 +98,10 @@ grep -q kbr0 /etc/sysconfig/docker || {
 systemctl restart docker.service
 # setup iptables masquerade rules so the pods can reach the internet
-iptables -t nat -A POSTROUTING -s ${BRIDGE_BASE}.0.0/16 ! -d ${BRIDGE_BASE}.0.0/16 -j MASQUERADE
+iptables -t nat -A POSTROUTING -s ${CONTAINER_SUBNET} ! -d ${CONTAINER_SUBNET} -j MASQUERADE
 # persist please
 iptables-save >& /etc/sysconfig/iptables
 }
 EOF
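
The rewritten rule derives the masquerade source range from the single CONTAINER_SUBNET variable instead of composing it from BRIDGE_BASE. A minimal sketch of the resulting behavior, assuming CONTAINER_SUBNET is defined in the Vagrant cluster config (the value below is illustrative):

    # Illustrative value; the real setting comes from config-default.sh.
    CONTAINER_SUBNET="10.246.0.0/16"
    # NAT traffic leaving the container subnet for the outside world, but
    # leave pod-to-pod traffic (destination still inside the subnet) alone.
    iptables -t nat -A POSTROUTING -s "${CONTAINER_SUBNET}" ! -d "${CONTAINER_SUBNET}" -j MASQUERADE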


@@ -21,12 +21,12 @@ source "${KUBE_ROOT}/cluster/vagrant/${KUBE_CONFIG_FILE-"config-default.sh"}"
 function detect-master () {
 KUBE_MASTER_IP=$MASTER_IP
-echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}"
+echo "KUBE_MASTER_IP: ${KUBE_MASTER_IP}" 1>&2
 }
 # Get minion IP addresses and store in KUBE_MINION_IP_ADDRESSES[]
 function detect-minions {
-echo "Minions already detected"
+echo "Minions already detected" 1>&2
 KUBE_MINION_IP_ADDRESSES=("${MINION_IPS[@]}")
 }
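
The 1>&2 redirects added throughout this file matter because callers capture these functions' stdout; status messages have to go to stderr so they do not pollute the captured value. A small sketch of the failure mode being avoided (the helper below is hypothetical):

    # Hypothetical helper: status to stderr, machine-readable value to stdout.
    detect-ip() {
      echo "Detecting master IP" 1>&2
      echo "10.245.1.2"
    }
    master_ip=$(detect-ip)   # captures only "10.245.1.2", not the status line
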
@@ -40,6 +40,10 @@ function verify-prereqs {
 fi
 done
+# Set VAGRANT_CWD to KUBE_ROOT so that we find the right Vagrantfile no
+# matter what directory the tools are called from.
+export VAGRANT_CWD="${KUBE_ROOT}"
+export USING_KUBE_SCRIPTS=true
 }
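
Vagrant honors the VAGRANT_CWD environment variable when locating a Vagrantfile, so exporting it here pins every later vagrant invocation to the repo root. A usage sketch (paths are illustrative):

    # Without VAGRANT_CWD this would fail unless /tmp contained a Vagrantfile.
    cd /tmp
    VAGRANT_CWD="$HOME/kubernetes" vagrant status
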
@@ -155,13 +159,15 @@ function verify-cluster {
 done
 done
-echo
-echo "Kubernetes cluster is running. The master is running at:"
-echo
-echo "  https://${MASTER_IP}"
-echo
-echo "The user name and password to use is located in ~/.kubernetes_vagrant_auth."
-echo
+(
+echo
+echo "Kubernetes cluster is running. The master is running at:"
+echo
+echo "  https://${MASTER_IP}"
+echo
+echo "The user name and password to use is located in ~/.kubernetes_vagrant_auth."
+echo
+) 1>&2
 }
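
Grouping the banner in a subshell lets a single trailing redirect send every echo to stderr at once, instead of appending 1>&2 to each line. The pattern in isolation:

    print_banner() {
      (
        echo
        echo "Kubernetes cluster is running."
        echo
      ) 1>&2
    }
    ip=$(print_banner; echo "10.245.1.2")   # captures the IP; banner goes to stderr
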
@@ -218,23 +224,23 @@ function test-build-release {
 # Execute prior to running tests to initialize required structure
 function test-setup {
-echo "Vagrant test setup complete"
+echo "Vagrant test setup complete" 1>&2
 }
 # Execute after running tests to perform any required clean-up
 function test-teardown {
-echo "Vagrant ignores tear-down"
+echo "Vagrant ignores tear-down" 1>&2
 }
 # Set the {user} and {password} environment values required to interact with provider
 function get-password {
 export KUBE_USER=vagrant
 export KUBE_PASSWORD=vagrant
-echo "Using credentials: $KUBE_USER:$KUBE_PASSWORD"
+echo "Using credentials: $KUBE_USER:$KUBE_PASSWORD" 1>&2
 }
 # Find the minion name based on the IP address
-function find-minion-by-ip {
+function find-vagrant-name-by-ip {
 local ip="$1"
 local ip_pattern="${MINION_IP_BASE}(.*)"
@@ -247,12 +253,32 @@ function find-minion-by-ip {
 echo "minion-$((${BASH_REMATCH[1]} - 1))"
 }
-# SSH to a node by name ($1) and run a command ($2).
+# Find the vagrant machine name based on the host name of the minion
+function find-vagrant-name-by-minion-name {
+local ip="$1"
+local ip_pattern="${INSTANCE_PREFIX}-minion-(.*)"
+[[ $ip =~ $ip_pattern ]] || {
+return 1
+}
+echo "minion-${BASH_REMATCH[1]}"
+}
+# SSH to a node by name or IP ($1) and run a command ($2).
 function ssh-to-node {
 local node="$1"
 local cmd="$2"
 local machine
-machine=$(find-minion-by-ip $node)
+machine=$(find-vagrant-name-by-ip $node) || true
+[[ -n ${machine-} ]] || machine=$(find-vagrant-name-by-minion-name $node) || true
+[[ -n ${machine-} ]] || {
+echo "Cannot find machine to ssh to: $1"
+return 1
+}
 vagrant ssh "${machine}" -c "${cmd}" | grep -v "Connection to.*closed"
 }
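
The || true suffixes are load-bearing if, as is typical for these cluster scripts, the file runs under set -o errexit (an assumption here): without them, a failed first lookup would abort the whole script rather than fall through to the next resolver. The fallback chain in isolation (resolver names are hypothetical):

    set -o errexit
    machine=$(resolve-by-ip "$node") || true    # a miss must not kill the script
    [[ -n ${machine-} ]] || machine=$(resolve-by-name "$node") || true
    [[ -n ${machine-} ]] || { echo "Cannot find machine: ${node}" 1>&2; exit 1; }
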
@@ -262,16 +288,16 @@ function restart-kube-proxy {
 }
 function setup-monitoring {
-echo "TODO"
+echo "TODO" 1>&2
 }
 function teardown-monitoring {
-echo "TODO"
+echo "TODO" 1>&2
 }
 # Perform preparations required to run e2e tests
 function prepare-e2e() {
-echo "Vagrant doesn't need special preparations for e2e tests"
+echo "Vagrant doesn't need special preparations for e2e tests" 1>&2
 }
 function setup-logging {


@@ -24,6 +24,11 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
 source "${KUBE_ROOT}/cluster/kube-env.sh"
 source "${KUBE_ROOT}/cluster/${KUBERNETES_PROVIDER}/util.sh"
+if [[ "$KUBERNETES_PROVIDER" == "vagrant" ]]; then
+echo "WARNING: Skipping services.sh for ${KUBERNETES_PROVIDER}. See https://github.com/GoogleCloudPlatform/kubernetes/issues/3655"
+exit 0
+fi
 function error() {
 echo "$@" >&2
 exit 1
@@ -50,6 +55,7 @@ function join() {
 svcs_to_clean=()
 function do_teardown() {
 local svc
+return
 for svc in "${svcs_to_clean[@]:+${svcs_to_clean[@]}}"; do
 stop_service "${svc}"
 done
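
The bare return short-circuits do_teardown for all providers, leaving any started services in place, not just on vagrant (which already exits early via the warning above). If the skip were meant to be selective, a flag-guarded variant would make the intent explicit; a sketch using a hypothetical SKIP_TEARDOWN variable:

    function do_teardown() {
      local svc
      # Hypothetical opt-out; the committed code returns unconditionally.
      [[ "${SKIP_TEARDOWN:-}" == "true" ]] && return
      for svc in "${svcs_to_clean[@]:+${svcs_to_clean[@]}}"; do
        stop_service "${svc}"
      done
    }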


@@ -67,6 +67,7 @@ const (
 downloadDirName = "_output/downloads"
 tarDirName = "server"
 tempDirName = "upgrade-e2e-temp-dir"
+minMinionCount = 3
 )
 var (
@@ -196,6 +197,26 @@ func Up() bool {
 return runBash("up", path.Join(versionRoot, "/cluster/kube-up.sh; test-setup;"))
 }
+// Ensure that the cluster is large enough to run the e2e tests.
+func ValidateClusterSize() {
+// Check that there are at least 3 minions running
+res, stdout, _ := runBashWithOutputs(
+"validate cluster size",
+"cluster/kubectl.sh get minions --no-headers | wc -l")
+if !res {
+log.Fatal("Could not get nodes to validate cluster size")
+}
+numNodes, err := strconv.Atoi(strings.TrimSpace(stdout))
+if err != nil {
+log.Fatalf("Could not count number of nodes to validate cluster size (%s)", err)
+}
+if numNodes < minMinionCount {
+log.Fatalf("Cluster size (%d) is too small to run e2e tests. %d Minions are required.", numNodes, minMinionCount)
+}
+}
 // Is the e2e cluster up?
 func IsUp() bool {
 return runBash("get status", `$KUBECTL version`)
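
ValidateClusterSize shells out for the node count, so the whole check reduces to a short pipeline. A standalone shell equivalent of what the Go code runs (the kubectl.sh invocation is taken from the snippet above):

    min_minion_count=3
    num_nodes=$(cluster/kubectl.sh get minions --no-headers | wc -l | tr -d '[:space:]')
    if (( num_nodes < min_minion_count )); then
      echo "Cluster size (${num_nodes}) is too small; ${min_minion_count} minions are required." 1>&2
      exit 1
    fi
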
@@ -264,6 +285,8 @@ func Test() (results ResultsByTest) {
 log.Fatal("Testing requested, but e2e cluster not up!")
 }
+ValidateClusterSize()
 // run tests!
 dir, err := os.Open(filepath.Join(*root, "hack", "e2e-suite"))
 if err != nil {
@@ -440,8 +463,8 @@ func finishRunning(stepName string, cmd *exec.Cmd) (bool, string, string) {
 log.Printf("Running: %v", stepName)
 stdout, stderr := bytes.NewBuffer(nil), bytes.NewBuffer(nil)
 if *verbose {
-cmd.Stdout = os.Stdout
-cmd.Stderr = os.Stderr
+cmd.Stdout = io.MultiWriter(os.Stdout, stdout)
+cmd.Stderr = io.MultiWriter(os.Stderr, stderr)
 } else {
 cmd.Stdout = stdout
 cmd.Stderr = stderr
@@ -462,13 +485,9 @@ func finishRunning(stepName string, cmd *exec.Cmd) (bool, string, string) {
 if err := cmd.Run(); err != nil {
 log.Printf("Error running %v: %v", stepName, err)
-if !*verbose {
-return false, string(stdout.Bytes()), string(stderr.Bytes())
-} else {
-return false, "", ""
-}
+return false, string(stdout.Bytes()), string(stderr.Bytes())
 }
-return true, "", ""
+return true, string(stdout.Bytes()), string(stderr.Bytes())
 }
 func printBashOutputs(headerprefix, lineprefix, stdout, stderr string, escape bool) {
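
The io.MultiWriter change makes verbose runs both stream output live and keep a copy for the function's return values, where the old code streamed but returned empty strings. It is the Go analogue of tee(1); the same idea in shell (the script name is hypothetical):

    # Stream stdout/stderr to the terminal while also capturing copies on disk.
    ./run-tests.sh > >(tee stdout.log) 2> >(tee stderr.log 1>&2)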


@@ -37,6 +37,11 @@ func TestClusterDNS(c *client.Client) bool {
 return true
 }
+if testContext.provider == "vagrant" {
+glog.Infof("Skipping test which is broken for vagrant (See https://github.com/GoogleCloudPlatform/kubernetes/issues/3580)")
+return true
+}
 podClient := c.Pods(api.NamespaceDefault)
 //TODO: Wait for skyDNS

@@ -25,6 +25,11 @@ import (
 )
 func TestNetwork(c *client.Client) bool {
+if testContext.provider == "vagrant" {
+glog.Infof("Skipping test which is broken for vagrant (See https://github.com/GoogleCloudPlatform/kubernetes/issues/3580)")
+return true
+}
 ns := api.NamespaceDefault
 svc, err := c.Services(ns).Create(loadObjectOrDie(assetPath(
 "contrib", "for-tests", "network-tester", "service.json",