Merge pull request #17993 from eosrei/1111-minion-node-hack-e2e

Minion->Node rename: comments/vars for e2e.go, e2e.sh, resize_nodes.go for #1111
Marek Grabowski 2015-12-03 09:23:22 +01:00
commit 1a59e031e2
3 changed files with 9 additions and 9 deletions

e2e.go

@@ -60,7 +60,7 @@ const (
 	downloadDirName = "_output/downloads"
 	tarDirName      = "server"
 	tempDirName     = "upgrade-e2e-temp-dir"
-	minMinionCount  = 2
+	minNodeCount    = 2
 )

 var (
@@ -185,7 +185,7 @@ func Up() bool {
 // Ensure that the cluster is large enough to run the e2e tests.
 func ValidateClusterSize() {
-	// Check that there are at least minMinionCount minions running
+	// Check that there are at least minNodeCount nodes running
 	cmd := exec.Command(path.Join(*root, "hack/e2e-internal/e2e-cluster-size.sh"))
 	if *verbose {
 		cmd.Stderr = os.Stderr
@@ -200,8 +200,8 @@ func ValidateClusterSize() {
 		log.Fatalf("Could not count number of nodes to validate cluster size (%s)", err)
 	}
-	if numNodes < minMinionCount {
-		log.Fatalf("Cluster size (%d) is too small to run e2e tests. %d Minions are required.", numNodes, minMinionCount)
+	if numNodes < minNodeCount {
+		log.Fatalf("Cluster size (%d) is too small to run e2e tests. %d Nodes are required.", numNodes, minNodeCount)
 	}
 }
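Between those two hunks, ValidateClusterSize captures the script's stdout and parses it into numNodes. A minimal standalone sketch of that pattern, assuming the script prints a bare integer node count; the countNodes helper and the hard-coded script path are illustrative stand-ins for the real code, which builds the path from the *root flag and wires *verbose:

package main

import (
	"log"
	"os/exec"
	"strconv"
	"strings"
)

const minNodeCount = 2

// countNodes shells out to the cluster-size script and parses its stdout.
// The path is hard-coded here for brevity; the real code joins it with *root.
func countNodes() (int, error) {
	out, err := exec.Command("hack/e2e-internal/e2e-cluster-size.sh").Output()
	if err != nil {
		return 0, err
	}
	return strconv.Atoi(strings.TrimSpace(string(out)))
}

func main() {
	numNodes, err := countNodes()
	if err != nil {
		log.Fatalf("Could not count number of nodes to validate cluster size (%s)", err)
	}
	if numNodes < minNodeCount {
		log.Fatalf("Cluster size (%d) is too small to run e2e tests. %d Nodes are required.", numNodes, minNodeCount)
	}
}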

e2e.sh

@@ -320,7 +320,7 @@ GCE_FLAKY_TESTS=(
     "GCE\sL7\sLoadBalancer\sController" # issue: #17518
     "DaemonRestart\sController\sManager" # issue: #17829
     "Resource\susage\sof\ssystem\scontainers" # issue: #13931
-    "allows\sscheduling\sof\spods\son\sa\sminion\safter\sit\srejoins\sthe\scluster" # file: resize_nodes.go, issue: #17830
+    "allows\sscheduling\sof\spods\son\sa\snode\safter\sit\srejoins\sthe\scluster" # file: resize_nodes.go, issue: #17830
     "NodeOutOfDisk" # issue: #17687
 )
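Each GCE_FLAKY_TESTS entry is a regular expression matched against the full spec name that Ginkgo assembles from the nested Describe/Context/It strings, with \s standing in for literal spaces; that is why this entry has to be renamed in lockstep with the It() string in resize_nodes.go below. A rough Go illustration of the match, assuming an RE2-style engine and a spec name pieced together from the strings visible in this diff:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The renamed skip entry, exactly as it appears in GCE_FLAKY_TESTS.
	skip := regexp.MustCompile(`allows\sscheduling\sof\spods\son\sa\snode\safter\sit\srejoins\sthe\scluster`)

	// Spec name assembled from the Describe/Context/It strings in resize_nodes.go.
	spec := "Nodes Network when a node becomes unreachable " +
		"[replication controller] recreates pods scheduled on the unreachable node " +
		"AND allows scheduling of pods on a node after it rejoins the cluster"

	fmt.Println(skip.MatchString(spec)) // true: the flaky spec is skipped
}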
@@ -1229,7 +1229,7 @@ case ${JOB_NAME} in
     NUM_NODES="11"
     MASTER_SIZE="n1-standard-4"
     NODE_SIZE="n1-standard-8" # Note: can fit about 17 hollow nodes per core
-                              #       so NUM_NODES x cores_per_minion should
+                              #       so NUM_NODES x cores_per_node should
                               #       be set accordingly.
     KUBE_GCE_INSTANCE_PREFIX="kubemark1000"
     E2E_ZONE="asia-east1-a"
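Spelling out the corrected comment's sizing rule: at the comment's own estimate of about 17 hollow nodes per core, and taking n1-standard-8 as 8 vCPUs, the job's 11 nodes give roughly 11 x 8 x 17 ≈ 1496 hollow nodes, comfortably above the 1000 that the kubemark1000 prefix suggests this job targets. A sketch of that arithmetic:

package main

import "fmt"

func main() {
	const (
		numNodes           = 11 // NUM_NODES
		coresPerNode       = 8  // n1-standard-8 has 8 vCPUs
		hollowNodesPerCore = 17 // estimate from the config comment
	)
	capacity := numNodes * coresPerNode * hollowNodesPerCore
	fmt.Printf("capacity: about %d hollow nodes\n", capacity) // 1496
}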

resize_nodes.go

@@ -485,7 +485,7 @@ var _ = Describe("Nodes", func() {
 	})

 	Describe("Network", func() {
-		Context("when a minion node becomes unreachable", func() {
+		Context("when a node becomes unreachable", func() {
 			BeforeEach(func() {
 				SkipUnlessProviderIs("gce", "gke", "aws")
 				SkipUnlessNodeCountIsAtLeast(2)
@@ -497,8 +497,8 @@ var _ = Describe("Nodes", func() {
 			// 1. pods from uncontactable nodes are rescheduled
 			// 2. when a node joins the cluster, it can host new pods.
 			// Factor out the cases into two separate tests.
-			It("[replication controller] recreates pods scheduled on the unreachable minion node "+
-				"AND allows scheduling of pods on a minion after it rejoins the cluster", func() {
+			It("[replication controller] recreates pods scheduled on the unreachable node "+
+				"AND allows scheduling of pods on a node after it rejoins the cluster", func() {
 				// Create a replication controller for a service that serves its hostname.
 				// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
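The TODO above asks for the combined spec to be factored into two separate tests. A skeletal Ginkgo sketch of what that split could look like under the ginkgo v1 API of this era; the test bodies are placeholder comments, not the repository's actual network-partition helpers:

package e2e

import . "github.com/onsi/ginkgo"

var _ = Describe("Nodes", func() {
	Describe("Network", func() {
		Context("when a node becomes unreachable", func() {
			BeforeEach(func() {
				// Same guards as the combined test: only run on providers
				// where node connectivity can be manipulated, and only with
				// enough nodes that losing one still leaves a scheduling target.
			})

			It("[replication controller] recreates pods scheduled on the unreachable node", func() {
				// Cut the node off, then assert the replication controller
				// brings replacement pods up on the remaining nodes.
			})

			It("allows scheduling of pods on a node after it rejoins the cluster", func() {
				// Restore connectivity, then assert a newly created pod can
				// be scheduled onto the rejoined node.
			})
		})
	})
})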