fixit: break sig-cluster-lifecycle tests into subpackage

Mike Danese 2017-07-18 13:04:46 -07:00
parent 772c352992
commit 3c39173ee4
17 changed files with 228 additions and 97 deletions

@@ -804,6 +804,7 @@ test/e2e/instrumentation
test/e2e/instrumentation/logging
test/e2e/instrumentation/monitoring
test/e2e/kubectl
test/e2e/lifecycle
test/e2e/metrics
test/e2e/scalability
test/e2e/scheduling

@@ -24,6 +24,7 @@ go_test(
"//test/e2e/instrumentation/logging:go_default_library",
"//test/e2e/instrumentation/monitoring:go_default_library",
"//test/e2e/kubectl:go_default_library",
"//test/e2e/lifecycle:go_default_library",
"//test/e2e/metrics:go_default_library",
"//test/e2e/scalability:go_default_library",
"//test/e2e/scheduling:go_default_library",
@@ -46,11 +47,9 @@ go_test(
go_library(
name = "go_default_library",
srcs = [
"addon_update.go",
"apparmor.go",
"audit.go",
"certificates.go",
"cluster_upgrade.go",
"dashboard.go",
"dns.go",
"dns_common.go",
@@ -63,7 +62,6 @@ go_library(
"generated_clientset.go",
"gke_local_ssd.go",
"gke_node_pools.go",
"ha_master.go",
"ingress.go",
"kube_proxy.go",
"kubelet.go",
@@ -79,10 +77,7 @@ go_library(
"podpreset.go",
"pods.go",
"pre_stop.go",
"reboot.go",
"resize_nodes.go",
"resource_quota.go",
"restart.go",
"security_context.go",
"service.go",
"service_accounts.go",
@@ -107,32 +102,26 @@ go_library(
"//pkg/controller/node:go_default_library",
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
"//pkg/kubelet/pod:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/quota/evaluator/core:go_default_library",
"//pkg/util/logs:go_default_library",
"//pkg/util/version:go_default_library",
"//plugin/pkg/admission/serviceaccount:go_default_library",
"//test/e2e/chaosmonkey:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/generated:go_default_library",
"//test/e2e/manifest:go_default_library",
"//test/e2e/metrics:go_default_library",
"//test/e2e/upgrades:go_default_library",
"//test/e2e/upgrades/apps:go_default_library",
"//test/e2e_federation:go_default_library",
"//test/images/net/nat:go_default_library",
"//test/utils:go_default_library",
"//test/utils/junit:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/google/cadvisor/info/v1:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/ginkgo/config:go_default_library",
"//vendor/github.com/onsi/ginkgo/reporters:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/golang.org/x/crypto/ssh:go_default_library",
"//vendor/k8s.io/api/batch/v1:go_default_library",
"//vendor/k8s.io/api/batch/v2alpha1:go_default_library",
"//vendor/k8s.io/api/certificates/v1beta1:go_default_library",
@@ -153,7 +142,6 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
@@ -191,6 +179,7 @@ filegroup(
"//test/e2e/generated:all-srcs",
"//test/e2e/instrumentation:all-srcs",
"//test/e2e/kubectl:all-srcs",
"//test/e2e/lifecycle:all-srcs",
"//test/e2e/manifest:all-srcs",
"//test/e2e/metrics:all-srcs",
"//test/e2e/perftype:all-srcs",

@@ -17,7 +17,18 @@ limitations under the License.
package common
import (
"fmt"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
type Suite string
@@ -49,3 +60,72 @@ var CommonImageWhiteList = sets.NewString(
"gcr.io/google_containers/volume-nfs:0.8",
"gcr.io/google_containers/volume-gluster:0.2",
)
func svcByName(name string, port int) *v1.Service {
return &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Selector: map[string]string{
"name": name,
},
Ports: []v1.ServicePort{{
Port: int32(port),
TargetPort: intstr.FromInt(port),
}},
},
}
}
func NewSVCByName(c clientset.Interface, ns, name string) error {
const testPort = 9376
_, err := c.Core().Services(ns).Create(svcByName(name, testPort))
return err
}
// NewRCByName creates a replication controller with a selector by name of name.
func NewRCByName(c clientset.Interface, ns, name string, replicas int32, gracePeriod *int64) (*v1.ReplicationController, error) {
By(fmt.Sprintf("creating replication controller %s", name))
return c.Core().ReplicationControllers(ns).Create(framework.RcByNamePort(
name, replicas, framework.ServeHostnameImage, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod))
}
func RestartNodes(c clientset.Interface, nodeNames []string) error {
// List old boot IDs.
oldBootIDs := make(map[string]string)
for _, name := range nodeNames {
node, err := c.Core().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("error getting node info before reboot: %s", err)
}
oldBootIDs[name] = node.Status.NodeInfo.BootID
}
// Reboot the nodes.
args := []string{
"compute",
fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
"instances",
"reset",
}
args = append(args, nodeNames...)
args = append(args, fmt.Sprintf("--zone=%s", framework.TestContext.CloudConfig.Zone))
stdout, stderr, err := framework.RunCmd("gcloud", args...)
if err != nil {
return fmt.Errorf("error restarting nodes: %s\nstdout: %s\nstderr: %s", err, stdout, stderr)
}
// Wait for their boot IDs to change.
for _, name := range nodeNames {
if err := wait.Poll(30*time.Second, 5*time.Minute, func() (bool, error) {
node, err := c.Core().Nodes().Get(name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("error getting node info after reboot: %s", err)
}
return node.Status.NodeInfo.BootID != oldBootIDs[name], nil
}); err != nil {
return fmt.Errorf("error waiting for node %s boot ID to change: %s", name, err)
}
}
return nil
}
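
The helpers above are the pieces pulled out of resize_nodes.go and exported from test/e2e/common so the new lifecycle package and the network-partition tests can share them. A minimal sketch of a caller, assuming the usual e2e setup where the client and namespace come from a framework.Framework (the function name and the "my-hostname" resource name are illustrative, not part of the commit):

package lifecycle

import (
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/common"
	"k8s.io/kubernetes/test/e2e/framework"
)

// exampleResizeSetup is a hypothetical helper mirroring what the resize
// and network-partition specs do with the shared functions.
func exampleResizeSetup(c clientset.Interface, ns string, replicas int32) error {
	// Expose the serve_hostname pods through a NodePort service on port 9376.
	if err := common.NewSVCByName(c, ns, "my-hostname"); err != nil {
		return err
	}
	// Back the service with a replication controller; a nil grace period
	// keeps the default.
	if _, err := common.NewRCByName(c, ns, "my-hostname", replicas, nil); err != nil {
		return err
	}
	// Wait until every replica is running and responding.
	return framework.VerifyPods(c, ns, "my-hostname", true, replicas)
}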

@@ -47,12 +47,6 @@ import (
)
const (
// imagePrePullingTimeout is the time we wait for the e2e-image-puller
// static pods to pull the list of seeded images. If they don't pull
// images within this time we simply log their output and carry on
// with the tests.
imagePrePullingTimeout = 5 * time.Minute
// TODO: Delete this once all the tests that depend upon it are moved out of test/e2e and into subdirs
podName = "pfpod"
)
@@ -172,12 +166,12 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
framework.Failf("Error waiting for all pods to be running and ready: %v", err)
}
if err := framework.WaitForPodsSuccess(c, metav1.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingTimeout); err != nil {
if err := framework.WaitForPodsSuccess(c, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout); err != nil {
// There is no guarantee that the image pulling will succeed in 3 minutes
// and we don't even run the image puller on all platforms (including GKE).
// We wait for it so we get an indication of failures in the logs, and to
// maximize benefit of image pre-pulling.
framework.Logf("WARNING: Image pulling pods failed to enter success in %v: %v", imagePrePullingTimeout, err)
framework.Logf("WARNING: Image pulling pods failed to enter success in %v: %v", framework.ImagePrePullingTimeout, err)
}
// Dump the output of the nethealth containers only once per run

@@ -25,6 +25,7 @@ import (
_ "k8s.io/kubernetes/test/e2e/instrumentation/logging"
_ "k8s.io/kubernetes/test/e2e/instrumentation/monitoring"
_ "k8s.io/kubernetes/test/e2e/kubectl"
_ "k8s.io/kubernetes/test/e2e/lifecycle"
_ "k8s.io/kubernetes/test/e2e/scalability"
_ "k8s.io/kubernetes/test/e2e/scheduling"
_ "k8s.io/kubernetes/test/e2e/storage"

@@ -183,6 +183,12 @@ const (
ServeHostnameImage = "gcr.io/google_containers/serve_hostname:v1.4"
// ssh port
sshPort = "22"
// ImagePrePullingTimeout is the time we wait for the e2e-image-puller
// static pods to pull the list of seeded images. If they don't pull
// images within this time we simply log their output and carry on
// with the tests.
ImagePrePullingTimeout = 5 * time.Minute
)
var (

test/e2e/lifecycle/BUILD (new file)

@@ -0,0 +1,59 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"addon_update.go",
"cluster_upgrade.go",
"ha_master.go",
"reboot.go",
"resize_nodes.go",
"restart.go",
"sig.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/kubelet/pod:go_default_library",
"//pkg/util/version:go_default_library",
"//test/e2e/chaosmonkey:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/framework/ginkgowrapper:go_default_library",
"//test/e2e/upgrades:go_default_library",
"//test/e2e/upgrades/apps:go_default_library",
"//test/utils:go_default_library",
"//test/utils/junit:go_default_library",
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/github.com/onsi/gomega:go_default_library",
"//vendor/golang.org/x/crypto/ssh:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/fields:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

@@ -0,0 +1,4 @@
approvers:
- sig-cluster-lifecycle-maintainers
reviewers:
- sig-cluster-lifecycle

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
package lifecycle
import (
"bytes"
@@ -205,7 +205,7 @@ type stringPair struct {
data, fileName string
}
var _ = framework.KubeDescribe("Addon update", func() {
var _ = SIGDescribe("Addon update", func() {
var dir string
var sshClient *ssh.Client

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
package lifecycle
import (
"encoding/xml"
@@ -58,13 +58,16 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
// Create the frameworks here because we can only create them
// in a "Describe".
testFrameworks := createUpgradeFrameworks()
framework.KubeDescribe("master upgrade", func() {
SIGDescribe("master upgrade", func() {
It("should maintain a functioning cluster [Feature:MasterUpgrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "Master upgrade"}
masterUpgradeTest := &junit.TestCase{Name: "master-upgrade", Classname: "upgrade_tests"}
masterUpgradeTest := &junit.TestCase{
Name: "[sig-cluster-lifecycle] master-upgrade",
Classname: "upgrade_tests",
}
testSuite.TestCases = append(testSuite.TestCases, masterUpgradeTest)
upgradeFunc := func() {
@@ -78,13 +81,16 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
})
})
framework.KubeDescribe("node upgrade", func() {
SIGDescribe("node upgrade", func() {
It("should maintain a functioning cluster [Feature:NodeUpgrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "Node upgrade"}
nodeUpgradeTest := &junit.TestCase{Name: "node-upgrade", Classname: "upgrade_tests"}
nodeUpgradeTest := &junit.TestCase{
Name: "node-upgrade",
Classname: "upgrade_tests",
}
upgradeFunc := func() {
start := time.Now()
@@ -97,13 +103,13 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
})
})
framework.KubeDescribe("cluster upgrade", func() {
SIGDescribe("cluster upgrade", func() {
It("should maintain a functioning cluster [Feature:ClusterUpgrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "Cluster upgrade"}
clusterUpgradeTest := &junit.TestCase{Name: "cluster-upgrade", Classname: "upgrade_tests"}
clusterUpgradeTest := &junit.TestCase{Name: "[sig-cluster-lifecycle] cluster-upgrade", Classname: "upgrade_tests"}
testSuite.TestCases = append(testSuite.TestCases, clusterUpgradeTest)
upgradeFunc := func() {
start := time.Now()
@@ -119,20 +125,20 @@ var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
})
})
var _ = framework.KubeDescribe("Downgrade [Feature:Downgrade]", func() {
var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() {
f := framework.NewDefaultFramework("cluster-downgrade")
// Create the frameworks here because we can only create them
// in a "Describe".
testFrameworks := createUpgradeFrameworks()
framework.KubeDescribe("cluster downgrade", func() {
SIGDescribe("cluster downgrade", func() {
It("should maintain a functioning cluster [Feature:ClusterDowngrade]", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "Cluster downgrade"}
clusterDowngradeTest := &junit.TestCase{Name: "cluster-downgrade", Classname: "upgrade_tests"}
clusterDowngradeTest := &junit.TestCase{Name: "[sig-cluster-lifecycle] cluster-downgrade", Classname: "upgrade_tests"}
testSuite.TestCases = append(testSuite.TestCases, clusterDowngradeTest)
upgradeFunc := func() {
@@ -150,19 +156,19 @@ var _ = framework.KubeDescribe("Downgrade [Feature:Downgrade]", func() {
})
})
var _ = framework.KubeDescribe("etcd Upgrade [Feature:EtcdUpgrade]", func() {
var _ = SIGDescribe("etcd Upgrade [Feature:EtcdUpgrade]", func() {
f := framework.NewDefaultFramework("etc-upgrade")
// Create the frameworks here because we can only create them
// in a "Describe".
testFrameworks := createUpgradeFrameworks()
framework.KubeDescribe("etcd upgrade", func() {
SIGDescribe("etcd upgrade", func() {
It("should maintain a functioning cluster", func() {
upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), "")
framework.ExpectNoError(err)
testSuite := &junit.TestSuite{Name: "Etcd upgrade"}
etcdTest := &junit.TestCase{Name: "etcd-upgrade", Classname: "upgrade_tests"}
etcdTest := &junit.TestCase{Name: "[sig-cluster-lifecycle] etcd-upgrade", Classname: "upgrade_tests"}
testSuite.TestCases = append(testSuite.TestCases, etcdTest)
upgradeFunc := func() {

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
package lifecycle
import (
"fmt"
@@ -26,6 +26,7 @@ import (
. "github.com/onsi/ginkgo"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -76,7 +77,7 @@ func verifyRCs(c clientset.Interface, ns string, names []string) {
}
func createNewRC(c clientset.Interface, ns string, name string) {
_, err := newRCByName(c, ns, name, 1, nil)
_, err := common.NewRCByName(c, ns, name, 1, nil)
framework.ExpectNoError(err)
}
@@ -113,7 +114,7 @@ func removeZoneFromZones(zones []string, zone string) []string {
return zones
}
var _ = framework.KubeDescribe("HA-master [Feature:HAMaster]", func() {
var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
f := framework.NewDefaultFramework("ha-master")
var c clientset.Interface
var ns string

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
package lifecycle
import (
"fmt"
@@ -50,7 +50,7 @@ const (
rebootPodReadyAgainTimeout = 5 * time.Minute
)
var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
var f *framework.Framework
BeforeEach(func() {

@@ -14,60 +14,23 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
package lifecycle
import (
"fmt"
"strings"
"time"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
resizeNodeReadyTimeout = 2 * time.Minute
nodeReadinessTimeout = 3 * time.Minute
podNotReadyTimeout = 1 * time.Minute
podReadyTimeout = 2 * time.Minute
testPort = 9376
)
func svcByName(name string, port int) *v1.Service {
return &v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Selector: map[string]string{
"name": name,
},
Ports: []v1.ServicePort{{
Port: int32(port),
TargetPort: intstr.FromInt(port),
}},
},
}
}
func newSVCByName(c clientset.Interface, ns, name string) error {
_, err := c.Core().Services(ns).Create(svcByName(name, testPort))
return err
}
// newRCByName creates a replication controller with a selector by name of name.
func newRCByName(c clientset.Interface, ns, name string, replicas int32, gracePeriod *int64) (*v1.ReplicationController, error) {
By(fmt.Sprintf("creating replication controller %s", name))
return c.Core().ReplicationControllers(ns).Create(framework.RcByNamePort(
name, replicas, framework.ServeHostnameImage, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod))
}
const resizeNodeReadyTimeout = 2 * time.Minute
func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
rc, err := c.Core().ReplicationControllers(ns).Get(name, metav1.GetOptions{})
@@ -79,7 +42,7 @@ func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
return err
}
var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
var _ = SIGDescribe("Nodes [Disruptive]", func() {
f := framework.NewDefaultFramework("resize-nodes")
var systemPodsNo int32
var c clientset.Interface
@@ -101,7 +64,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
})
// Slow issue #13323 (8 min)
framework.KubeDescribe("Resize [Slow]", func() {
SIGDescribe("Resize [Slow]", func() {
var skipped bool
BeforeEach(func() {
@@ -144,7 +107,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, ignoreLabels)
Expect(err).NotTo(HaveOccurred())
By("waiting for image prepulling pods to complete")
framework.WaitForPodsSuccess(c, metav1.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingTimeout)
framework.WaitForPodsSuccess(c, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout)
})
It("should be able to delete nodes", func() {
@@ -152,7 +115,7 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-delete-node"
replicas := int32(framework.TestContext.CloudConfig.NumNodes)
newRCByName(c, ns, name, replicas, nil)
common.NewRCByName(c, ns, name, replicas, nil)
err := framework.VerifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred())
@@ -178,9 +141,9 @@ var _ = framework.KubeDescribe("Nodes [Disruptive]", func() {
// Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-add-node"
newSVCByName(c, ns, name)
common.NewSVCByName(c, ns, name)
replicas := int32(framework.TestContext.CloudConfig.NumNodes)
newRCByName(c, ns, name, replicas, nil)
common.NewRCByName(c, ns, name, replicas, nil)
err := framework.VerifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred())

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
package lifecycle
import (
"fmt"
@@ -55,7 +55,7 @@ func filterIrrelevantPods(pods []*v1.Pod) []*v1.Pod {
return results
}
var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
var _ = SIGDescribe("Restart [Disruptive]", func() {
f := framework.NewDefaultFramework("restart")
var ps *testutils.PodStore
var originalNodeNames []string

test/e2e/lifecycle/sig.go (new file)

@@ -0,0 +1,23 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package lifecycle
import "github.com/onsi/ginkgo"
func SIGDescribe(text string, body func()) bool {
return ginkgo.Describe("[sig-cluster-lifecycle] "+text, body)
}
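
This small wrapper is what the rest of the package calls in place of framework.KubeDescribe, so every spec is reported under the SIG label without repeating it. A usage sketch (the body is elided; reboot.go above shows the real thing):

// ginkgo and junit report the spec as
// "[sig-cluster-lifecycle] Reboot [Disruptive] [Feature:Reboot]".
var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
	// BeforeEach / It blocks go here.
})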

@@ -34,6 +34,7 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/api"
nodepkg "k8s.io/kubernetes/pkg/controller/node"
"k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
@@ -42,7 +43,11 @@ import (
)
const (
timeout = 60 * time.Second
timeout                = 60 * time.Second
podReadyTimeout        = 2 * time.Minute
podNotReadyTimeout     = 1 * time.Minute
nodeReadinessTimeout   = 3 * time.Minute
resizeNodeReadyTimeout = 2 * time.Minute
)
func expectNodeReadiness(isReady bool, newNode chan *v1.Node) {
@@ -231,9 +236,9 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow]
// Create a replication controller for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
name := "my-hostname-net"
newSVCByName(c, ns, name)
common.NewSVCByName(c, ns, name)
replicas := int32(framework.TestContext.CloudConfig.NumNodes)
newRCByName(c, ns, name, replicas, nil)
common.NewRCByName(c, ns, name, replicas, nil)
err := framework.VerifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")
@@ -296,9 +301,9 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow]
name := "my-hostname-net"
gracePeriod := int64(30)
newSVCByName(c, ns, name)
common.NewSVCByName(c, ns, name)
replicas := int32(framework.TestContext.CloudConfig.NumNodes)
newRCByName(c, ns, name, replicas, &gracePeriod)
common.NewRCByName(c, ns, name, replicas, &gracePeriod)
err := framework.VerifyPods(c, ns, name, true, replicas)
Expect(err).NotTo(HaveOccurred(), "Each pod should start running and responding")
@@ -372,7 +377,7 @@ var _ = framework.KubeDescribe("[sig-apps] Network Partition [Disruptive] [Slow]
nn := framework.TestContext.CloudConfig.NumNodes
nodeNames, err := framework.CheckNodesReady(f.ClientSet, framework.NodeReadyInitialTimeout, nn)
framework.ExpectNoError(err)
restartNodes(f, nodeNames)
common.RestartNodes(f.ClientSet, nodeNames)
By("waiting for pods to be running again")
pst.WaitForRunningAndReady(*ps.Spec.Replicas, ps)

@@ -40,7 +40,6 @@ import (
const maxNumberOfPods int64 = 10
const minPodCPURequest int64 = 500
const imagePrePullingTimeout = 5 * time.Minute
// variable set in BeforeEach, never modified afterwards
var masterNodes sets.String
@@ -101,7 +100,7 @@ var _ = SIGDescribe("SchedulerPredicates [Serial]", func() {
err = framework.WaitForPodsRunningReady(cs, metav1.NamespaceSystem, int32(systemPodsNo), 0, framework.PodReadyBeforeTimeout, ignoreLabels)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForPodsSuccess(cs, metav1.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingTimeout)
err = framework.WaitForPodsSuccess(cs, metav1.NamespaceSystem, framework.ImagePullerLabels, framework.ImagePrePullingTimeout)
Expect(err).NotTo(HaveOccurred())
for _, node := range nodeList.Items {