test: update replica set e2e test

Michail Kargakis 2017-03-11 16:50:36 +01:00
parent 03aba86974
commit b0b0ef7377
3 changed files with 65 additions and 92 deletions

@@ -73,7 +73,7 @@ func etcdFailTest(f *framework.Framework, failCommand, fixCommand string) {
checkExistingRCRecovers(f)
ServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
testReplicationControllerServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
}
// For this duration, etcd will be failed by executing a failCommand on the master.

@@ -37,18 +37,18 @@ var _ = framework.KubeDescribe("ReplicationController", func() {
f := framework.NewDefaultFramework("replication-controller")
It("should serve a basic image on each replica with a public image [Conformance]", func() {
ServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
testReplicationControllerServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
})
It("should serve a basic image on each replica with a private image", func() {
// requires private images
framework.SkipUnlessProviderIs("gce", "gke")
ServeImageOrFail(f, "private", "gcr.io/k8s-authenticated-test/serve_hostname:v1.4")
testReplicationControllerServeImageOrFail(f, "private", "gcr.io/k8s-authenticated-test/serve_hostname:v1.4")
})
It("should surface a failure condition on a common issue like exceeded quota", func() {
rcConditionCheck(f)
testReplicationControllerConditionCheck(f)
})
})
@@ -81,70 +81,57 @@ func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageNa
// A basic test to check the deployment of an image using
// a replication controller. The image serves its hostname
// which is checked for each replica.
func ServeImageOrFail(f *framework.Framework, test string, image string) {
func testReplicationControllerServeImageOrFail(f *framework.Framework, test string, image string) {
name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
replicas := int32(2)
replicas := int32(1)
// Create a replication controller for a service
// that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is
// in contrib/for-demos/serve_hostname
By(fmt.Sprintf("Creating replication controller %s", name))
controller, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(&v1.ReplicationController{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: v1.ReplicationControllerSpec{
Replicas: func(i int32) *int32 { return &i }(replicas),
Selector: map[string]string{
"name": name,
},
Template: &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": name},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: name,
Image: image,
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
},
},
},
})
newRC := newRC(name, replicas, map[string]string{"name": name}, name, image)
newRC.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
_, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(newRC)
Expect(err).NotTo(HaveOccurred())
// Cleanup the replication controller when we are done.
defer func() {
// Resize the replication controller to zero to get rid of pods.
if err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, controller.Name); err != nil {
framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
}
}()
// List the pods, making sure we observe all the replicas.
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
// Check that pods for the new RC were created.
// TODO: Maybe switch PodsCreated to just check owner references.
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
By("Ensuring each pod is running")
Expect(err).NotTo(HaveOccurred())
// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
framework.Logf("Ensuring all pods for ReplicationController %q are running", name)
running := int32(0)
for _, pod := range pods.Items {
if pod.DeletionTimestamp != nil {
continue
}
err = f.WaitForPodRunning(pod.Name)
if err != nil {
updatePod, getErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
if getErr == nil {
err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
} else {
err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
}
}
Expect(err).NotTo(HaveOccurred())
framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
running++
}
// Sanity check
if running != replicas {
Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(HaveOccurred())
}
// Verify that something is listening.
By("Trying to dial each unique pod")
framework.Logf("Trying to dial the pod")
retryTimeout := 2 * time.Minute
retryInterval := 5 * time.Second
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
if err != nil {
framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
@@ -155,12 +142,12 @@ func ServeImageOrFail(f *framework.Framework, test string, image string) {
// 2. Create a replication controller that wants to run 3 pods.
// 3. Check replication controller conditions for a ReplicaFailure condition.
// 4. Relax quota or scale down the controller and observe the condition is gone.
func rcConditionCheck(f *framework.Framework) {
func testReplicationControllerConditionCheck(f *framework.Framework) {
c := f.ClientSet
namespace := f.Namespace.Name
name := "condition-test"
By(fmt.Sprintf("Creating quota %q that allows only two pods to run in the current namespace", name))
framework.Logf("Creating quota %q that allows only two pods to run in the current namespace", name)
quota := newPodQuota(name, "2")
_, err := c.Core().ResourceQuotas(namespace).Create(quota)
Expect(err).NotTo(HaveOccurred())
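
The quota helper newPodQuota called above is not shown in this diff. As a rough sketch only (the helper shape and the import paths are assumptions based on the call site and the 1.6-era Kubernetes tree), a pod-count quota could be built like this:

    package e2e

    import (
        "k8s.io/apimachinery/pkg/api/resource"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/kubernetes/pkg/api/v1"
    )

    // newPodQuota sketches a ResourceQuota that caps the pod count in the
    // namespace; the test creates one allowing only two pods so that a
    // controller asking for three surfaces a ReplicaFailure condition.
    // Not code from this commit.
    func newPodQuota(name, number string) *v1.ResourceQuota {
        return &v1.ResourceQuota{
            ObjectMeta: metav1.ObjectMeta{Name: name},
            Spec: v1.ResourceQuotaSpec{
                Hard: v1.ResourceList{
                    v1.ResourcePods: resource.MustParse(number),
                },
            },
        }
    }
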

@@ -41,7 +41,7 @@ func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageNa
Name: rsName,
},
Spec: extensions.ReplicaSetSpec{
Replicas: func(i int32) *int32 { return &i }(replicas),
Replicas: &replicas,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: rsPodLabels,
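
Dropping the func(i int32) *int32 { return &i }(replicas) closure in favour of &replicas is safe because taking the address of a Go local or parameter always is; the value simply escapes to the heap. The only behavioural difference is that the closure returned a pointer to a copy, while &replicas aliases the local variable, which does not matter here since replicas is never mutated afterwards. A tiny illustration:

    package main

    import "fmt"

    func main() {
        replicas := int32(1)

        viaClosure := func(i int32) *int32 { return &i }(replicas) // pointer to a copy
        direct := &replicas                                        // pointer to the local itself

        fmt.Println(*viaClosure, *direct) // 1 1

        replicas = 3
        fmt.Println(*viaClosure, *direct) // 1 3: only the direct pointer sees the change
    }
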
@@ -77,87 +77,73 @@ var _ = framework.KubeDescribe("ReplicaSet", func() {
f := framework.NewDefaultFramework("replicaset")
It("should serve a basic image on each replica with a public image [Conformance]", func() {
ReplicaSetServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
testReplicaSetServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
})
It("should serve a basic image on each replica with a private image", func() {
// requires private images
framework.SkipUnlessProviderIs("gce", "gke")
ReplicaSetServeImageOrFail(f, "private", "gcr.io/k8s-authenticated-test/serve_hostname:v1.4")
testReplicaSetServeImageOrFail(f, "private", "gcr.io/k8s-authenticated-test/serve_hostname:v1.4")
})
It("should surface a failure condition on a common issue like exceeded quota", func() {
rsConditionCheck(f)
testReplicaSetConditionCheck(f)
})
})
// A basic test to check the deployment of an image using a ReplicaSet. The
// image serves its hostname which is checked for each replica.
func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image string) {
func testReplicaSetServeImageOrFail(f *framework.Framework, test string, image string) {
name := "my-hostname-" + test + "-" + string(uuid.NewUUID())
replicas := int32(2)
replicas := int32(1)
// Create a ReplicaSet for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is
// in contrib/for-demos/serve_hostname
By(fmt.Sprintf("Creating ReplicaSet %s", name))
rs, err := f.ClientSet.Extensions().ReplicaSets(f.Namespace.Name).Create(&extensions.ReplicaSet{
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
Spec: extensions.ReplicaSetSpec{
Replicas: func(i int32) *int32 { return &i }(replicas),
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{
"name": name,
}},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": name},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: name,
Image: image,
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
},
},
},
})
framework.Logf("Creating ReplicaSet %s", name)
newRS := newRS(name, replicas, map[string]string{"name": name}, name, image)
newRS.Spec.Template.Spec.Containers[0].Ports = []v1.ContainerPort{{ContainerPort: 9376}}
_, err := f.ClientSet.Extensions().ReplicaSets(f.Namespace.Name).Create(newRS)
Expect(err).NotTo(HaveOccurred())
// Cleanup the ReplicaSet when we are done.
defer func() {
// Resize the ReplicaSet to zero to get rid of pods.
if err := framework.DeleteReplicaSet(f.ClientSet, f.InternalClientset, f.Namespace.Name, rs.Name); err != nil {
framework.Logf("Failed to cleanup ReplicaSet %v: %v.", rs.Name, err)
}
}()
// List the pods, making sure we observe all the replicas.
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
// Check that pods for the new RS were created.
// TODO: Maybe switch PodsCreated to just check owner references.
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicas)
Expect(err).NotTo(HaveOccurred())
By("Ensuring each pod is running")
// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
framework.Logf("Ensuring a pod for ReplicaSet %q is running", name)
running := int32(0)
for _, pod := range pods.Items {
if pod.DeletionTimestamp != nil {
continue
}
err = f.WaitForPodRunning(pod.Name)
if err != nil {
updatePod, getErr := f.ClientSet.Core().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
if getErr == nil {
err = fmt.Errorf("Pod %q never run (phase: %s, conditions: %+v): %v", updatePod.Name, updatePod.Status.Phase, updatePod.Status.Conditions, err)
} else {
err = fmt.Errorf("Pod %q never run: %v", pod.Name, err)
}
}
Expect(err).NotTo(HaveOccurred())
framework.Logf("Pod %q is running (conditions: %+v)", pod.Name, pod.Status.Conditions)
running++
}
// Sanity check
if running != replicas {
Expect(fmt.Errorf("unexpected number of running pods: %+v", pods.Items)).NotTo(HaveOccurred())
}
// Verify that something is listening.
By("Trying to dial each unique pod")
framework.Logf("Trying to dial the pod")
retryTimeout := 2 * time.Minute
retryInterval := 5 * time.Second
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
err = wait.Poll(retryInterval, retryTimeout, framework.PodProxyResponseChecker(f.ClientSet, f.Namespace.Name, label, name, true, pods).CheckAllResponses)
if err != nil {
framework.Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
@@ -168,7 +154,7 @@ func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image strin
// 2. Create a replica set that wants to run 3 pods.
// 3. Check replica set conditions for a ReplicaFailure condition.
// 4. Scale down the replica set and observe the condition is gone.
func rsConditionCheck(f *framework.Framework) {
func testReplicaSetConditionCheck(f *framework.Framework) {
c := f.ClientSet
namespace := f.Namespace.Name
name := "condition-test"