Merge pull request #19846 from madhusudancs/replicaset-e2e

Auto commit by PR queue bot
k8s-merge-robot 2016-02-07 13:46:57 -08:00
commit f15996d509
2 changed files with 167 additions and 0 deletions

test/e2e/replica_set.go

@@ -0,0 +1,118 @@
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util"
"k8s.io/kubernetes/pkg/util/wait"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = Describe("ReplicaSet", func() {
framework := NewFramework("replicaset")
It("should serve a basic image on each replica with a public image [Conformance]", func() {
ReplicaSetServeImageOrFail(framework, "basic", "gcr.io/google_containers/serve_hostname:1.1")
})
It("should serve a basic image on each replica with a private image", func() {
// requires private images
SkipUnlessProviderIs("gce", "gke")
ReplicaSetServeImageOrFail(framework, "private", "b.gcr.io/k8s_authenticated_test/serve_hostname:1.1")
})
})
// A basic test to check the deployment of an image using a ReplicaSet. The
// image serves its hostname, which is checked on each replica.
func ReplicaSetServeImageOrFail(f *Framework, test string, image string) {
name := "my-hostname-" + test + "-" + string(util.NewUUID())
replicas := 2
// Create a ReplicaSet for a service that serves its hostname.
// The source for the Docker container kubernetes/serve_hostname is
// in contrib/for-demos/serve_hostname
By(fmt.Sprintf("Creating ReplicaSet %s", name))
rs, err := f.Client.Extensions().ReplicaSets(f.Namespace.Name).Create(&extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{
Name: name,
},
Spec: extensions.ReplicaSetSpec{
Replicas: replicas,
Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{
"name": name,
}},
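// The selector must match the pod template's labels below, or the
// apiserver rejects the ReplicaSet at validation time.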
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"name": name},
},
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: name,
Image: image,
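// serve_hostname serves its hostname over HTTP on port 9376.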
Ports: []api.ContainerPort{{ContainerPort: 9376}},
},
},
},
},
},
})
Expect(err).NotTo(HaveOccurred())
// Clean up the ReplicaSet when we are done.
defer func() {
// Resize the ReplicaSet to zero to get rid of pods.
if err := DeleteReplicaSet(f.Client, f.Namespace.Name, rs.Name); err != nil {
Logf("Failed to cleanup ReplicaSet %v: %v.", rs.Name, err)
}
}()
// List the pods, making sure we observe all the replicas.
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
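// podsCreated polls the pod list until it observes the expected number of
// replicas carrying the name label.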
pods, err := podsCreated(f.Client, f.Namespace.Name, name, replicas)
Expect(err).NotTo(HaveOccurred())
By("Ensuring each pod is running")
// Wait for the pods to enter the running state. The wait loops until every
// pod is running, so a pod that never starts causes the test to time out.
for _, pod := range pods.Items {
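// Skip pods that are already being deleted; the ReplicaSet will replace them.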
if pod.DeletionTimestamp != nil {
continue
}
err = f.WaitForPodRunning(pod.Name)
Expect(err).NotTo(HaveOccurred())
}
// Verify that something is listening.
By("Trying to dial each unique pod")
retryTimeout := 2 * time.Minute
retryInterval := 5 * time.Second
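// checkAllResponses proxies an HTTP request to each pod through the
// apiserver and succeeds only once every pod has answered with its own
// hostname.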
err = wait.Poll(retryInterval, retryTimeout, podProxyResponseChecker{f.Client, f.Namespace.Name, label, name, true, pods}.checkAllResponses)
if err != nil {
Failf("Did not get expected responses within the timeout period of %.2f seconds.", retryTimeout.Seconds())
}
}

test/e2e/util.go

@@ -2001,6 +2001,55 @@ func waitForRCPodsGone(c *client.Client, rc *api.ReplicationController) error {
})
}
// DeleteReplicaSet deletes the given ReplicaSet and all the pods it spawned.
func DeleteReplicaSet(c *client.Client, ns, name string) error {
By(fmt.Sprintf("deleting ReplicaSet %s in namespace %s", name, ns))
rs, err := c.Extensions().ReplicaSets(ns).Get(name)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
return err
}
reaper, err := kubectl.ReaperFor(extensions.Kind("ReplicaSet"), c)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
return err
}
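// The reaper scales the ReplicaSet down to zero before deleting it, so its
// pods are not orphaned.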
startTime := time.Now()
err = reaper.Stop(ns, name, 0, api.NewDeleteOptions(0))
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
return nil
}
deleteRSTime := time.Since(startTime)
Logf("Deleting RS %s took: %v", name, deleteRSTime)
if err == nil {
err = waitForReplicaSetPodsGone(c, rs)
}
terminatePodTime := time.Since(startTime) - deleteRSTime
Logf("Terminating ReplicaSet %s pods took: %v", name, terminatePodTime)
return err
}
// waitForReplicaSetPodsGone waits until there are no pods reported under a
// ReplicaSet selector (because the pods have completed termination).
func waitForReplicaSetPodsGone(c *client.Client, rs *extensions.ReplicaSet) error {
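// PollImmediate runs the condition once right away, then at every poll
// interval until the two-minute timeout expires.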
return wait.PollImmediate(poll, 2*time.Minute, func() (bool, error) {
selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector)
expectNoError(err)
options := api.ListOptions{LabelSelector: selector}
if pods, err := c.Pods(rs.Namespace).List(options); err == nil && len(pods.Items) == 0 {
return true, nil
}
return false, nil
})
}
// waitForDeploymentStatus waits for the deployment to reach the desired state.
// It returns an error if minAvailable or maxCreated is violated at any time.
func waitForDeploymentStatus(c clientset.Interface, ns, deploymentName string, desiredUpdatedReplicas, minAvailable, maxCreated, minReadySeconds int) error {