Merge pull request #45733 from danwinship/network-test-timeout

Automatic merge from submit-queue (batch tested with PRs 45685, 45572, 45624, 45723, 45733)

Remove a test utility function that is redundant and kinda broken

Framework.WaitForAnEndpoint() has no timeout, so if something goes wrong and the endpoint never gets created, the test hangs forever. (This is currently happening intermittently in OpenShift, and when the CI system eventually times out and kills the VM, it loses the logs that would explain what failed.)

There's already another nearly-identical WaitForEndpoint() method that *does* take a timeout, so people can just use that instead.
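To make the difference concrete, here is a minimal, self-contained sketch of the two polling shapes; `checkEndpointReady` is a hypothetical stand-in for the real check against the Endpoints object, not framework API:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// checkEndpointReady is a hypothetical stand-in for "the Endpoints object
// exists and has at least one ready address".
func checkEndpointReady() (bool, error) { return false, nil }

// waitForever mirrors the removed helper's shape: if the condition never
// becomes true, the loop never exits, and the test hangs until the CI
// harness kills the VM (taking the useful logs with it).
func waitForever(poll time.Duration) error {
	for {
		if ok, err := checkEndpointReady(); err != nil || ok {
			return err
		}
		time.Sleep(poll)
	}
}

// waitWithTimeout mirrors the surviving helper's shape: the same poll loop,
// but bounded by a deadline, so a failure surfaces as a test error while
// the logs are still retrievable.
func waitWithTimeout(timeout, poll time.Duration) error {
	for start := time.Now(); time.Since(start) < timeout; time.Sleep(poll) {
		if ok, err := checkEndpointReady(); err != nil || ok {
			return err
		}
	}
	return errors.New("timed out waiting for endpoint")
}

func main() {
	fmt.Println(waitWithTimeout(6*time.Second, 2*time.Second))
}
```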

```release-note
NONE
```
Kubernetes Submit Queue, 2017-05-12 14:01:00 -07:00 (committed by GitHub)
commit 7a9c2f7f01
3 changed files with 5 additions and 48 deletions


```diff
@@ -30,7 +30,6 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/intstr"
@@ -407,52 +406,6 @@ func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *v1.Pod,
 	f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, MatchRegexp)
 }
 
-// WaitForAnEndpoint waits for at least one endpoint to become available in the
-// service's corresponding endpoints object.
-func (f *Framework) WaitForAnEndpoint(serviceName string) error {
-	for {
-		// TODO: Endpoints client should take a field selector so we
-		// don't have to list everything.
-		list, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(metav1.ListOptions{})
-		if err != nil {
-			return err
-		}
-		rv := list.ResourceVersion
-
-		isOK := func(e *v1.Endpoints) bool {
-			return e.Name == serviceName && len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0
-		}
-		for i := range list.Items {
-			if isOK(&list.Items[i]) {
-				return nil
-			}
-		}
-
-		options := metav1.ListOptions{
-			FieldSelector:   fields.Set{"metadata.name": serviceName}.AsSelector().String(),
-			ResourceVersion: rv,
-		}
-		w, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).Watch(options)
-		if err != nil {
-			return err
-		}
-		defer w.Stop()
-		for {
-			val, ok := <-w.ResultChan()
-			if !ok {
-				// reget and re-watch
-				break
-			}
-			if e, ok := val.Object.(*v1.Endpoints); ok {
-				if isOK(e) {
-					return nil
-				}
-			}
-		}
-	}
-}
-
 // Write a file using kubectl exec echo <contents> > <path> via specified container
 // Because of the primitive technique we're using here, we only allow ASCII alphanumeric characters
 func (f *Framework) WriteFileViaContainer(podName, containerName string, path string, contents string) error {
```
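The hang point in the removed function is the unguarded receive from the watch channel: if no matching event ever arrives, `<-w.ResultChan()` blocks indefinitely. One way to bound that kind of receive is a `select` against a deadline; the following is a self-contained sketch of the pattern with a plain channel standing in for the watch (names are illustrative, not framework API):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// recvUntil consumes events until match succeeds, the channel closes, or
// the deadline passes -- rather than blocking on the receive unconditionally
// the way the removed helper did.
func recvUntil(events <-chan string, match func(string) bool, timeout time.Duration) error {
	deadline := time.After(timeout)
	for {
		select {
		case ev, ok := <-events:
			if !ok {
				return errors.New("event channel closed; caller should re-list and re-watch")
			}
			if match(ev) {
				return nil
			}
		case <-deadline:
			return fmt.Errorf("timed out after %v", timeout)
		}
	}
}

func main() {
	events := make(chan string, 1)
	events <- "endpoint-ready"
	err := recvUntil(events, func(ev string) bool { return ev == "endpoint-ready" }, time.Second)
	fmt.Println(err) // <nil>
}
```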


```diff
@@ -1569,6 +1569,10 @@ func WaitForReplicationControllerwithSelector(c clientset.Interface, namespace s
 func WaitForEndpoint(c clientset.Interface, ns, name string) error {
 	for t := time.Now(); time.Since(t) < EndpointRegisterTimeout; time.Sleep(Poll) {
 		endpoint, err := c.Core().Endpoints(ns).Get(name, metav1.GetOptions{})
+		if apierrs.IsNotFound(err) {
+			Logf("Endpoint %s/%s is not ready yet", ns, name)
+			continue
+		}
 		Expect(err).NotTo(HaveOccurred())
 		if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 {
 			Logf("Endpoint %s/%s is not ready yet", ns, name)
```


```diff
@@ -150,7 +150,7 @@ var _ = framework.KubeDescribe("Proxy", func() {
 			Expect(framework.RunRC(cfg)).NotTo(HaveOccurred())
 			defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, cfg.Name)
-			Expect(f.WaitForAnEndpoint(service.Name)).NotTo(HaveOccurred())
+			Expect(framework.WaitForEndpoint(f.ClientSet, f.Namespace.Name, service.Name)).NotTo(HaveOccurred())
 
 			// table constructors
 			// Try proxying through the service and directly to through the pod.
```