Remove one slightly-broken wait-for-endpoints test util and fix another

This commit is contained in:
parent b4b5bfdb46, commit 35bb7825fe
@@ -30,7 +30,6 @@ import (
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/intstr"
@@ -407,52 +406,6 @@ func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *v1.Pod,
 	f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, MatchRegexp)
 }
-
-// WaitForAnEndpoint waits for at least one endpoint to become available in the
-// service's corresponding endpoints object.
-func (f *Framework) WaitForAnEndpoint(serviceName string) error {
-	for {
-		// TODO: Endpoints client should take a field selector so we
-		// don't have to list everything.
-		list, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(metav1.ListOptions{})
-		if err != nil {
-			return err
-		}
-		rv := list.ResourceVersion
-
-		isOK := func(e *v1.Endpoints) bool {
-			return e.Name == serviceName && len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0
-		}
-		for i := range list.Items {
-			if isOK(&list.Items[i]) {
-				return nil
-			}
-		}
-
-		options := metav1.ListOptions{
-			FieldSelector:   fields.Set{"metadata.name": serviceName}.AsSelector().String(),
-			ResourceVersion: rv,
-		}
-		w, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).Watch(options)
-		if err != nil {
-			return err
-		}
-		defer w.Stop()
-
-		for {
-			val, ok := <-w.ResultChan()
-			if !ok {
-				// reget and re-watch
-				break
-			}
-			if e, ok := val.Object.(*v1.Endpoints); ok {
-				if isOK(e) {
-					return nil
-				}
-			}
-		}
-	}
-}

 // Write a file using kubectl exec echo <contents> > <path> via specified container
 // Because of the primitive technique we're using here, we only allow ASCII alphanumeric characters
 func (f *Framework) WriteFileViaContainer(podName, containerName string, path string, contents string) error {
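The deleted helper above is the "slightly-broken" one from the commit message: it lists and re-watches endpoints in an unbounded loop, stacks up one `defer w.Stop()` per iteration, and never times out, so a service that never gets endpoints hangs the test instead of failing it. Below is a minimal sketch of the bounded, poll-based shape that the surviving helper takes, written as a standalone function. The name `waitForReadyEndpoint` and the interval/timeout parameters are illustrative, and the clientset calls follow the vendored client-go of this tree (`Core()`, no context argument) rather than current releases.

package endpointwait

import (
	"time"

	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForReadyEndpoint polls until the named Endpoints object has at least one
// address, or the timeout expires. NotFound is treated as "not ready yet"
// rather than as a hard failure.
func waitForReadyEndpoint(c clientset.Interface, ns, name string, interval, timeout time.Duration) error {
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		ep, err := c.Core().Endpoints(ns).Get(name, metav1.GetOptions{})
		if apierrs.IsNotFound(err) {
			return false, nil // not created yet; keep polling
		}
		if err != nil {
			return false, err // any other error aborts the wait
		}
		return len(ep.Subsets) > 0 && len(ep.Subsets[0].Addresses) > 0, nil
	})
}

A call such as `waitForReadyEndpoint(f.ClientSet, f.Namespace.Name, serviceName, 2*time.Second, 1*time.Minute)` would be roughly what `framework.WaitForEndpoint` does with its own `Poll` and `EndpointRegisterTimeout` constants.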
@@ -1569,6 +1569,10 @@ func WaitForReplicationControllerwithSelector(c clientset.Interface, namespace s
 func WaitForEndpoint(c clientset.Interface, ns, name string) error {
 	for t := time.Now(); time.Since(t) < EndpointRegisterTimeout; time.Sleep(Poll) {
 		endpoint, err := c.Core().Endpoints(ns).Get(name, metav1.GetOptions{})
+		if apierrs.IsNotFound(err) {
+			Logf("Endpoint %s/%s is not ready yet", ns, name)
+			continue
+		}
 		Expect(err).NotTo(HaveOccurred())
 		if len(endpoint.Subsets) == 0 || len(endpoint.Subsets[0].Addresses) == 0 {
 			Logf("Endpoint %s/%s is not ready yet", ns, name)
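The added lines make `WaitForEndpoint` treat a missing Endpoints object as "not ready yet" instead of letting `Expect(err).NotTo(HaveOccurred())` fail the spec on a NotFound error while the endpoints controller is still catching up. A quick way to exercise that branch against the sketch above, assuming the fake clientset shipped with the same vendored client-go (`k8s.io/client-go/kubernetes/fake`); the test name and the short durations are arbitrary.

package endpointwait

import (
	"testing"
	"time"

	"k8s.io/client-go/kubernetes/fake"
)

// TestWaitTimesOutWhenEndpointNeverAppears checks that a missing Endpoints
// object results in a timeout error rather than an immediate failure.
func TestWaitTimesOutWhenEndpointNeverAppears(t *testing.T) {
	client := fake.NewSimpleClientset() // no Endpoints objects registered
	err := waitForReadyEndpoint(client, "default", "no-such-service", 10*time.Millisecond, 50*time.Millisecond)
	if err == nil {
		t.Fatal("expected a timeout error for a service with no endpoints, got nil")
	}
}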
@@ -150,7 +150,7 @@ var _ = framework.KubeDescribe("Proxy", func() {
 			Expect(framework.RunRC(cfg)).NotTo(HaveOccurred())
 			defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, cfg.Name)

-			Expect(f.WaitForAnEndpoint(service.Name)).NotTo(HaveOccurred())
+			Expect(framework.WaitForEndpoint(f.ClientSet, f.Namespace.Name, service.Name)).NotTo(HaveOccurred())

 			// table constructors
 			// Try proxying through the service and directly to through the pod.