mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-07-31 07:20:13 +00:00
Updating EndpointSlice e2e tests to be less flaky and easier to debug
This adjusts tests that were waiting for Pods to be ready to instead just wait for them to have IPs assigned to them. This relies on the associated publishNotReadyAddresses field on Services. Additionally this increases the length of time we'll wait for EndpointSlices to be garbage collected from 12s to 30s. Finally, this adds additional logging to ExpectNoError calls so it's easier to understand where and why a test failed.
This commit is contained in:
parent
8e5c02255c
commit
3e4745ddaf
@ -49,14 +49,14 @@ var _ = SIGDescribe("EndpointSlice", func() {
|
||||
namespace := "default"
|
||||
name := "kubernetes"
|
||||
endpoints, err := cs.CoreV1().Endpoints(namespace).Get(context.TODO(), name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectNoError(err, "error creating Endpoints resource")
|
||||
if len(endpoints.Subsets) != 1 {
|
||||
framework.Failf("Expected 1 subset in endpoints, got %d: %#v", len(endpoints.Subsets), endpoints.Subsets)
|
||||
}
|
||||
|
||||
endpointSubset := endpoints.Subsets[0]
|
||||
endpointSlice, err := cs.DiscoveryV1beta1().EndpointSlices(namespace).Get(context.TODO(), name, metav1.GetOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectNoError(err, "error creating EndpointSlice resource")
|
||||
if len(endpointSlice.Ports) != len(endpointSubset.Ports) {
|
||||
framework.Failf("Expected EndpointSlice to have %d ports, got %d: %#v", len(endpointSubset.Ports), len(endpointSlice.Ports), endpointSlice.Ports)
|
||||
}
|
||||
@ -85,7 +85,7 @@ var _ = SIGDescribe("EndpointSlice", func() {
|
||||
})
|
||||
|
||||
// Expect Endpoints resource to be created.
|
||||
if err := wait.PollImmediate(2*time.Second, 12*time.Second, func() (bool, error) {
|
||||
if err := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
|
||||
_, err := cs.CoreV1().Endpoints(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, nil
|
||||
@ -97,7 +97,7 @@ var _ = SIGDescribe("EndpointSlice", func() {
|
||||
|
||||
// Expect EndpointSlice resource to be created.
|
||||
var endpointSlice discoveryv1beta1.EndpointSlice
|
||||
if err := wait.PollImmediate(2*time.Second, 12*time.Second, func() (bool, error) {
|
||||
if err := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
|
||||
endpointSliceList, err := cs.DiscoveryV1beta1().EndpointSlices(svc.Namespace).List(context.TODO(), metav1.ListOptions{
|
||||
LabelSelector: "kubernetes.io/service-name=" + svc.Name,
|
||||
})
|
||||
@ -126,10 +126,10 @@ var _ = SIGDescribe("EndpointSlice", func() {
|
||||
}
|
||||
|
||||
err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectNoError(err, "error deleting Service")
|
||||
|
||||
// Expect Endpoints resource to be deleted when Service is.
|
||||
if err := wait.PollImmediate(2*time.Second, 12*time.Second, func() (bool, error) {
|
||||
if err := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
|
||||
_, err := cs.CoreV1().Endpoints(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
@ -143,7 +143,7 @@ var _ = SIGDescribe("EndpointSlice", func() {
|
||||
}
|
||||
|
||||
// Expect EndpointSlice resource to be deleted when Service is.
|
||||
if err := wait.PollImmediate(2*time.Second, 12*time.Second, func() (bool, error) {
|
||||
if err := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
|
||||
endpointSliceList, err := cs.DiscoveryV1beta1().EndpointSlices(svc.Namespace).List(context.TODO(), metav1.ListOptions{
|
||||
LabelSelector: "kubernetes.io/service-name=" + svc.Name,
|
||||
})
|
||||
@ -218,7 +218,8 @@ var _ = SIGDescribe("EndpointSlice", func() {
|
||||
Name: "example-int-port",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: map[string]string{labelPod1: labelValue},
|
||||
Selector: map[string]string{labelPod1: labelValue},
|
||||
PublishNotReadyAddresses: true,
|
||||
Ports: []v1.ServicePort{{
|
||||
Name: "example",
|
||||
Port: 80,
|
||||
@ -233,7 +234,8 @@ var _ = SIGDescribe("EndpointSlice", func() {
|
||||
Name: "example-named-port",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: map[string]string{labelShared12: labelValue},
|
||||
Selector: map[string]string{labelShared12: labelValue},
|
||||
PublishNotReadyAddresses: true,
|
||||
Ports: []v1.ServicePort{{
|
||||
Name: "http",
|
||||
Port: 80,
|
||||
@ -248,7 +250,8 @@ var _ = SIGDescribe("EndpointSlice", func() {
|
||||
Name: "example-no-match",
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Selector: map[string]string{labelPod3: labelValue},
|
||||
Selector: map[string]string{labelPod3: labelValue},
|
||||
PublishNotReadyAddresses: true,
|
||||
Ports: []v1.ServicePort{{
|
||||
Name: "example-no-match",
|
||||
Port: 80,
|
||||
@ -259,30 +262,26 @@ var _ = SIGDescribe("EndpointSlice", func() {
|
||||
})
|
||||
|
||||
err := wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
|
||||
if !podClient.PodIsReady(pod1.Name) {
|
||||
framework.Logf("Pod 1 not ready yet")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if !podClient.PodIsReady(pod2.Name) {
|
||||
framework.Logf("Pod 2 not ready yet")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
var err error
|
||||
pod1, err = podClient.Get(context.TODO(), pod1.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(pod1.Status.PodIPs) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
pod2, err = podClient.Get(context.TODO(), pod2.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if len(pod2.Status.PodIPs) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectNoError(err, "timed out waiting for Pods to have IPs assigned")
|
||||
|
||||
ginkgo.By("referencing a single matching pod")
|
||||
expectEndpointsAndSlices(cs, f.Namespace.Name, svc1, []*v1.Pod{pod1}, 1, 1, false)
|
||||
@ -560,6 +559,6 @@ func ensurePodTargetRef(pod *v1.Pod, targetRef *v1.ObjectReference) {
|
||||
// createServiceReportErr creates a Service and reports any associated error.
|
||||
func createServiceReportErr(cs clientset.Interface, ns string, service *v1.Service) *v1.Service {
|
||||
svc, err := cs.CoreV1().Services(ns).Create(context.TODO(), service, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err)
|
||||
framework.ExpectNoError(err, "error deleting Service")
|
||||
return svc
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user