mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-08-03 09:22:44 +00:00
Updating EndpointSlice e2e tests to be less flaky and easier to debug
This adjusts tests that were waiting for Pods to be ready to instead just wait for them to have IPs assigned to them. This relies on the associated publishNotReadyAddresses field on Services. Additionally this increases the length of time we'll wait for EndpointSlices to be garbage collected from 12s to 30s. Finally, this adds additional logging to ExpectNoError calls so it's easier to understand where and why a test failed.
This commit is contained in:
parent
8e5c02255c
commit
3e4745ddaf
@ -49,14 +49,14 @@ var _ = SIGDescribe("EndpointSlice", func() {
|
|||||||
namespace := "default"
|
namespace := "default"
|
||||||
name := "kubernetes"
|
name := "kubernetes"
|
||||||
endpoints, err := cs.CoreV1().Endpoints(namespace).Get(context.TODO(), name, metav1.GetOptions{})
|
endpoints, err := cs.CoreV1().Endpoints(namespace).Get(context.TODO(), name, metav1.GetOptions{})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err, "error fetching Endpoints resource")
|
||||||
if len(endpoints.Subsets) != 1 {
|
if len(endpoints.Subsets) != 1 {
|
||||||
framework.Failf("Expected 1 subset in endpoints, got %d: %#v", len(endpoints.Subsets), endpoints.Subsets)
|
framework.Failf("Expected 1 subset in endpoints, got %d: %#v", len(endpoints.Subsets), endpoints.Subsets)
|
||||||
}
|
}
|
||||||
|
|
||||||
endpointSubset := endpoints.Subsets[0]
|
endpointSubset := endpoints.Subsets[0]
|
||||||
endpointSlice, err := cs.DiscoveryV1beta1().EndpointSlices(namespace).Get(context.TODO(), name, metav1.GetOptions{})
|
endpointSlice, err := cs.DiscoveryV1beta1().EndpointSlices(namespace).Get(context.TODO(), name, metav1.GetOptions{})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err, "error fetching EndpointSlice resource")
|
||||||
if len(endpointSlice.Ports) != len(endpointSubset.Ports) {
|
if len(endpointSlice.Ports) != len(endpointSubset.Ports) {
|
||||||
framework.Failf("Expected EndpointSlice to have %d ports, got %d: %#v", len(endpointSubset.Ports), len(endpointSlice.Ports), endpointSlice.Ports)
|
framework.Failf("Expected EndpointSlice to have %d ports, got %d: %#v", len(endpointSubset.Ports), len(endpointSlice.Ports), endpointSlice.Ports)
|
||||||
}
|
}
|
||||||
@ -85,7 +85,7 @@ var _ = SIGDescribe("EndpointSlice", func() {
|
|||||||
})
|
})
|
||||||
|
|
||||||
// Expect Endpoints resource to be created.
|
// Expect Endpoints resource to be created.
|
||||||
if err := wait.PollImmediate(2*time.Second, 12*time.Second, func() (bool, error) {
|
if err := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
|
||||||
_, err := cs.CoreV1().Endpoints(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{})
|
_, err := cs.CoreV1().Endpoints(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, nil
|
return false, nil
|
||||||
@ -97,7 +97,7 @@ var _ = SIGDescribe("EndpointSlice", func() {
|
|||||||
|
|
||||||
// Expect EndpointSlice resource to be created.
|
// Expect EndpointSlice resource to be created.
|
||||||
var endpointSlice discoveryv1beta1.EndpointSlice
|
var endpointSlice discoveryv1beta1.EndpointSlice
|
||||||
if err := wait.PollImmediate(2*time.Second, 12*time.Second, func() (bool, error) {
|
if err := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
|
||||||
endpointSliceList, err := cs.DiscoveryV1beta1().EndpointSlices(svc.Namespace).List(context.TODO(), metav1.ListOptions{
|
endpointSliceList, err := cs.DiscoveryV1beta1().EndpointSlices(svc.Namespace).List(context.TODO(), metav1.ListOptions{
|
||||||
LabelSelector: "kubernetes.io/service-name=" + svc.Name,
|
LabelSelector: "kubernetes.io/service-name=" + svc.Name,
|
||||||
})
|
})
|
||||||
@ -126,10 +126,10 @@ var _ = SIGDescribe("EndpointSlice", func() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
|
err := cs.CoreV1().Services(svc.Namespace).Delete(context.TODO(), svc.Name, metav1.DeleteOptions{})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err, "error deleting Service")
|
||||||
|
|
||||||
// Expect Endpoints resource to be deleted when Service is.
|
// Expect Endpoints resource to be deleted when Service is.
|
||||||
if err := wait.PollImmediate(2*time.Second, 12*time.Second, func() (bool, error) {
|
if err := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
|
||||||
_, err := cs.CoreV1().Endpoints(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{})
|
_, err := cs.CoreV1().Endpoints(svc.Namespace).Get(context.TODO(), svc.Name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
if apierrors.IsNotFound(err) {
|
if apierrors.IsNotFound(err) {
|
||||||
@ -143,7 +143,7 @@ var _ = SIGDescribe("EndpointSlice", func() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Expect EndpointSlice resource to be deleted when Service is.
|
// Expect EndpointSlice resource to be deleted when Service is.
|
||||||
if err := wait.PollImmediate(2*time.Second, 12*time.Second, func() (bool, error) {
|
if err := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
|
||||||
endpointSliceList, err := cs.DiscoveryV1beta1().EndpointSlices(svc.Namespace).List(context.TODO(), metav1.ListOptions{
|
endpointSliceList, err := cs.DiscoveryV1beta1().EndpointSlices(svc.Namespace).List(context.TODO(), metav1.ListOptions{
|
||||||
LabelSelector: "kubernetes.io/service-name=" + svc.Name,
|
LabelSelector: "kubernetes.io/service-name=" + svc.Name,
|
||||||
})
|
})
|
||||||
@ -219,6 +219,7 @@ var _ = SIGDescribe("EndpointSlice", func() {
|
|||||||
},
|
},
|
||||||
Spec: v1.ServiceSpec{
|
Spec: v1.ServiceSpec{
|
||||||
Selector: map[string]string{labelPod1: labelValue},
|
Selector: map[string]string{labelPod1: labelValue},
|
||||||
|
PublishNotReadyAddresses: true,
|
||||||
Ports: []v1.ServicePort{{
|
Ports: []v1.ServicePort{{
|
||||||
Name: "example",
|
Name: "example",
|
||||||
Port: 80,
|
Port: 80,
|
||||||
@ -234,6 +235,7 @@ var _ = SIGDescribe("EndpointSlice", func() {
|
|||||||
},
|
},
|
||||||
Spec: v1.ServiceSpec{
|
Spec: v1.ServiceSpec{
|
||||||
Selector: map[string]string{labelShared12: labelValue},
|
Selector: map[string]string{labelShared12: labelValue},
|
||||||
|
PublishNotReadyAddresses: true,
|
||||||
Ports: []v1.ServicePort{{
|
Ports: []v1.ServicePort{{
|
||||||
Name: "http",
|
Name: "http",
|
||||||
Port: 80,
|
Port: 80,
|
||||||
@ -249,6 +251,7 @@ var _ = SIGDescribe("EndpointSlice", func() {
|
|||||||
},
|
},
|
||||||
Spec: v1.ServiceSpec{
|
Spec: v1.ServiceSpec{
|
||||||
Selector: map[string]string{labelPod3: labelValue},
|
Selector: map[string]string{labelPod3: labelValue},
|
||||||
|
PublishNotReadyAddresses: true,
|
||||||
Ports: []v1.ServicePort{{
|
Ports: []v1.ServicePort{{
|
||||||
Name: "example-no-match",
|
Name: "example-no-match",
|
||||||
Port: 80,
|
Port: 80,
|
||||||
@ -259,30 +262,26 @@ var _ = SIGDescribe("EndpointSlice", func() {
|
|||||||
})
|
})
|
||||||
|
|
||||||
err := wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
|
err := wait.Poll(5*time.Second, 3*time.Minute, func() (bool, error) {
|
||||||
if !podClient.PodIsReady(pod1.Name) {
|
|
||||||
framework.Logf("Pod 1 not ready yet")
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if !podClient.PodIsReady(pod2.Name) {
|
|
||||||
framework.Logf("Pod 2 not ready yet")
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var err error
|
var err error
|
||||||
pod1, err = podClient.Get(context.TODO(), pod1.Name, metav1.GetOptions{})
|
pod1, err = podClient.Get(context.TODO(), pod1.Name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
if len(pod1.Status.PodIPs) == 0 {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
pod2, err = podClient.Get(context.TODO(), pod2.Name, metav1.GetOptions{})
|
pod2, err = podClient.Get(context.TODO(), pod2.Name, metav1.GetOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
if len(pod2.Status.PodIPs) == 0 {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|
||||||
return true, nil
|
return true, nil
|
||||||
})
|
})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err, "timed out waiting for Pods to have IPs assigned")
|
||||||
|
|
||||||
ginkgo.By("referencing a single matching pod")
|
ginkgo.By("referencing a single matching pod")
|
||||||
expectEndpointsAndSlices(cs, f.Namespace.Name, svc1, []*v1.Pod{pod1}, 1, 1, false)
|
expectEndpointsAndSlices(cs, f.Namespace.Name, svc1, []*v1.Pod{pod1}, 1, 1, false)
|
||||||
@ -560,6 +559,6 @@ func ensurePodTargetRef(pod *v1.Pod, targetRef *v1.ObjectReference) {
|
|||||||
// createServiceReportErr creates a Service and reports any associated error.
|
// createServiceReportErr creates a Service and reports any associated error.
|
||||||
func createServiceReportErr(cs clientset.Interface, ns string, service *v1.Service) *v1.Service {
|
func createServiceReportErr(cs clientset.Interface, ns string, service *v1.Service) *v1.Service {
|
||||||
svc, err := cs.CoreV1().Services(ns).Create(context.TODO(), service, metav1.CreateOptions{})
|
svc, err := cs.CoreV1().Services(ns).Create(context.TODO(), service, metav1.CreateOptions{})
|
||||||
framework.ExpectNoError(err)
|
framework.ExpectNoError(err, "error creating Service")
|
||||||
return svc
|
return svc
|
||||||
}
|
}
|
||||||
|
Loading…
Reference in New Issue
Block a user