Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #99916 from swetharepakula/eps-ga-e2e
Promote Endpoint Slice E2E Tests to Conformance
This commit is contained in: faa3a5fbd4

Changed file: test/conformance/testdata/conformance.yaml (vendored), 40 lines changed
test/conformance/testdata/conformance.yaml

@@ -1157,6 +1157,46 @@
     DNS configuration MUST be configured in the Pod.
   release: v1.17
   file: test/e2e/network/dns.go
+- testname: EndpointSlice API
+  codename: '[sig-network] EndpointSlice should create Endpoints and EndpointSlices
+    for Pods matching a Service [Conformance]'
+  description: The discovery.k8s.io API group MUST exist in the /apis discovery document.
+    The discovery.k8s.io/v1 API group/version MUST exist in the /apis/discovery.k8s.io
+    discovery document. The endpointslices resource MUST exist in the /apis/discovery.k8s.io/v1
+    discovery document. The endpointslice controller must create EndpointSlices for
+    Pods matching a Service.
+  release: v1.21
+  file: test/e2e/network/endpointslice.go
+- testname: EndpointSlice API
+  codename: '[sig-network] EndpointSlice should create and delete Endpoints and EndpointSlices
+    for a Service with a selector specified [Conformance]'
+  description: The discovery.k8s.io API group MUST exist in the /apis discovery document.
+    The discovery.k8s.io/v1 API group/version MUST exist in the /apis/discovery.k8s.io
+    discovery document. The endpointslices resource MUST exist in the /apis/discovery.k8s.io/v1
+    discovery document. The endpointslice controller should create and delete EndpointSlices
+    for Pods matching a Service.
+  release: v1.21
+  file: test/e2e/network/endpointslice.go
+- testname: EndpointSlice API
+  codename: '[sig-network] EndpointSlice should have Endpoints and EndpointSlices
+    pointing to API Server [Conformance]'
+  description: The discovery.k8s.io API group MUST exist in the /apis discovery document.
+    The discovery.k8s.io/v1 API group/version MUST exist in the /apis/discovery.k8s.io
+    discovery document. The endpointslices resource MUST exist in the /apis/discovery.k8s.io/v1
+    discovery document. API Server should create self referential Endpoints and EndpointSlices
+    named "kubernetes" in the default namespace.
+  release: v1.21
+  file: test/e2e/network/endpointslice.go
+- testname: EndpointSlice Mirroring
+  codename: '[sig-network] EndpointSliceMirroring should mirror a custom Endpoints
+    resource through create update and delete [Conformance]'
+  description: The discovery.k8s.io API group MUST exist in the /apis discovery document.
+    The discovery.k8s.io/v1 API group/version MUST exist in the /apis/discovery.k8s.io
+    discovery document. The endpointslices resource MUST exist in the /apis/discovery.k8s.io/v1
+    discovery document. The EndpointSlice mirroring controller must mirror Endpoints
+    create, update, and delete actions.
+  release: v1.21
+  file: test/e2e/network/endpointslicemirroring.go
 - testname: Scheduling, HostPort matching and HostIP and Protocol not-matching
   codename: '[sig-network] HostPort validates that there is no conflict between pods
     with same hostPort but different hostIP and protocol [LinuxOnly] [Conformance]'
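The three "MUST exist" clauses in these descriptions are checks against the API server's discovery document. A minimal sketch of how such a check can be written with client-go follows; the helper name verifyDiscoveryV1 and the kubeconfig loading are illustrative assumptions, not part of this change.

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// verifyDiscoveryV1 asks the API server's discovery endpoint whether the
// discovery.k8s.io/v1 group/version is served and whether it contains the
// endpointslices resource, mirroring the wording of the descriptions above.
func verifyDiscoveryV1(cs kubernetes.Interface) error {
	resources, err := cs.Discovery().ServerResourcesForGroupVersion("discovery.k8s.io/v1")
	if err != nil {
		return fmt.Errorf("discovery.k8s.io/v1 not found in /apis: %w", err)
	}
	for _, r := range resources.APIResources {
		if r.Name == "endpointslices" {
			return nil
		}
	}
	return fmt.Errorf("endpointslices resource not found in discovery.k8s.io/v1")
}

func main() {
	// Assumes a reachable cluster configured at the default kubeconfig location.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	if err := verifyDiscoveryV1(kubernetes.NewForConfigOrDie(config)); err != nil {
		panic(err)
	}
	fmt.Println("discovery.k8s.io/v1 with endpointslices is served")
}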
@@ -17,7 +17,7 @@ limitations under the License.
 package endpointslice
 
 import (
-	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
+	discoveryv1 "k8s.io/api/discovery/v1"
 	"k8s.io/apimachinery/pkg/types"
 )
 
@@ -25,7 +25,7 @@ import (
 type PortsByPodUID map[types.UID][]int
 
 // GetContainerPortsByPodUID returns a PortsByPodUID map on the given endpoints.
-func GetContainerPortsByPodUID(eps []discoveryv1beta1.EndpointSlice) PortsByPodUID {
+func GetContainerPortsByPodUID(eps []discoveryv1.EndpointSlice) PortsByPodUID {
 	m := PortsByPodUID{}
 
 	for _, es := range eps {
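Only the signature changes here; the function body sits outside the hunk. A sketch of how such a map can be filled from discovery/v1 slices is shown below, assuming every endpoint carries a TargetRef with the backing Pod's UID. This is an illustration, not necessarily the exact upstream body.

package endpointslice

import (
	discoveryv1 "k8s.io/api/discovery/v1"
	"k8s.io/apimachinery/pkg/types"
)

// portsByPodUIDSketch maps each backing Pod UID to the ports exposed through
// the given EndpointSlices.
func portsByPodUIDSketch(slices []discoveryv1.EndpointSlice) map[types.UID][]int {
	m := map[types.UID][]int{}
	for _, es := range slices {
		for _, port := range es.Ports {
			if port.Port == nil {
				continue // ports without a number contribute nothing
			}
			for _, ep := range es.Endpoints {
				if ep.TargetRef == nil {
					continue // endpoints not backed by a Pod are skipped
				}
				m[ep.TargetRef.UID] = append(m[ep.TargetRef.UID], int(*port.Port))
			}
		}
	}
	return m
}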
@@ -27,7 +27,7 @@ import (
 
 	"github.com/onsi/ginkgo"
 	v1 "k8s.io/api/core/v1"
-	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
+	discoveryv1 "k8s.io/api/discovery/v1"
 	policyv1beta1 "k8s.io/api/policy/v1beta1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -395,52 +395,47 @@ func (j *TestJig) waitForAvailableEndpoint(timeout time.Duration) error {
 
 	go controller.Run(stopCh)
 
-	// If EndpointSlice API is enabled, then validate if appropriate EndpointSlice objects were also created/updated/deleted.
-	if _, err := j.Client.Discovery().ServerResourcesForGroupVersion(discoveryv1beta1.SchemeGroupVersion.String()); err == nil {
-		var esController cache.Controller
-		_, esController = cache.NewInformer(
-			&cache.ListWatch{
-				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-					options.LabelSelector = "kubernetes.io/service-name=" + j.Name
-					obj, err := j.Client.DiscoveryV1beta1().EndpointSlices(j.Namespace).List(context.TODO(), options)
-					return runtime.Object(obj), err
-				},
-				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-					options.LabelSelector = "kubernetes.io/service-name=" + j.Name
-					return j.Client.DiscoveryV1beta1().EndpointSlices(j.Namespace).Watch(context.TODO(), options)
-				},
-			},
-			&discoveryv1beta1.EndpointSlice{},
-			0,
-			cache.ResourceEventHandlerFuncs{
-				AddFunc: func(obj interface{}) {
-					if es, ok := obj.(*discoveryv1beta1.EndpointSlice); ok {
-						// TODO: currently we only consider addresses in 1 slice, but services with
-						// a large number of endpoints (>1000) may have multiple slices. Some slices
-						// with only a few addresses. We should check the addresses in all slices.
-						if len(es.Endpoints) > 0 && len(es.Endpoints[0].Addresses) > 0 {
-							endpointSliceAvailable = true
-						}
-					}
-				},
-				UpdateFunc: func(old, cur interface{}) {
-					if es, ok := cur.(*discoveryv1beta1.EndpointSlice); ok {
-						// TODO: currently we only consider addresses in 1 slice, but services with
-						// a large number of endpoints (>1000) may have multiple slices. Some slices
-						// with only a few addresses. We should check the addresses in all slices.
-						if len(es.Endpoints) > 0 && len(es.Endpoints[0].Addresses) > 0 {
-							endpointSliceAvailable = true
-						}
-					}
-				},
-			},
-		)
-
-		go esController.Run(stopCh)
-
-	} else {
-		endpointSliceAvailable = true
-	}
+	var esController cache.Controller
+	_, esController = cache.NewInformer(
+		&cache.ListWatch{
+			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+				options.LabelSelector = "kubernetes.io/service-name=" + j.Name
+				obj, err := j.Client.DiscoveryV1().EndpointSlices(j.Namespace).List(context.TODO(), options)
+				return runtime.Object(obj), err
+			},
+			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+				options.LabelSelector = "kubernetes.io/service-name=" + j.Name
+				return j.Client.DiscoveryV1().EndpointSlices(j.Namespace).Watch(context.TODO(), options)
+			},
+		},
+		&discoveryv1.EndpointSlice{},
+		0,
+		cache.ResourceEventHandlerFuncs{
+			AddFunc: func(obj interface{}) {
+				if es, ok := obj.(*discoveryv1.EndpointSlice); ok {
+					// TODO: currently we only consider addresses in 1 slice, but services with
+					// a large number of endpoints (>1000) may have multiple slices. Some slices
+					// with only a few addresses. We should check the addresses in all slices.
+					if len(es.Endpoints) > 0 && len(es.Endpoints[0].Addresses) > 0 {
+						endpointSliceAvailable = true
+					}
+				}
+			},
+			UpdateFunc: func(old, cur interface{}) {
+				if es, ok := cur.(*discoveryv1.EndpointSlice); ok {
+					// TODO: currently we only consider addresses in 1 slice, but services with
+					// a large number of endpoints (>1000) may have multiple slices. Some slices
+					// with only a few addresses. We should check the addresses in all slices.
+					if len(es.Endpoints) > 0 && len(es.Endpoints[0].Addresses) > 0 {
+						endpointSliceAvailable = true
+					}
+				}
+			},
+		},
+	)
+
+	go esController.Run(stopCh)
 
 	err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
 		return endpointAvailable && endpointSliceAvailable, nil
 	})
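With EndpointSlice GA in v1.21, the jig drops the ServerResourcesForGroupVersion guard and the else branch above: the discovery/v1 API is assumed to be served. For one-off checks outside the jig, the same readiness condition can be expressed as a simple poll; a minimal sketch under that assumption follows (the helper name pollEndpointSliceReady and the package name are illustrative).

package service

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// pollEndpointSliceReady waits until at least one EndpointSlice owned by the
// Service has at least one endpoint address, the same condition the informer
// callbacks above look for.
func pollEndpointSliceReady(cs kubernetes.Interface, ns, name string, timeout time.Duration) error {
	return wait.PollImmediate(2*time.Second, timeout, func() (bool, error) {
		esList, err := cs.DiscoveryV1().EndpointSlices(ns).List(context.TODO(), metav1.ListOptions{
			LabelSelector: "kubernetes.io/service-name=" + name,
		})
		if err != nil {
			return false, nil // transient list errors are retried
		}
		for _, es := range esList.Items {
			if len(es.Endpoints) > 0 && len(es.Endpoints[0].Addresses) > 0 {
				return true, nil
			}
		}
		return false, nil
	})
}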
@@ -23,7 +23,7 @@ import (
 	"time"
 
 	v1 "k8s.io/api/core/v1"
-	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
+	discoveryv1 "k8s.io/api/discovery/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
@@ -48,7 +48,15 @@ var _ = common.SIGDescribe("EndpointSlice", func() {
 		podClient = f.PodClient()
 	})
 
-	ginkgo.It("should have Endpoints and EndpointSlices pointing to API Server", func() {
+	/*
+		Release: v1.21
+		Testname: EndpointSlice API
+		Description: The discovery.k8s.io API group MUST exist in the /apis discovery document.
+		The discovery.k8s.io/v1 API group/version MUST exist in the /apis/discovery.k8s.io discovery document.
+		The endpointslices resource MUST exist in the /apis/discovery.k8s.io/v1 discovery document.
+		API Server should create self referential Endpoints and EndpointSlices named "kubernetes" in the default namespace.
+	*/
+	framework.ConformanceIt("should have Endpoints and EndpointSlices pointing to API Server", func() {
 		namespace := "default"
 		name := "kubernetes"
 		endpoints, err := cs.CoreV1().Endpoints(namespace).Get(context.TODO(), name, metav1.GetOptions{})
@@ -58,7 +66,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() {
 		}
 
 		endpointSubset := endpoints.Subsets[0]
-		endpointSlice, err := cs.DiscoveryV1beta1().EndpointSlices(namespace).Get(context.TODO(), name, metav1.GetOptions{})
+		endpointSlice, err := cs.DiscoveryV1().EndpointSlices(namespace).Get(context.TODO(), name, metav1.GetOptions{})
 		framework.ExpectNoError(err, "error creating EndpointSlice resource")
 		if len(endpointSlice.Ports) != len(endpointSubset.Ports) {
 			framework.Failf("Expected EndpointSlice to have %d ports, got %d: %#v", len(endpointSubset.Ports), len(endpointSlice.Ports), endpointSlice.Ports)
@@ -70,7 +78,15 @@ var _ = common.SIGDescribe("EndpointSlice", func() {
 
 	})
 
-	ginkgo.It("should create and delete Endpoints and EndpointSlices for a Service with a selector specified", func() {
+	/*
+		Release: v1.21
+		Testname: EndpointSlice API
+		Description: The discovery.k8s.io API group MUST exist in the /apis discovery document.
+		The discovery.k8s.io/v1 API group/version MUST exist in the /apis/discovery.k8s.io discovery document.
+		The endpointslices resource MUST exist in the /apis/discovery.k8s.io/v1 discovery document.
+		The endpointslice controller should create and delete EndpointSlices for Pods matching a Service.
+	*/
+	framework.ConformanceIt("should create and delete Endpoints and EndpointSlices for a Service with a selector specified", func() {
 		svc := createServiceReportErr(cs, f.Namespace.Name, &v1.Service{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "example-empty-selector",
@@ -99,9 +115,9 @@ var _ = common.SIGDescribe("EndpointSlice", func() {
 		}
 
 		// Expect EndpointSlice resource to be created.
-		var endpointSlice discoveryv1beta1.EndpointSlice
+		var endpointSlice discoveryv1.EndpointSlice
 		if err := wait.PollImmediate(2*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
-			endpointSliceList, err := cs.DiscoveryV1beta1().EndpointSlices(svc.Namespace).List(context.TODO(), metav1.ListOptions{
+			endpointSliceList, err := cs.DiscoveryV1().EndpointSlices(svc.Namespace).List(context.TODO(), metav1.ListOptions{
 				LabelSelector: "kubernetes.io/service-name=" + svc.Name,
 			})
 			if err != nil {
@@ -117,12 +133,12 @@ var _ = common.SIGDescribe("EndpointSlice", func() {
 		}
 
 		// Ensure EndpointSlice has expected values.
-		managedBy, ok := endpointSlice.Labels[discoveryv1beta1.LabelManagedBy]
+		managedBy, ok := endpointSlice.Labels[discoveryv1.LabelManagedBy]
 		expectedManagedBy := "endpointslice-controller.k8s.io"
 		if !ok {
-			framework.Failf("Expected EndpointSlice to have %s label, got %#v", discoveryv1beta1.LabelManagedBy, endpointSlice.Labels)
+			framework.Failf("Expected EndpointSlice to have %s label, got %#v", discoveryv1.LabelManagedBy, endpointSlice.Labels)
 		} else if managedBy != expectedManagedBy {
-			framework.Failf("Expected EndpointSlice to have %s label with %s value, got %s", discoveryv1beta1.LabelManagedBy, expectedManagedBy, managedBy)
+			framework.Failf("Expected EndpointSlice to have %s label with %s value, got %s", discoveryv1.LabelManagedBy, expectedManagedBy, managedBy)
 		}
 		if len(endpointSlice.Endpoints) != 0 {
 			framework.Failf("Expected EndpointSlice to have 0 endpoints, got %d: %#v", len(endpointSlice.Endpoints), endpointSlice.Endpoints)
@@ -150,7 +166,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() {
 		// and may need to retry informer resync at some point during an e2e
 		// run.
 		if err := wait.PollImmediate(2*time.Second, 90*time.Second, func() (bool, error) {
-			endpointSliceList, err := cs.DiscoveryV1beta1().EndpointSlices(svc.Namespace).List(context.TODO(), metav1.ListOptions{
+			endpointSliceList, err := cs.DiscoveryV1().EndpointSlices(svc.Namespace).List(context.TODO(), metav1.ListOptions{
 				LabelSelector: "kubernetes.io/service-name=" + svc.Name,
 			})
 			if err != nil {
@@ -165,7 +181,15 @@ var _ = common.SIGDescribe("EndpointSlice", func() {
 		}
 	})
 
-	ginkgo.It("should create Endpoints and EndpointSlices for Pods matching a Service", func() {
+	/*
+		Release: v1.21
+		Testname: EndpointSlice API
+		Description: The discovery.k8s.io API group MUST exist in the /apis discovery document.
+		The discovery.k8s.io/v1 API group/version MUST exist in the /apis/discovery.k8s.io discovery document.
+		The endpointslices resource MUST exist in the /apis/discovery.k8s.io/v1 discovery document.
+		The endpointslice controller must create EndpointSlices for Pods matching a Service.
+	*/
+	framework.ConformanceIt("should create Endpoints and EndpointSlices for Pods matching a Service", func() {
 		labelPod1 := "pod1"
 		labelPod2 := "pod2"
 		labelPod3 := "pod3"
@@ -313,7 +337,7 @@ var _ = common.SIGDescribe("EndpointSlice", func() {
 // and takes some shortcuts with the assumption that those test cases will be
 // the only caller of this function.
 func expectEndpointsAndSlices(cs clientset.Interface, ns string, svc *v1.Service, pods []*v1.Pod, numSubsets, numSlices int, namedPort bool) {
-	endpointSlices := []discoveryv1beta1.EndpointSlice{}
+	endpointSlices := []discoveryv1.EndpointSlice{}
 	if err := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
 		endpointSlicesFound, hasMatchingSlices := hasMatchingEndpointSlices(cs, ns, svc.Name, len(pods), numSlices)
 		if !hasMatchingSlices {
@@ -479,12 +503,12 @@ func expectEndpointsAndSlices(cs clientset.Interface, ns string, svc *v1.Service
 
 // deleteEndpointSlices deletes EndpointSlices for the specified Service.
 func deleteEndpointSlices(cs clientset.Interface, ns string, svc *v1.Service) {
-	listOptions := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discoveryv1beta1.LabelServiceName, svc.Name)}
-	esList, err := cs.DiscoveryV1beta1().EndpointSlices(ns).List(context.TODO(), listOptions)
+	listOptions := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discoveryv1.LabelServiceName, svc.Name)}
+	esList, err := cs.DiscoveryV1().EndpointSlices(ns).List(context.TODO(), listOptions)
 	framework.ExpectNoError(err, "Error fetching EndpointSlices for %s/%s Service", ns, svc.Name)
 
 	for _, endpointSlice := range esList.Items {
-		err := cs.DiscoveryV1beta1().EndpointSlices(ns).Delete(context.TODO(), endpointSlice.Name, metav1.DeleteOptions{})
+		err := cs.DiscoveryV1().EndpointSlices(ns).Delete(context.TODO(), endpointSlice.Name, metav1.DeleteOptions{})
 		framework.ExpectNoError(err, "Error deleting %s/%s EndpointSlice", ns, endpointSlice.Name)
 	}
 }
@@ -492,14 +516,14 @@ func deleteEndpointSlices(cs clientset.Interface, ns string, svc *v1.Service) {
 // hasMatchingEndpointSlices returns any EndpointSlices that match the
 // conditions along with a boolean indicating if all the conditions have been
 // met.
-func hasMatchingEndpointSlices(cs clientset.Interface, ns, svcName string, numEndpoints, numSlices int) ([]discoveryv1beta1.EndpointSlice, bool) {
-	listOptions := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discoveryv1beta1.LabelServiceName, svcName)}
-	esList, err := cs.DiscoveryV1beta1().EndpointSlices(ns).List(context.TODO(), listOptions)
+func hasMatchingEndpointSlices(cs clientset.Interface, ns, svcName string, numEndpoints, numSlices int) ([]discoveryv1.EndpointSlice, bool) {
+	listOptions := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discoveryv1.LabelServiceName, svcName)}
+	esList, err := cs.DiscoveryV1().EndpointSlices(ns).List(context.TODO(), listOptions)
 	framework.ExpectNoError(err, "Error fetching EndpointSlice for Service %s/%s", ns, svcName)
 
 	if len(esList.Items) == 0 {
 		framework.Logf("EndpointSlice for Service %s/%s not found", ns, svcName)
-		return []discoveryv1beta1.EndpointSlice{}, false
+		return []discoveryv1.EndpointSlice{}, false
 	}
 	// In some cases the EndpointSlice controller will create more
 	// EndpointSlices than necessary resulting in some duplication. This is
@@ -23,7 +23,7 @@ import (
 
 	"github.com/onsi/ginkgo"
 	v1 "k8s.io/api/core/v1"
-	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
+	discoveryv1 "k8s.io/api/discovery/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
@@ -40,7 +40,15 @@ var _ = common.SIGDescribe("EndpointSliceMirroring", func() {
 		cs = f.ClientSet
 	})
 
-	ginkgo.It("should mirror a custom Endpoints resource through create update and delete", func() {
+	/*
+		Release: v1.21
+		Testname: EndpointSlice Mirroring
+		Description: The discovery.k8s.io API group MUST exist in the /apis discovery document.
+		The discovery.k8s.io/v1 API group/version MUST exist in the /apis/discovery.k8s.io discovery document.
+		The endpointslices resource MUST exist in the /apis/discovery.k8s.io/v1 discovery document.
+		The EndpointSlice mirroring controller must mirror Endpoints create, update, and delete actions.
+	*/
+	framework.ConformanceIt("should mirror a custom Endpoints resource through create update and delete", func() {
 		svc := createServiceReportErr(cs, f.Namespace.Name, &v1.Service{
 			ObjectMeta: metav1.ObjectMeta{
 				Name: "example-custom-endpoints",
@@ -73,8 +81,8 @@ var _ = common.SIGDescribe("EndpointSliceMirroring", func() {
 		framework.ExpectNoError(err, "Unexpected error creating Endpoints")
 
 		if err := wait.PollImmediate(2*time.Second, 12*time.Second, func() (bool, error) {
-			esList, err := cs.DiscoveryV1beta1().EndpointSlices(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{
-				LabelSelector: discoveryv1beta1.LabelServiceName + "=" + svc.Name,
+			esList, err := cs.DiscoveryV1().EndpointSlices(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{
+				LabelSelector: discoveryv1.LabelServiceName + "=" + svc.Name,
 			})
 			if err != nil {
 				framework.Logf("Error listing EndpointSlices: %v", err)
@@ -125,8 +133,8 @@ var _ = common.SIGDescribe("EndpointSliceMirroring", func() {
 
 		// Expect mirrored EndpointSlice resource to be updated.
 		if err := wait.PollImmediate(2*time.Second, 12*time.Second, func() (bool, error) {
-			esList, err := cs.DiscoveryV1beta1().EndpointSlices(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{
-				LabelSelector: discoveryv1beta1.LabelServiceName + "=" + svc.Name,
+			esList, err := cs.DiscoveryV1().EndpointSlices(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{
+				LabelSelector: discoveryv1.LabelServiceName + "=" + svc.Name,
 			})
 			if err != nil {
 				return false, err
@@ -172,8 +180,8 @@ var _ = common.SIGDescribe("EndpointSliceMirroring", func() {
 
 		// Expect mirrored EndpointSlice resource to be updated.
 		if err := wait.PollImmediate(2*time.Second, 12*time.Second, func() (bool, error) {
-			esList, err := cs.DiscoveryV1beta1().EndpointSlices(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{
-				LabelSelector: discoveryv1beta1.LabelServiceName + "=" + svc.Name,
+			esList, err := cs.DiscoveryV1().EndpointSlices(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{
+				LabelSelector: discoveryv1.LabelServiceName + "=" + svc.Name,
 			})
 			if err != nil {
 				return false, err
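All three mirroring hunks drive the same scenario: a Service without a selector plus a hand-written Endpoints object of the same name, which the EndpointSlice mirroring controller is expected to mirror into an EndpointSlice carrying the kubernetes.io/service-name label. A condensed sketch of that setup follows; the function name, the address 10.1.2.3, and the port values are illustrative, and only the Service name comes from the test above. In practice callers poll for the mirrored slice (as the test does) since mirroring is asynchronous.

package network

import (
	"context"

	v1 "k8s.io/api/core/v1"
	discoveryv1 "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createMirroredEndpoints creates a selectorless Service plus a manual
// Endpoints object with the same name; the mirroring controller should then
// produce a matching EndpointSlice that can be found by label.
func createMirroredEndpoints(ctx context.Context, cs kubernetes.Interface, ns string) (*discoveryv1.EndpointSliceList, error) {
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "example-custom-endpoints"},
		// No selector: the endpointslice controller ignores this Service and
		// leaves it to the mirroring controller.
		Spec: v1.ServiceSpec{
			Ports: []v1.ServicePort{{Name: "port80", Port: 80, Protocol: v1.ProtocolTCP}},
		},
	}
	if _, err := cs.CoreV1().Services(ns).Create(ctx, svc, metav1.CreateOptions{}); err != nil {
		return nil, err
	}
	eps := &v1.Endpoints{
		ObjectMeta: metav1.ObjectMeta{Name: svc.Name},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "10.1.2.3"}},
			Ports:     []v1.EndpointPort{{Name: "port80", Port: 80, Protocol: v1.ProtocolTCP}},
		}},
	}
	if _, err := cs.CoreV1().Endpoints(ns).Create(ctx, eps, metav1.CreateOptions{}); err != nil {
		return nil, err
	}
	// The mirrored slice is then discovered the same way the test does it.
	return cs.DiscoveryV1().EndpointSlices(ns).List(ctx, metav1.ListOptions{
		LabelSelector: discoveryv1.LabelServiceName + "=" + svc.Name,
	})
}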
@@ -20,12 +20,12 @@ package network
 import (
 	"context"
 	"fmt"
-	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
 	"time"
 
 	"github.com/onsi/ginkgo"
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
+	discoveryv1 "k8s.io/api/discovery/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
 	clientset "k8s.io/client-go/kubernetes"
@@ -212,8 +212,8 @@ var _ = common.SIGDescribe("Networking IPerf2 [Feature:Networking-Performance]",
 	// Make sure the server is ready to go
 	framework.Logf("waiting for iperf2 server endpoints")
 	err = wait.Poll(2*time.Second, largeClusterTimeout, func() (done bool, err error) {
-		listOptions := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discoveryv1beta1.LabelServiceName, serverServiceName)}
-		esList, err := f.ClientSet.DiscoveryV1beta1().EndpointSlices(f.Namespace.Name).List(context.TODO(), listOptions)
+		listOptions := metav1.ListOptions{LabelSelector: fmt.Sprintf("%s=%s", discoveryv1.LabelServiceName, serverServiceName)}
+		esList, err := f.ClientSet.DiscoveryV1().EndpointSlices(f.Namespace.Name).List(context.TODO(), listOptions)
 		framework.ExpectNoError(err, "Error fetching EndpointSlice for Service %s/%s", f.Namespace.Name, serverServiceName)
 
 		if len(esList.Items) == 0 {
@@ -36,7 +36,7 @@ import (
 
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
-	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
+	discoveryv1 "k8s.io/api/discovery/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -2785,11 +2785,11 @@ func validateEndpointsPortsOrFail(c clientset.Interface, namespace, serviceName
 
 	// If EndpointSlice API is enabled, then validate if appropriate EndpointSlice objects
 	// were also created/updated/deleted.
-	if _, err := c.Discovery().ServerResourcesForGroupVersion(discoveryv1beta1.SchemeGroupVersion.String()); err == nil {
+	if _, err := c.Discovery().ServerResourcesForGroupVersion(discoveryv1.SchemeGroupVersion.String()); err == nil {
 		opts := metav1.ListOptions{
 			LabelSelector: "kubernetes.io/service-name=" + serviceName,
 		}
-		es, err := c.DiscoveryV1beta1().EndpointSlices(namespace).List(context.TODO(), opts)
+		es, err := c.DiscoveryV1().EndpointSlices(namespace).List(context.TODO(), opts)
 		if err != nil {
 			framework.Logf("Failed to list EndpointSlice objects: %v", err)
 			// Retry the error
@@ -24,7 +24,7 @@ import (
 	"time"
 
 	v1 "k8s.io/api/core/v1"
-	discovery "k8s.io/api/discovery/v1beta1"
+	discovery "k8s.io/api/discovery/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
@@ -253,7 +253,7 @@ func TestDualStackEndpoints(t *testing.T) {
 		// wait until the endpoint slices are created
 		err = wait.PollImmediate(1*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
 			lSelector := discovery.LabelServiceName + "=" + svc.Name
-			esList, err := client.DiscoveryV1beta1().EndpointSlices(ns.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: lSelector})
+			esList, err := client.DiscoveryV1().EndpointSlices(ns.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: lSelector})
 			if err != nil {
 				t.Logf("Error listing EndpointSlices: %v", err)
 				return false, nil