endpointslice controller: refactor TestSyncServiceFull to use test tables

Signed-off-by: Andrew Sy Kim <kim.andrewsy@gmail.com>

commit 2947f5ce4f (parent 80046582d7)
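
The pattern named in the title, a table-driven test, declares each case as an entry in a slice of structs and runs it as a named subtest via t.Run, so new cases are added as data rather than copied code. A minimal, self-contained sketch of the shape (toy function and names, not taken from this diff):

package example

import "testing"

// add is a toy function under test; the name is hypothetical.
func add(a, b int) int { return a + b }

// TestAdd declares each case as a struct entry and runs it as a
// named subtest -- the same shape this commit gives TestSyncService.
func TestAdd(t *testing.T) {
    testcases := []struct {
        name string
        a, b int
        want int
    }{
        {name: "zeros", a: 0, b: 0, want: 0},
        {name: "mixed signs", a: 2, b: -3, want: -1},
    }
    for _, tc := range testcases {
        t.Run(tc.name, func(t *testing.T) {
            if got := add(tc.a, tc.b); got != tc.want {
                t.Errorf("add(%d, %d) = %d, want %d", tc.a, tc.b, got, tc.want)
            }
        })
    }
}

In the diff below, each row also carries the expected EndpointSlice contents and a per-case feature-gate flag.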
@@ -33,12 +33,15 @@ import (
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/apimachinery/pkg/util/wait"
+    utilfeature "k8s.io/apiserver/pkg/util/feature"
     "k8s.io/client-go/informers"
     "k8s.io/client-go/kubernetes/fake"
     k8stesting "k8s.io/client-go/testing"
     "k8s.io/client-go/tools/cache"
+    featuregatetesting "k8s.io/component-base/featuregate/testing"
     "k8s.io/kubernetes/pkg/controller"
     endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint"
+    "k8s.io/kubernetes/pkg/features"
     utilpointer "k8s.io/utils/pointer"
 )
@@ -335,27 +338,138 @@ func TestOnEndpointSliceUpdate(t *testing.T) {
     assert.Equal(t, 1, esController.queue.Len())
 }

-// Ensure SyncService handles a variety of protocols and IPs appropriately.
-func TestSyncServiceFull(t *testing.T) {
-    client, esController := newController([]string{"node-1"}, time.Duration(0))
-    namespace := metav1.NamespaceDefault
-    serviceName := "all-the-protocols"
+func TestSyncService(t *testing.T) {
+    creationTimestamp := metav1.Now()
+    deletionTimestamp := metav1.Now()

-    pod1 := newPod(1, namespace, true, 0, false)
-    pod1.Status.PodIPs = []v1.PodIP{{IP: "1.2.3.4"}}
-    esController.podStore.Add(pod1)
-
-    pod2 := newPod(2, namespace, true, 0, false)
-    pod2.Status.PodIPs = []v1.PodIP{{IP: "1.2.3.5"}, {IP: "1234::5678:0000:0000:9abc:def0"}}
-    esController.podStore.Add(pod2)
-
-    // create service with all protocols and multiple ports
-    serviceCreateTime := time.Now()
-    service := &v1.Service{
+    testcases := []struct {
+        name                   string
+        service                *v1.Service
+        pods                   []*v1.Pod
+        expectedEndpointPorts  []discovery.EndpointPort
+        expectedEndpoints      []discovery.Endpoint
+        terminatingGateEnabled bool
+    }{
+        {
+            name: "pods with multiple IPs and Service with ipFamilies=ipv4",
+            service: &v1.Service{
                 ObjectMeta: metav1.ObjectMeta{
-            Name:              serviceName,
-            Namespace:         namespace,
-            CreationTimestamp: metav1.NewTime(serviceCreateTime),
+                    Name:              "foobar",
+                    Namespace:         "default",
+                    CreationTimestamp: creationTimestamp,
                 },
                 Spec: v1.ServiceSpec{
                     Ports: []v1.ServicePort{
                         {Name: "tcp-example", TargetPort: intstr.FromInt(80), Protocol: v1.ProtocolTCP},
                         {Name: "udp-example", TargetPort: intstr.FromInt(161), Protocol: v1.ProtocolUDP},
                         {Name: "sctp-example", TargetPort: intstr.FromInt(3456), Protocol: v1.ProtocolSCTP},
                     },
                     Selector:   map[string]string{"foo": "bar"},
+                    IPFamilies: []v1.IPFamily{v1.IPv4Protocol},
                 },
             },
+            pods: []*v1.Pod{
+                {
+                    ObjectMeta: metav1.ObjectMeta{
+                        Namespace:         "default",
+                        Name:              "pod0",
+                        Labels:            map[string]string{"foo": "bar"},
+                        DeletionTimestamp: nil,
+                    },
+                    Spec: v1.PodSpec{
+                        Containers: []v1.Container{{
+                            Name: "container-1",
+                        }},
+                        NodeName: "node-1",
+                    },
+                    Status: v1.PodStatus{
+                        PodIP: "10.0.0.1",
+                        PodIPs: []v1.PodIP{{
+                            IP: "10.0.0.1",
+                        }},
+                        Conditions: []v1.PodCondition{
+                            {
+                                Type:   v1.PodReady,
+                                Status: v1.ConditionTrue,
+                            },
+                        },
+                    },
+                },
+                {
+                    ObjectMeta: metav1.ObjectMeta{
+                        Namespace:         "default",
+                        Name:              "pod1",
+                        Labels:            map[string]string{"foo": "bar"},
+                        DeletionTimestamp: nil,
+                    },
+                    Spec: v1.PodSpec{
+                        Containers: []v1.Container{{
+                            Name: "container-1",
+                        }},
+                        NodeName: "node-1",
+                    },
+                    Status: v1.PodStatus{
+                        PodIP: "10.0.0.2",
+                        PodIPs: []v1.PodIP{
+                            {
+                                IP: "10.0.0.2",
+                            },
+                            {
+                                IP: "fd08::5678:0000:0000:9abc:def0",
+                            },
+                        },
+                        Conditions: []v1.PodCondition{
+                            {
+                                Type:   v1.PodReady,
+                                Status: v1.ConditionTrue,
+                            },
+                        },
+                    },
+                },
+            },
+            expectedEndpointPorts: []discovery.EndpointPort{
+                {
+                    Name:     utilpointer.StringPtr("sctp-example"),
+                    Protocol: protoPtr(v1.ProtocolSCTP),
+                    Port:     utilpointer.Int32Ptr(int32(3456)),
+                },
+                {
+                    Name:     utilpointer.StringPtr("udp-example"),
+                    Protocol: protoPtr(v1.ProtocolUDP),
+                    Port:     utilpointer.Int32Ptr(int32(161)),
+                },
+                {
+                    Name:     utilpointer.StringPtr("tcp-example"),
+                    Protocol: protoPtr(v1.ProtocolTCP),
+                    Port:     utilpointer.Int32Ptr(int32(80)),
+                },
+            },
+            expectedEndpoints: []discovery.Endpoint{
+                {
+                    Conditions: discovery.EndpointConditions{
+                        Ready: utilpointer.BoolPtr(true),
+                    },
+                    Addresses: []string{"10.0.0.1"},
+                    TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0"},
+                    Topology:  map[string]string{"kubernetes.io/hostname": "node-1"},
+                },
+                {
+                    Conditions: discovery.EndpointConditions{
+                        Ready: utilpointer.BoolPtr(true),
+                    },
+                    Addresses: []string{"10.0.0.2"},
+                    TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod1"},
+                    Topology:  map[string]string{"kubernetes.io/hostname": "node-1"},
+                },
+            },
+        },
+        {
+            name: "pods with multiple IPs and Service with ipFamilies=ipv6",
+            service: &v1.Service{
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:              "foobar",
+                    Namespace:         "default",
+                    CreationTimestamp: creationTimestamp,
+                },
+                Spec: v1.ServiceSpec{
+                    Ports: []v1.ServicePort{
@@ -366,45 +480,126 @@ func TestSyncServiceFull(t *testing.T) {
                     Selector:   map[string]string{"foo": "bar"},
+                    IPFamilies: []v1.IPFamily{v1.IPv6Protocol},
                 },
             },
+            pods: []*v1.Pod{
+                {
+                    ObjectMeta: metav1.ObjectMeta{
+                        Namespace:         "default",
+                        Name:              "pod0",
+                        Labels:            map[string]string{"foo": "bar"},
+                        DeletionTimestamp: nil,
+                    },
+                    Spec: v1.PodSpec{
+                        Containers: []v1.Container{{
+                            Name: "container-1",
+                        }},
+                        NodeName: "node-1",
+                    },
+                    Status: v1.PodStatus{
+                        PodIP: "10.0.0.1",
+                        PodIPs: []v1.PodIP{{
+                            IP: "10.0.0.1",
+                        }},
+                        Conditions: []v1.PodCondition{
+                            {
+                                Type:   v1.PodReady,
+                                Status: v1.ConditionTrue,
+                            },
+                        },
+                    },
+                },
+                {
+                    ObjectMeta: metav1.ObjectMeta{
+                        Namespace:         "default",
+                        Name:              "pod1",
+                        Labels:            map[string]string{"foo": "bar"},
+                        DeletionTimestamp: nil,
+                    },
+                    Spec: v1.PodSpec{
+                        Containers: []v1.Container{{
+                            Name: "container-1",
+                        }},
+                        NodeName: "node-1",
+                    },
+                    Status: v1.PodStatus{
+                        PodIP: "10.0.0.2",
+                        PodIPs: []v1.PodIP{
+                            {
+                                IP: "10.0.0.2",
+                            },
+                            {
+                                IP: "fd08::5678:0000:0000:9abc:def0",
+                            },
+                        },
+                        Conditions: []v1.PodCondition{
+                            {
+                                Type:   v1.PodReady,
+                                Status: v1.ConditionTrue,
+                            },
+                        },
+                    },
+                },
+            },
+            expectedEndpointPorts: []discovery.EndpointPort{
+                {
+                    Name:     utilpointer.StringPtr("sctp-example"),
+                    Protocol: protoPtr(v1.ProtocolSCTP),
+                    Port:     utilpointer.Int32Ptr(int32(3456)),
+                },
+                {
+                    Name:     utilpointer.StringPtr("udp-example"),
+                    Protocol: protoPtr(v1.ProtocolUDP),
+                    Port:     utilpointer.Int32Ptr(int32(161)),
+                },
+                {
+                    Name:     utilpointer.StringPtr("tcp-example"),
+                    Protocol: protoPtr(v1.ProtocolTCP),
+                    Port:     utilpointer.Int32Ptr(int32(80)),
+                },
+            },
+            expectedEndpoints: []discovery.Endpoint{
+                {
+                    Conditions: discovery.EndpointConditions{
+                        Ready: utilpointer.BoolPtr(true),
+                    },
+                    Addresses: []string{"fd08::5678:0000:0000:9abc:def0"},
+                    TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod1"},
+                    Topology:  map[string]string{"kubernetes.io/hostname": "node-1"},
+                },
+            },
+        },
+    }
-    esController.serviceStore.Add(service)
-    _, err := esController.client.CoreV1().Services(namespace).Create(context.TODO(), service, metav1.CreateOptions{})

+    for _, testcase := range testcases {
+        t.Run(testcase.name, func(t *testing.T) {
+            defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.EndpointSliceTerminatingCondition, testcase.terminatingGateEnabled)()
+
+            client, esController := newController([]string{"node-1"}, time.Duration(0))
+
+            for _, pod := range testcase.pods {
+                esController.podStore.Add(pod)
+            }
+            esController.serviceStore.Add(testcase.service)
+
+            _, err := esController.client.CoreV1().Services(testcase.service.Namespace).Create(context.TODO(), testcase.service, metav1.CreateOptions{})
             assert.Nil(t, err, "Expected no error creating service")

             // run through full sync service loop
-    err = esController.syncService(fmt.Sprintf("%s/%s", namespace, serviceName))
-    assert.NoError(t, err)
+            err = esController.syncService(fmt.Sprintf("%s/%s", testcase.service.Namespace, testcase.service.Name))
+            assert.Nil(t, err)

             // last action should be to create endpoint slice
             expectActions(t, client.Actions(), 1, "create", "endpointslices")
-    sliceList, err := client.DiscoveryV1beta1().EndpointSlices(namespace).List(context.TODO(), metav1.ListOptions{})
+            sliceList, err := client.DiscoveryV1beta1().EndpointSlices(testcase.service.Namespace).List(context.TODO(), metav1.ListOptions{})
             assert.Nil(t, err, "Expected no error fetching endpoint slices")
             assert.Len(t, sliceList.Items, 1, "Expected 1 endpoint slices")

             // ensure all attributes of endpoint slice match expected state
             slice := sliceList.Items[0]
-    assert.Len(t, slice.Endpoints, 1, "Expected 1 endpoints in first slice")
-    assert.Equal(t, slice.Annotations["endpoints.kubernetes.io/last-change-trigger-time"], serviceCreateTime.Format(time.RFC3339Nano))
-    assert.EqualValues(t, []discovery.EndpointPort{{
-        Name:     utilpointer.StringPtr("sctp-example"),
-        Protocol: protoPtr(v1.ProtocolSCTP),
-        Port:     utilpointer.Int32Ptr(int32(3456)),
-    }, {
-        Name:     utilpointer.StringPtr("udp-example"),
-        Protocol: protoPtr(v1.ProtocolUDP),
-        Port:     utilpointer.Int32Ptr(int32(161)),
-    }, {
-        Name:     utilpointer.StringPtr("tcp-example"),
-        Protocol: protoPtr(v1.ProtocolTCP),
-        Port:     utilpointer.Int32Ptr(int32(80)),
-    }}, slice.Ports)
-
-    assert.ElementsMatch(t, []discovery.Endpoint{{
-        Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
-        Addresses:  []string{"1234::5678:0000:0000:9abc:def0"},
-        TargetRef:  &v1.ObjectReference{Kind: "Pod", Namespace: namespace, Name: pod2.Name},
-        Topology:   map[string]string{"kubernetes.io/hostname": "node-1"},
-    }}, slice.Endpoints)
+            assert.Equal(t, slice.Annotations["endpoints.kubernetes.io/last-change-trigger-time"], creationTimestamp.Format(time.RFC3339Nano))
+            assert.EqualValues(t, testcase.expectedEndpointPorts, slice.Ports)
+            assert.ElementsMatch(t, testcase.expectedEndpoints, slice.Endpoints)
+        })
+    }
 }

 // TestPodAddsBatching verifies that endpoint updates caused by pod addition are batched together.
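
One detail worth noting in the new loop above: each subtest pins the EndpointSliceTerminatingCondition feature gate to the row's terminatingGateEnabled value, and the deferred call to the cleanup function returned by SetFeatureGateDuringTest restores the gate when the subtest ends. A small sketch of that idiom under the same imports the diff adds (runWithGate and TestGateOnAndOff are hypothetical names, not part of the commit):

package endpointslice_test

import (
    "testing"

    utilfeature "k8s.io/apiserver/pkg/util/feature"
    featuregatetesting "k8s.io/component-base/featuregate/testing"
    "k8s.io/kubernetes/pkg/features"
)

// runWithGate is a hypothetical helper: it forces the gate to the
// requested value for the duration of body, after which the deferred
// cleanup returned by SetFeatureGateDuringTest restores the old value.
func runWithGate(t *testing.T, enabled bool, body func()) {
    defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate,
        features.EndpointSliceTerminatingCondition, enabled)()
    body()
}

func TestGateOnAndOff(t *testing.T) {
    for _, enabled := range []bool{true, false} {
        runWithGate(t, enabled, func() {
            // assertions that depend on the gate's value go here
        })
    }
}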