migrated pkg/controller/endpointslice to contextual logging
Signed-off-by: Naman <namanlakhwani@gmail.com>
commit 09849b09cf (parent c95b16b280)
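The change follows the standard klog contextual-logging pattern: a logger is derived from the caller's context once with klog.FromContext and then passed explicitly into helpers and event handlers, replacing the global klog.Infof/Warningf calls with structured key/value logging. Below is a minimal sketch of that pattern, using placeholder names (MyController, syncKey) rather than code from this commit.

// Sketch only: MyController and syncKey are hypothetical stand-ins for the
// controller and sync functions touched by this commit.
package main

import (
	"context"

	"k8s.io/klog/v2"
)

type MyController struct{}

// NewMyController pulls a logger out of the context once, the way
// NewController now does before wiring up its informer event handlers.
func NewMyController(ctx context.Context) *MyController {
	logger := klog.FromContext(ctx)
	logger.Info("Starting controller")
	return &MyController{}
}

// syncKey takes the logger as an explicit parameter instead of using the
// global klog functions, matching the new syncService/handleErr signatures.
func (c *MyController) syncKey(logger klog.Logger, key string) error {
	logger.V(4).Info("Syncing", "key", key)
	return nil
}

func main() {
	ctx := context.Background()
	c := NewMyController(ctx)
	_ = c.syncKey(klog.FromContext(ctx), "default/example")
}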
@@ -29,6 +29,7 @@ import (
 func startEndpointSliceController(ctx context.Context, controllerContext ControllerContext) (controller.Interface, bool, error) {
 	go endpointslicecontroller.NewController(
+		ctx,
 		controllerContext.InformerFactory.Core().V1().Pods(),
 		controllerContext.InformerFactory.Core().V1().Services(),
 		controllerContext.InformerFactory.Core().V1().Nodes(),
@@ -36,7 +37,7 @@ func startEndpointSliceController(ctx context.Context, controllerContext Control
 		controllerContext.ComponentConfig.EndpointSliceController.MaxEndpointsPerSlice,
 		controllerContext.ClientBuilder.ClientOrDie("endpointslice-controller"),
 		controllerContext.ComponentConfig.EndpointSliceController.EndpointUpdatesBatchPeriod.Duration,
-	).Run(int(controllerContext.ComponentConfig.EndpointSliceController.ConcurrentServiceEndpointSyncs), ctx.Done())
+	).Run(ctx, int(controllerContext.ComponentConfig.EndpointSliceController.ConcurrentServiceEndpointSyncs))
 	return nil, true, nil
 }
 
@@ -39,7 +39,7 @@ contextual k8s.io/kubernetes/test/e2e/dra/.*
 -contextual k8s.io/kubernetes/pkg/controller/controller_ref_manager.go
 -contextual k8s.io/kubernetes/pkg/controller/controller_utils.go
 -contextual k8s.io/kubernetes/pkg/controller/disruption/.*
--contextual k8s.io/kubernetes/pkg/controller/endpointslice/.*
+-contextual k8s.io/kubernetes/pkg/controller/endpoint/.*
 -contextual k8s.io/kubernetes/pkg/controller/endpointslicemirroring/.*
 -contextual k8s.io/kubernetes/pkg/controller/garbagecollector/.*
 -contextual k8s.io/kubernetes/pkg/controller/nodeipam/.*
@@ -17,6 +17,7 @@ limitations under the License.
 package endpointslice
 
 import (
+	"context"
 	"fmt"
 	"time"
 
@@ -76,7 +77,7 @@ const (
 )
 
 // NewController creates and initializes a new Controller
-func NewController(podInformer coreinformers.PodInformer,
+func NewController(ctx context.Context, podInformer coreinformers.PodInformer,
 	serviceInformer coreinformers.ServiceInformer,
 	nodeInformer coreinformers.NodeInformer,
 	endpointSliceInformer discoveryinformers.EndpointSliceInformer,
@@ -127,9 +128,12 @@ func NewController(podInformer coreinformers.PodInformer,
 	c.nodeLister = nodeInformer.Lister()
 	c.nodesSynced = nodeInformer.Informer().HasSynced
 
+	logger := klog.FromContext(ctx)
 	endpointSliceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: c.onEndpointSliceAdd,
-		UpdateFunc: c.onEndpointSliceUpdate,
+		UpdateFunc: func(oldObj, newObj interface{}) {
+			c.onEndpointSliceUpdate(logger, oldObj, newObj)
+		},
 		DeleteFunc: c.onEndpointSliceDelete,
 	})
 
@@ -148,9 +152,15 @@ func NewController(podInformer coreinformers.PodInformer,
 
 	if utilfeature.DefaultFeatureGate.Enabled(features.TopologyAwareHints) {
 		nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-			AddFunc: c.addNode,
-			UpdateFunc: c.updateNode,
-			DeleteFunc: c.deleteNode,
+			AddFunc: func(obj interface{}) {
+				c.addNode(logger, obj)
+			},
+			UpdateFunc: func(oldObj, newObj interface{}) {
+				c.updateNode(logger, oldObj, newObj)
+			},
+			DeleteFunc: func(obj interface{}) {
+				c.deleteNode(logger, obj)
+			},
 		})
 
 		c.topologyCache = topologycache.NewTopologyCache()
@@ -239,7 +249,7 @@ type Controller struct {
 }
 
 // Run will not return until stopCh is closed.
-func (c *Controller) Run(workers int, stopCh <-chan struct{}) {
+func (c *Controller) Run(ctx context.Context, workers int) {
 	defer utilruntime.HandleCrash()
 
 	// Start events processing pipeline.
@@ -249,43 +259,45 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}) {
 
 	defer c.queue.ShutDown()
 
-	klog.Infof("Starting endpoint slice controller")
-	defer klog.Infof("Shutting down endpoint slice controller")
+	logger := klog.FromContext(ctx)
+	logger.Info("Starting endpoint slice controller")
+	defer logger.Info("Shutting down endpoint slice controller")
 
-	if !cache.WaitForNamedCacheSync("endpoint_slice", stopCh, c.podsSynced, c.servicesSynced, c.endpointSlicesSynced, c.nodesSynced) {
+	if !cache.WaitForNamedCacheSync("endpoint_slice", ctx.Done(), c.podsSynced, c.servicesSynced, c.endpointSlicesSynced, c.nodesSynced) {
 		return
 	}
 
+	logger.V(2).Info("Starting worker threads", "total", workers)
 	for i := 0; i < workers; i++ {
-		go wait.Until(c.worker, c.workerLoopPeriod, stopCh)
+		go wait.Until(func() { c.worker(logger) }, c.workerLoopPeriod, ctx.Done())
 	}
 
-	<-stopCh
+	<-ctx.Done()
 }
 
 // worker runs a worker thread that just dequeues items, processes them, and
 // marks them done. You may run as many of these in parallel as you wish; the
 // workqueue guarantees that they will not end up processing the same service
 // at the same time
-func (c *Controller) worker() {
-	for c.processNextWorkItem() {
+func (c *Controller) worker(logger klog.Logger) {
+	for c.processNextWorkItem(logger) {
 	}
 }
 
-func (c *Controller) processNextWorkItem() bool {
+func (c *Controller) processNextWorkItem(logger klog.Logger) bool {
 	cKey, quit := c.queue.Get()
 	if quit {
 		return false
 	}
 	defer c.queue.Done(cKey)
 
-	err := c.syncService(cKey.(string))
-	c.handleErr(err, cKey)
+	err := c.syncService(logger, cKey.(string))
+	c.handleErr(logger, err, cKey)
 
 	return true
 }
 
-func (c *Controller) handleErr(err error, key interface{}) {
+func (c *Controller) handleErr(logger klog.Logger, err error, key interface{}) {
 	trackSync(err)
 
 	if err == nil {
@@ -294,20 +306,20 @@ func (c *Controller) handleErr(err error, key interface{}) {
 	}
 
 	if c.queue.NumRequeues(key) < maxRetries {
-		klog.Warningf("Error syncing endpoint slices for service %q, retrying. Error: %v", key, err)
+		logger.Info("Error syncing endpoint slices for service, retrying", "key", key, "err", err)
 		c.queue.AddRateLimited(key)
 		return
 	}
 
-	klog.Warningf("Retry budget exceeded, dropping service %q out of the queue: %v", key, err)
+	logger.Info("Retry budget exceeded, dropping service out of the queue", "key", key, "err", err)
 	c.queue.Forget(key)
 	utilruntime.HandleError(err)
 }
 
-func (c *Controller) syncService(key string) error {
+func (c *Controller) syncService(logger klog.Logger, key string) error {
 	startTime := time.Now()
 	defer func() {
-		klog.V(4).Infof("Finished syncing service %q endpoint slices. (%v)", key, time.Since(startTime))
+		logger.V(4).Info("Finished syncing service endpoint slices", "key", key, "elapsedTime", time.Since(startTime))
 	}()
 
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
@@ -340,7 +352,7 @@ func (c *Controller) syncService(key string) error {
 		return nil
 	}
 
-	klog.V(5).Infof("About to update endpoint slices for service %q", key)
+	logger.V(5).Info("About to update endpoint slices for service", "key", key)
 
 	podLabelSelector := labels.Set(service.Spec.Selector).AsSelectorPreValidated()
 	pods, err := c.podLister.Pods(service.Namespace).List(podLabelSelector)
@@ -379,7 +391,7 @@ func (c *Controller) syncService(key string) error {
 	lastChangeTriggerTime := c.triggerTimeTracker.
 		ComputeEndpointLastChangeTriggerTime(namespace, service, pods)
 
-	err = c.reconciler.reconcile(service, pods, endpointSlices, lastChangeTriggerTime)
+	err = c.reconciler.reconcile(logger, service, pods, endpointSlices, lastChangeTriggerTime)
 	if err != nil {
 		c.eventRecorder.Eventf(service, v1.EventTypeWarning, "FailedToUpdateEndpointSlices",
 			"Error updating Endpoint Slices for Service %s/%s: %v", service.Namespace, service.Name, err)
@@ -429,7 +441,7 @@ func (c *Controller) onEndpointSliceAdd(obj interface{}) {
 // the EndpointSlice resource version does not match the expected version in the
 // endpointSliceTracker or the managed-by value of the EndpointSlice has changed
 // from or to this controller.
-func (c *Controller) onEndpointSliceUpdate(prevObj, obj interface{}) {
+func (c *Controller) onEndpointSliceUpdate(logger klog.Logger, prevObj, obj interface{}) {
 	prevEndpointSlice := prevObj.(*discovery.EndpointSlice)
 	endpointSlice := obj.(*discovery.EndpointSlice)
 	if endpointSlice == nil || prevEndpointSlice == nil {
@@ -442,7 +454,7 @@ func (c *Controller) onEndpointSliceUpdate(prevObj, obj interface{}) {
 	svcName := endpointSlice.Labels[discovery.LabelServiceName]
 	prevSvcName := prevEndpointSlice.Labels[discovery.LabelServiceName]
 	if svcName != prevSvcName {
-		klog.Warningf("%s label changed from %s to %s for %s", discovery.LabelServiceName, prevSvcName, svcName, endpointSlice.Name)
+		logger.Info("label changed", "label", discovery.LabelServiceName, "oldService", prevSvcName, "newService", svcName, "endpointslice", klog.KObj(endpointSlice))
 		c.queueServiceForEndpointSlice(endpointSlice)
 		c.queueServiceForEndpointSlice(prevEndpointSlice)
 		return
@@ -512,11 +524,11 @@ func (c *Controller) deletePod(obj interface{}) {
 	}
 }
 
-func (c *Controller) addNode(obj interface{}) {
-	c.checkNodeTopologyDistribution()
+func (c *Controller) addNode(logger klog.Logger, obj interface{}) {
+	c.checkNodeTopologyDistribution(logger)
 }
 
-func (c *Controller) updateNode(old, cur interface{}) {
+func (c *Controller) updateNode(logger klog.Logger, old, cur interface{}) {
 	oldNode := old.(*v1.Node)
 	curNode := cur.(*v1.Node)
 
@@ -524,29 +536,29 @@ func (c *Controller) updateNode(old, cur interface{}) {
 	// The topology cache should be updated in this case.
 	if isNodeReady(oldNode) != isNodeReady(curNode) ||
 		oldNode.Labels[v1.LabelTopologyZone] != curNode.Labels[v1.LabelTopologyZone] {
-		c.checkNodeTopologyDistribution()
+		c.checkNodeTopologyDistribution(logger)
 	}
 }
 
-func (c *Controller) deleteNode(obj interface{}) {
-	c.checkNodeTopologyDistribution()
+func (c *Controller) deleteNode(logger klog.Logger, obj interface{}) {
+	c.checkNodeTopologyDistribution(logger)
 }
 
 // checkNodeTopologyDistribution updates Nodes in the topology cache and then
 // queues any Services that are past the threshold.
-func (c *Controller) checkNodeTopologyDistribution() {
+func (c *Controller) checkNodeTopologyDistribution(logger klog.Logger) {
 	if c.topologyCache == nil {
 		return
 	}
 	nodes, err := c.nodeLister.List(labels.Everything())
 	if err != nil {
-		klog.Errorf("Error listing Nodes: %v", err)
+		logger.Error(err, "Error listing Nodes")
 		return
 	}
-	c.topologyCache.SetNodes(nodes)
+	c.topologyCache.SetNodes(logger, nodes)
 	serviceKeys := c.topologyCache.GetOverloadedServices()
 	for _, serviceKey := range serviceKeys {
-		klog.V(2).Infof("Queuing %s Service after Node change due to overloading", serviceKey)
+		logger.V(2).Info("Queuing Service after Node change due to overloading", "key", serviceKey)
 		c.queue.Add(serviceKey)
 	}
 }
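The remaining hunks update the unit tests to match the new signatures. For reference, here is a minimal, hypothetical sketch (not taken from the commit) of the k8s.io/klog/v2/ktesting helper those tests rely on; fakeSync stands in for a function like syncService that now takes a klog.Logger.

package endpointslice_test

import (
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
)

// fakeSync is a placeholder for a sync function that accepts an explicit logger.
func fakeSync(logger klog.Logger, key string) error {
	logger.V(4).Info("Syncing", "key", key)
	return nil
}

func TestFakeSync(t *testing.T) {
	// NewTestContext returns a logger wired to t.Log plus a context carrying
	// that logger; tests pass whichever form the code under test expects
	// (logger for syncService, ctx for Run).
	logger, ctx := ktesting.NewTestContext(t)
	_ = ctx

	if err := fakeSync(logger, "default/example"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}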
@@ -40,6 +40,7 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 	k8stesting "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/cache"
+	"k8s.io/klog/v2/ktesting"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/endpointslice/topologycache"
 	endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint"
@@ -60,7 +61,7 @@ type endpointSliceController struct {
 	serviceStore cache.Store
 }
 
-func newController(nodeNames []string, batchPeriod time.Duration) (*fake.Clientset, *endpointSliceController) {
+func newController(t *testing.T, nodeNames []string, batchPeriod time.Duration) (*fake.Clientset, *endpointSliceController) {
 	client := fake.NewSimpleClientset()
 
 	informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
@@ -95,7 +96,9 @@ func newController(nodeNames []string, batchPeriod time.Duration) (*fake.Clients
 		return false, endpointSlice, nil
 	}))
 
+	_, ctx := ktesting.NewTestContext(t)
 	esController := NewController(
+		ctx,
 		informerFactory.Core().V1().Pods(),
 		informerFactory.Core().V1().Services(),
 		nodeInformer,
@@ -122,7 +125,7 @@ func newController(nodeNames []string, batchPeriod time.Duration) (*fake.Clients
 func TestSyncServiceNoSelector(t *testing.T) {
 	ns := metav1.NamespaceDefault
 	serviceName := "testing-1"
-	client, esController := newController([]string{"node-1"}, time.Duration(0))
+	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
 	esController.serviceStore.Add(&v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: ns},
 		Spec: v1.ServiceSpec{
@@ -130,7 +133,8 @@ func TestSyncServiceNoSelector(t *testing.T) {
 		},
 	})
 
-	err := esController.syncService(fmt.Sprintf("%s/%s", ns, serviceName))
+	logger, _ := ktesting.NewTestContext(t)
+	err := esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
 	assert.NoError(t, err)
 	assert.Len(t, client.Actions(), 0)
 }
@@ -187,7 +191,8 @@ func TestServiceExternalNameTypeSync(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.desc, func(t *testing.T) {
-			client, esController := newController([]string{"node-1"}, time.Duration(0))
+			client, esController := newController(t, []string{"node-1"}, time.Duration(0))
+			logger, _ := ktesting.NewTestContext(t)
 
 			pod := newPod(1, namespace, true, 0, false)
 			err := esController.podStore.Add(pod)
@@ -196,7 +201,7 @@ func TestServiceExternalNameTypeSync(t *testing.T) {
 			err = esController.serviceStore.Add(tc.service)
 			assert.NoError(t, err)
 
-			err = esController.syncService(fmt.Sprintf("%s/%s", namespace, serviceName))
+			err = esController.syncService(logger, fmt.Sprintf("%s/%s", namespace, serviceName))
 			assert.NoError(t, err)
 			assert.Len(t, client.Actions(), 0)
 
@@ -212,7 +217,7 @@ func TestSyncServicePendingDeletion(t *testing.T) {
 	ns := metav1.NamespaceDefault
 	serviceName := "testing-1"
 	deletionTimestamp := metav1.Now()
-	client, esController := newController([]string{"node-1"}, time.Duration(0))
+	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
 	esController.serviceStore.Add(&v1.Service{
 		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: ns, DeletionTimestamp: &deletionTimestamp},
 		Spec: v1.ServiceSpec{
@@ -221,7 +226,8 @@ func TestSyncServicePendingDeletion(t *testing.T) {
 		},
 	})
 
-	err := esController.syncService(fmt.Sprintf("%s/%s", ns, serviceName))
+	logger, _ := ktesting.NewTestContext(t)
+	err := esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
 	assert.NoError(t, err)
 	assert.Len(t, client.Actions(), 0)
 }
@@ -230,7 +236,7 @@ func TestSyncServicePendingDeletion(t *testing.T) {
 func TestSyncServiceWithSelector(t *testing.T) {
 	ns := metav1.NamespaceDefault
 	serviceName := "testing-1"
-	client, esController := newController([]string{"node-1"}, time.Duration(0))
+	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
 	standardSyncService(t, esController, ns, serviceName)
 	expectActions(t, client.Actions(), 1, "create", "endpointslices")
 
@@ -250,7 +256,7 @@ func TestSyncServiceWithSelector(t *testing.T) {
 // remove too much.
 func TestSyncServiceMissing(t *testing.T) {
 	namespace := metav1.NamespaceDefault
-	client, esController := newController([]string{"node-1"}, time.Duration(0))
+	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
 
 	// Build up existing service
 	existingServiceName := "stillthere"
@@ -269,7 +275,8 @@ func TestSyncServiceMissing(t *testing.T) {
 	missingServiceKey := endpointutil.ServiceKey{Name: missingServiceName, Namespace: namespace}
 	esController.triggerTimeTracker.ServiceStates[missingServiceKey] = endpointutil.ServiceState{}
 
-	err := esController.syncService(fmt.Sprintf("%s/%s", namespace, missingServiceName))
+	logger, _ := ktesting.NewTestContext(t)
+	err := esController.syncService(logger, fmt.Sprintf("%s/%s", namespace, missingServiceName))
 
 	// nil should be returned when the service doesn't exist
 	assert.Nil(t, err, "Expected no error syncing service")
@@ -286,7 +293,7 @@ func TestSyncServiceMissing(t *testing.T) {
 
 // Ensure SyncService correctly selects Pods.
 func TestSyncServicePodSelection(t *testing.T) {
-	client, esController := newController([]string{"node-1"}, time.Duration(0))
+	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
 	ns := metav1.NamespaceDefault
 
 	pod1 := newPod(1, ns, true, 0, false)
@@ -312,11 +319,12 @@ func TestSyncServicePodSelection(t *testing.T) {
 }
 
 func TestSyncServiceEndpointSlicePendingDeletion(t *testing.T) {
-	client, esController := newController([]string{"node-1"}, time.Duration(0))
+	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
 	ns := metav1.NamespaceDefault
 	serviceName := "testing-1"
 	service := createService(t, esController, ns, serviceName)
-	err := esController.syncService(fmt.Sprintf("%s/%s", ns, serviceName))
+	logger, _ := ktesting.NewTestContext(t)
+	err := esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
 	assert.Nil(t, err, "Expected no error syncing service")
 
 	gvk := schema.GroupVersionKind{Version: "v1", Kind: "Service"}
@@ -345,8 +353,9 @@ func TestSyncServiceEndpointSlicePendingDeletion(t *testing.T) {
 		t.Fatalf("Expected no error creating EndpointSlice: %v", err)
 	}
 
+	logger, _ = ktesting.NewTestContext(t)
 	numActionsBefore := len(client.Actions())
-	err = esController.syncService(fmt.Sprintf("%s/%s", ns, serviceName))
+	err = esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
 	assert.Nil(t, err, "Expected no error syncing service")
 
 	// The EndpointSlice marked for deletion should be ignored by the controller, and thus
@@ -358,7 +367,7 @@ func TestSyncServiceEndpointSlicePendingDeletion(t *testing.T) {
 
 // Ensure SyncService correctly selects and labels EndpointSlices.
 func TestSyncServiceEndpointSliceLabelSelection(t *testing.T) {
-	client, esController := newController([]string{"node-1"}, time.Duration(0))
+	client, esController := newController(t, []string{"node-1"}, time.Duration(0))
 	ns := metav1.NamespaceDefault
 	serviceName := "testing-1"
 	service := createService(t, esController, ns, serviceName)
@@ -435,7 +444,8 @@ func TestSyncServiceEndpointSliceLabelSelection(t *testing.T) {
 	}
 
 	numActionsBefore := len(client.Actions())
-	err := esController.syncService(fmt.Sprintf("%s/%s", ns, serviceName))
+	logger, _ := ktesting.NewTestContext(t)
+	err := esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
 	assert.Nil(t, err, "Expected no error syncing service")
 
 	if len(client.Actions()) != numActionsBefore+2 {
@@ -451,7 +461,7 @@ func TestSyncServiceEndpointSliceLabelSelection(t *testing.T) {
 }
 
 func TestOnEndpointSliceUpdate(t *testing.T) {
-	_, esController := newController([]string{"node-1"}, time.Duration(0))
+	_, esController := newController(t, []string{"node-1"}, time.Duration(0))
 	ns := metav1.NamespaceDefault
 	serviceName := "testing-1"
 	epSlice1 := &discovery.EndpointSlice{
@@ -466,11 +476,12 @@ func TestOnEndpointSliceUpdate(t *testing.T) {
 		AddressType: discovery.AddressTypeIPv4,
 	}
 
+	logger, _ := ktesting.NewTestContext(t)
 	epSlice2 := epSlice1.DeepCopy()
 	epSlice2.Labels[discovery.LabelManagedBy] = "something else"
 
 	assert.Equal(t, 0, esController.queue.Len())
-	esController.onEndpointSliceUpdate(epSlice1, epSlice2)
+	esController.onEndpointSliceUpdate(logger, epSlice1, epSlice2)
 	err := wait.PollImmediate(100*time.Millisecond, 3*time.Second, func() (bool, error) {
 		if esController.queue.Len() > 0 {
 			return true, nil
@@ -1214,7 +1225,7 @@ func TestSyncService(t *testing.T) {
 
 	for _, testcase := range testcases {
 		t.Run(testcase.name, func(t *testing.T) {
-			client, esController := newController([]string{"node-1"}, time.Duration(0))
+			client, esController := newController(t, []string{"node-1"}, time.Duration(0))
 
 			for _, pod := range testcase.pods {
 				esController.podStore.Add(pod)
@@ -1224,7 +1235,8 @@ func TestSyncService(t *testing.T) {
 			_, err := esController.client.CoreV1().Services(testcase.service.Namespace).Create(context.TODO(), testcase.service, metav1.CreateOptions{})
 			assert.Nil(t, err, "Expected no error creating service")
 
-			err = esController.syncService(fmt.Sprintf("%s/%s", testcase.service.Namespace, testcase.service.Name))
+			logger, _ := ktesting.NewTestContext(t)
+			err = esController.syncService(logger, fmt.Sprintf("%s/%s", testcase.service.Namespace, testcase.service.Name))
 			assert.Nil(t, err)
 
 			// last action should be to create endpoint slice
@@ -1318,11 +1330,12 @@ func TestPodAddsBatching(t *testing.T) {
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
 			ns := metav1.NamespaceDefault
-			client, esController := newController([]string{"node-1"}, tc.batchPeriod)
+			client, esController := newController(t, []string{"node-1"}, tc.batchPeriod)
 			stopCh := make(chan struct{})
 			defer close(stopCh)
 
-			go esController.Run(1, stopCh)
+			_, ctx := ktesting.NewTestContext(t)
+			go esController.Run(ctx, 1)
 
 			esController.serviceStore.Add(&v1.Service{
 				ObjectMeta: metav1.ObjectMeta{Name: "foo", Namespace: ns},
@@ -1452,11 +1465,12 @@ func TestPodUpdatesBatching(t *testing.T) {
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
 			ns := metav1.NamespaceDefault
-			client, esController := newController([]string{"node-1"}, tc.batchPeriod)
+			client, esController := newController(t, []string{"node-1"}, tc.batchPeriod)
 			stopCh := make(chan struct{})
 			defer close(stopCh)
 
-			go esController.Run(1, stopCh)
+			_, ctx := ktesting.NewTestContext(t)
+			go esController.Run(ctx, 1)
 
 			addPods(t, esController, ns, tc.podsCount)
 
@@ -1589,11 +1603,12 @@ func TestPodDeleteBatching(t *testing.T) {
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
 			ns := metav1.NamespaceDefault
-			client, esController := newController([]string{"node-1"}, tc.batchPeriod)
+			client, esController := newController(t, []string{"node-1"}, tc.batchPeriod)
 			stopCh := make(chan struct{})
 			defer close(stopCh)
 
-			go esController.Run(1, stopCh)
+			_, ctx := ktesting.NewTestContext(t)
+			go esController.Run(ctx, 1)
 
 			addPods(t, esController, ns, tc.podsCount)
 
@@ -1655,7 +1670,7 @@ func TestSyncServiceStaleInformer(t *testing.T) {
 
 	for _, testcase := range testcases {
 		t.Run(testcase.name, func(t *testing.T) {
-			_, esController := newController([]string{"node-1"}, time.Duration(0))
+			_, esController := newController(t, []string{"node-1"}, time.Duration(0))
 			ns := metav1.NamespaceDefault
 			serviceName := "testing-1"
 
@@ -1691,7 +1706,8 @@ func TestSyncServiceStaleInformer(t *testing.T) {
 			epSlice2.Generation = testcase.trackerGenerationNumber
 			esController.endpointSliceTracker.Update(epSlice2)
 
-			err = esController.syncService(fmt.Sprintf("%s/%s", ns, serviceName))
+			logger, _ := ktesting.NewTestContext(t)
+			err = esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
 			// Check if we got a StaleInformerCache error
 			if endpointsliceutil.IsStaleInformerCacheErr(err) != testcase.expectError {
 				t.Fatalf("Expected error because informer cache is outdated")
@@ -1835,7 +1851,7 @@ func Test_checkNodeTopologyDistribution(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			_, esController := newController([]string{}, time.Duration(0))
+			_, esController := newController(t, []string{}, time.Duration(0))
 
 			for i, nodeInfo := range tc.nodes {
 				node := &v1.Node{
@@ -1869,7 +1885,8 @@ func Test_checkNodeTopologyDistribution(t *testing.T) {
 				}
 			}
 
-			esController.checkNodeTopologyDistribution()
+			logger, _ := ktesting.NewTestContext(t)
+			esController.checkNodeTopologyDistribution(logger)
 
 			if esController.queue.Len() != tc.expectedQueueLen {
 				t.Errorf("Expected %d services to be queued, got %d", tc.expectedQueueLen, esController.queue.Len())
@@ -1890,7 +1907,7 @@ func TestUpdateNode(t *testing.T) {
 			},
 		},
 	}
-	_, esController := newController(nil, time.Duration(0))
+	_, esController := newController(t, nil, time.Duration(0))
 	sliceInfo := &topologycache.SliceInfo{
 		ServiceKey: "ns/svc",
 		AddressType: discovery.AddressTypeIPv4,
@@ -1928,12 +1945,13 @@ func TestUpdateNode(t *testing.T) {
 		ObjectMeta: metav1.ObjectMeta{Name: "node-2"},
 		Status: nodeReadyStatus,
 	}
+	logger, _ := ktesting.NewTestContext(t)
 	esController.nodeStore.Add(node1)
 	esController.nodeStore.Add(node2)
-	esController.addNode(node1)
-	esController.addNode(node2)
+	esController.addNode(logger, node1)
+	esController.addNode(logger, node2)
 	// The Nodes don't have the zone label, AddHints should fail.
-	_, _, eventsBuilders := esController.topologyCache.AddHints(sliceInfo)
+	_, _, eventsBuilders := esController.topologyCache.AddHints(logger, sliceInfo)
 	require.Len(t, eventsBuilders, 1)
 	assert.Contains(t, eventsBuilders[0].Message, topologycache.InsufficientNodeInfo)
 
@@ -1945,9 +1963,9 @@ func TestUpdateNode(t *testing.T) {
 	// After adding the zone label to the Nodes and calling the event handler updateNode, AddHints should succeed.
 	esController.nodeStore.Update(updateNode1)
 	esController.nodeStore.Update(updateNode2)
-	esController.updateNode(node1, updateNode1)
-	esController.updateNode(node2, updateNode2)
-	_, _, eventsBuilders = esController.topologyCache.AddHints(sliceInfo)
+	esController.updateNode(logger, node1, updateNode1)
+	esController.updateNode(logger, node2, updateNode2)
+	_, _, eventsBuilders = esController.topologyCache.AddHints(logger, sliceInfo)
 	require.Len(t, eventsBuilders, 1)
 	assert.Contains(t, eventsBuilders[0].Message, topologycache.TopologyAwareHintsEnabled)
 }
@@ -1965,7 +1983,8 @@ func standardSyncService(t *testing.T, esController *endpointSliceController, na
 	t.Helper()
 	createService(t, esController, namespace, serviceName)
 
-	err := esController.syncService(fmt.Sprintf("%s/%s", namespace, serviceName))
+	logger, _ := ktesting.NewTestContext(t)
+	err := esController.syncService(logger, fmt.Sprintf("%s/%s", namespace, serviceName))
 	assert.Nil(t, err, "Expected no error syncing service")
 }
 
@@ -66,13 +66,13 @@ type endpointMeta struct {
 // compares them with the endpoints already present in any existing endpoint
 // slices for the given service. It creates, updates, or deletes endpoint slices
 // to ensure the desired set of pods are represented by endpoint slices.
-func (r *reconciler) reconcile(service *corev1.Service, pods []*corev1.Pod, existingSlices []*discovery.EndpointSlice, triggerTime time.Time) error {
+func (r *reconciler) reconcile(logger klog.Logger, service *corev1.Service, pods []*corev1.Pod, existingSlices []*discovery.EndpointSlice, triggerTime time.Time) error {
 	slicesToDelete := []*discovery.EndpointSlice{} // slices that are no longer matching any address the service has
 	errs := []error{} // all errors generated in the process of reconciling
 	slicesByAddressType := make(map[discovery.AddressType][]*discovery.EndpointSlice) // slices by address type
 
 	// addresses that this service supports [o(1) find]
-	serviceSupportedAddressesTypes := getAddressTypesForService(service)
+	serviceSupportedAddressesTypes := getAddressTypesForService(logger, service)
 
 	// loop through slices identifying their address type.
 	// slices that no longer match address type supported by services
@@ -84,7 +84,7 @@ func (r *reconciler) reconcile(service *corev1.Service, pods []*corev1.Pod, exis
 		if r.topologyCache != nil {
 			svcKey, err := serviceControllerKey(existingSlice)
 			if err != nil {
-				klog.Warningf("Couldn't get key to remove EndpointSlice from topology cache %+v: %v", existingSlice, err)
+				logger.Info("Couldn't get key to remove EndpointSlice from topology cache", "existingSlice", existingSlice, "err", err)
 			} else {
 				r.topologyCache.RemoveHints(svcKey, existingSlice.AddressType)
 			}
@@ -105,7 +105,7 @@ func (r *reconciler) reconcile(service *corev1.Service, pods []*corev1.Pod, exis
 	// reconcile for existing.
 	for addressType := range serviceSupportedAddressesTypes {
 		existingSlices := slicesByAddressType[addressType]
-		err := r.reconcileByAddressType(service, pods, existingSlices, triggerTime, addressType)
+		err := r.reconcileByAddressType(logger, service, pods, existingSlices, triggerTime, addressType)
 		if err != nil {
 			errs = append(errs, err)
 		}
@@ -130,7 +130,7 @@ func (r *reconciler) reconcile(service *corev1.Service, pods []*corev1.Pod, exis
 // compares them with the endpoints already present in any existing endpoint
 // slices (by address type) for the given service. It creates, updates, or deletes endpoint slices
 // to ensure the desired set of pods are represented by endpoint slices.
-func (r *reconciler) reconcileByAddressType(service *corev1.Service, pods []*corev1.Pod, existingSlices []*discovery.EndpointSlice, triggerTime time.Time, addressType discovery.AddressType) error {
+func (r *reconciler) reconcileByAddressType(logger klog.Logger, service *corev1.Service, pods []*corev1.Pod, existingSlices []*discovery.EndpointSlice, triggerTime time.Time, addressType discovery.AddressType) error {
 	errs := []error{}
 
 	slicesToCreate := []*discovery.EndpointSlice{}
@@ -158,7 +158,7 @@ func (r *reconciler) reconcileByAddressType(service *corev1.Service, pods []*cor
 			continue
 		}
 
-		endpointPorts := getEndpointPorts(service, pod)
+		endpointPorts := getEndpointPorts(logger, service, pod)
 		epHash := endpointutil.NewPortMapKey(endpointPorts)
 		if _, ok := desiredEndpointsByPortMap[epHash]; !ok {
 			desiredEndpointsByPortMap[epHash] = endpointsliceutil.EndpointSet{}
@@ -186,7 +186,7 @@ func (r *reconciler) reconcileByAddressType(service *corev1.Service, pods []*cor
 		// On the other side, if the service.Spec.PublishNotReadyAddresses is set we just add the
 		// Pod, since the user is explicitly indicating that the Pod address should be published.
 		if !service.Spec.PublishNotReadyAddresses {
-			klog.Warningf("skipping Pod %s for Service %s/%s: Node %s Not Found", pod.Name, service.Namespace, service.Name, pod.Spec.NodeName)
+			logger.Info("skipping Pod for Service, Node not found", "pod", klog.KObj(pod), "service", klog.KObj(service), "node", klog.KRef("", pod.Spec.NodeName))
 			errs = append(errs, fmt.Errorf("skipping Pod %s for Service %s/%s: Node %s Not Found", pod.Name, service.Namespace, service.Name, pod.Spec.NodeName))
 			continue
 		}
@@ -205,7 +205,7 @@ func (r *reconciler) reconcileByAddressType(service *corev1.Service, pods []*cor
 	for portMap, desiredEndpoints := range desiredEndpointsByPortMap {
 		numEndpoints := len(desiredEndpoints)
 		pmSlicesToCreate, pmSlicesToUpdate, pmSlicesToDelete, added, removed := r.reconcileByPortMapping(
-			service, existingSlicesByPortMap[portMap], desiredEndpoints, desiredMetaByPortMap[portMap])
+			logger, service, existingSlicesByPortMap[portMap], desiredEndpoints, desiredMetaByPortMap[portMap])
 
 		totalAdded += added
 		totalRemoved += removed
@@ -231,7 +231,7 @@ func (r *reconciler) reconcileByAddressType(service *corev1.Service, pods []*cor
 	// When no endpoint slices would usually exist, we need to add a placeholder.
 	if len(existingSlices) == len(slicesToDelete) && len(slicesToCreate) < 1 {
 		// Check for existing placeholder slice outside of the core control flow
-		placeholderSlice := newEndpointSlice(service, &endpointMeta{ports: []discovery.EndpointPort{}, addressType: addressType})
+		placeholderSlice := newEndpointSlice(logger, service, &endpointMeta{ports: []discovery.EndpointPort{}, addressType: addressType})
 		if len(slicesToDelete) == 1 && placeholderSliceCompare.DeepEqual(slicesToDelete[0], placeholderSlice) {
 			// We are about to unnecessarily delete/recreate the placeholder, remove it now.
 			slicesToDelete = slicesToDelete[:0]
@@ -262,11 +262,11 @@ func (r *reconciler) reconcileByAddressType(service *corev1.Service, pods []*cor
 	}
 
 	if r.topologyCache != nil && hintsEnabled(service.Annotations) {
-		slicesToCreate, slicesToUpdate, events = r.topologyCache.AddHints(si)
+		slicesToCreate, slicesToUpdate, events = r.topologyCache.AddHints(logger, si)
 	} else {
 		if r.topologyCache != nil {
 			if r.topologyCache.HasPopulatedHints(si.ServiceKey) {
-				klog.InfoS("TopologyAwareHints annotation has changed, removing hints", "serviceKey", si.ServiceKey, "addressType", si.AddressType)
+				logger.Info("TopologyAwareHints annotation has changed, removing hints", "serviceKey", si.ServiceKey, "addressType", si.AddressType)
 				events = append(events, &topologycache.EventBuilder{
 					EventType: corev1.EventTypeWarning,
 					Reason: "TopologyAwareHintsDisabled",
@@ -407,6 +407,7 @@ func (r *reconciler) finalize(
 // 3. If there still desired endpoints left, try to fit them into a previously
 // unchanged slice and/or create new ones.
 func (r *reconciler) reconcileByPortMapping(
+	logger klog.Logger,
 	service *corev1.Service,
 	existingSlices []*discovery.EndpointSlice,
 	desiredSet endpointsliceutil.EndpointSet,
@@ -441,7 +442,7 @@ func (r *reconciler) reconcileByPortMapping(
 		}
 
 		// generate the slice labels and check if parent labels have changed
-		labels, labelsChanged := setEndpointSliceLabels(existingSlice, service)
+		labels, labelsChanged := setEndpointSliceLabels(logger, existingSlice, service)
 
 		// If an endpoint was updated or removed, mark for update or delete
 		if endpointUpdated || len(existingSlice.Endpoints) != len(newEndpoints) {
@@ -514,7 +515,7 @@ func (r *reconciler) reconcileByPortMapping(
 
 		// If we didn't find a sliceToFill, generate a new empty one.
 		if sliceToFill == nil {
-			sliceToFill = newEndpointSlice(service, endpointMeta)
+			sliceToFill = newEndpointSlice(logger, service, endpointMeta)
 		} else {
 			// deep copy required to modify this slice.
 			sliceToFill = sliceToFill.DeepCopy()
@ -38,6 +38,7 @@ import (
|
|||||||
k8stesting "k8s.io/client-go/testing"
|
k8stesting "k8s.io/client-go/testing"
|
||||||
"k8s.io/client-go/tools/record"
|
"k8s.io/client-go/tools/record"
|
||||||
"k8s.io/component-base/metrics/testutil"
|
"k8s.io/component-base/metrics/testutil"
|
||||||
|
"k8s.io/klog/v2/ktesting"
|
||||||
"k8s.io/kubernetes/pkg/controller"
|
"k8s.io/kubernetes/pkg/controller"
|
||||||
"k8s.io/kubernetes/pkg/controller/endpointslice/metrics"
|
"k8s.io/kubernetes/pkg/controller/endpointslice/metrics"
|
||||||
"k8s.io/kubernetes/pkg/controller/endpointslice/topologycache"
|
"k8s.io/kubernetes/pkg/controller/endpointslice/topologycache"
|
||||||
@ -481,6 +482,7 @@ func TestReconcile1EndpointSlice(t *testing.T) {
|
|||||||
svc, epMeta := newServiceAndEndpointMeta("foo", namespace)
|
svc, epMeta := newServiceAndEndpointMeta("foo", namespace)
|
||||||
emptySlice := newEmptyEndpointSlice(1, namespace, epMeta, svc)
|
emptySlice := newEmptyEndpointSlice(1, namespace, epMeta, svc)
|
||||||
emptySlice.ObjectMeta.Labels = map[string]string{"bar": "baz"}
|
emptySlice.ObjectMeta.Labels = map[string]string{"bar": "baz"}
|
||||||
|
logger, _ := ktesting.NewTestContext(t)
|
||||||
|
|
||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
desc string
|
desc string
|
||||||
@ -495,7 +497,7 @@ func TestReconcile1EndpointSlice(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
desc: "Existing placeholder that's the same",
|
desc: "Existing placeholder that's the same",
|
||||||
existing: newEndpointSlice(&svc, &endpointMeta{ports: []discovery.EndpointPort{}, addressType: discovery.AddressTypeIPv4}),
|
existing: newEndpointSlice(logger, &svc, &endpointMeta{ports: []discovery.EndpointPort{}, addressType: discovery.AddressTypeIPv4}),
|
||||||
wantMetrics: expectedMetrics{desiredSlices: 1, actualSlices: 1, desiredEndpoints: 0, addedPerSync: 0, removedPerSync: 0, numCreated: 0, numUpdated: 0, numDeleted: 0, slicesChangedPerSync: 0},
|
wantMetrics: expectedMetrics{desiredSlices: 1, actualSlices: 1, desiredEndpoints: 0, addedPerSync: 0, removedPerSync: 0, numCreated: 0, numUpdated: 0, numDeleted: 0, slicesChangedPerSync: 0},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -1608,6 +1610,7 @@ func TestReconcilerPodMissingNode(t *testing.T) {
|
|||||||
client := newClientset()
|
client := newClientset()
|
||||||
setupMetrics()
|
setupMetrics()
|
||||||
r := newReconciler(client, tc.existingNodes, defaultMaxEndpointsPerSlice)
|
r := newReconciler(client, tc.existingNodes, defaultMaxEndpointsPerSlice)
|
||||||
|
logger, _ := ktesting.NewTestContext(t)
|
||||||
|
|
||||||
svc := service.DeepCopy()
|
svc := service.DeepCopy()
|
||||||
svc.Spec.PublishNotReadyAddresses = tc.publishNotReady
|
svc.Spec.PublishNotReadyAddresses = tc.publishNotReady
|
||||||
@ -1620,7 +1623,7 @@ func TestReconcilerPodMissingNode(t *testing.T) {
|
|||||||
t.Errorf("Expected no error creating endpoint slice")
|
t.Errorf("Expected no error creating endpoint slice")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
err := r.reconcile(svc, pods, existingSlices, time.Now())
|
err := r.reconcile(logger, svc, pods, existingSlices, time.Now())
|
||||||
if err == nil && tc.expectError {
|
if err == nil && tc.expectError {
|
||||||
t.Errorf("Expected error but no error received")
|
t.Errorf("Expected error but no error received")
|
||||||
}
|
}
|
||||||
@ -1833,19 +1836,20 @@ func TestReconcileTopology(t *testing.T) {
|
|||||||
client := newClientset()
|
client := newClientset()
|
||||||
cmc := newCacheMutationCheck(tc.existingSlices)
|
cmc := newCacheMutationCheck(tc.existingSlices)
|
||||||
createEndpointSlices(t, client, ns, tc.existingSlices)
|
createEndpointSlices(t, client, ns, tc.existingSlices)
|
||||||
|
logger, _ := ktesting.NewTestContext(t)
|
||||||
|
|
||||||
setupMetrics()
|
setupMetrics()
|
||||||
r := newReconciler(client, tc.nodes, defaultMaxEndpointsPerSlice)
|
r := newReconciler(client, tc.nodes, defaultMaxEndpointsPerSlice)
|
||||||
if tc.topologyCacheEnabled {
|
if tc.topologyCacheEnabled {
|
||||||
r.topologyCache = topologycache.NewTopologyCache()
|
r.topologyCache = topologycache.NewTopologyCache()
|
||||||
r.topologyCache.SetNodes(tc.nodes)
|
r.topologyCache.SetNodes(logger, tc.nodes)
|
||||||
}
|
}
|
||||||
|
|
||||||
service := svc.DeepCopy()
|
service := svc.DeepCopy()
|
||||||
service.Annotations = map[string]string{
|
service.Annotations = map[string]string{
|
||||||
corev1.DeprecatedAnnotationTopologyAwareHints: tc.hintsAnnotation,
|
corev1.DeprecatedAnnotationTopologyAwareHints: tc.hintsAnnotation,
|
||||||
}
|
}
|
||||||
r.reconcile(service, tc.pods, tc.existingSlices, time.Now())
|
r.reconcile(logger, service, tc.pods, tc.existingSlices, time.Now())
|
||||||
|
|
||||||
cmc.Check(t)
|
cmc.Check(t)
|
||||||
expectMetrics(t, tc.expectedMetrics)
|
expectMetrics(t, tc.expectedMetrics)
|
||||||
@ -2034,8 +2038,9 @@ func fetchEndpointSlices(t *testing.T, client *fake.Clientset, namespace string)
|
|||||||
}
|
}
|
||||||
|
|
||||||
func reconcileHelper(t *testing.T, r *reconciler, service *corev1.Service, pods []*corev1.Pod, existingSlices []*discovery.EndpointSlice, triggerTime time.Time) {
|
func reconcileHelper(t *testing.T, r *reconciler, service *corev1.Service, pods []*corev1.Pod, existingSlices []*discovery.EndpointSlice, triggerTime time.Time) {
|
||||||
|
logger, _ := ktesting.NewTestContext(t)
|
||||||
t.Helper()
|
t.Helper()
|
||||||
err := r.reconcile(service, pods, existingSlices, triggerTime)
|
err := r.reconcile(logger, service, pods, existingSlices, triggerTime)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("Expected no error reconciling Endpoint Slices, got: %v", err)
|
t.Fatalf("Expected no error reconciling Endpoint Slices, got: %v", err)
|
||||||
}
|
}
|
||||||
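The test hunks above all apply the same pattern: obtain a per-test logger from ktesting and pass it explicitly to the function under test. Below is a minimal, self-contained sketch of that pattern; the doWork helper and its name are hypothetical and not part of this commit.

package example

import (
	"testing"

	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
)

// doWork is a hypothetical helper that, after the migration, takes an
// explicit logger instead of calling the package-level klog functions.
func doWork(logger klog.Logger, name string) {
	logger.V(2).Info("Doing work", "name", name)
}

func TestDoWork(t *testing.T) {
	// ktesting.NewTestContext returns a logger (and a context) wired to t,
	// so log output is attributed to the test that produced it.
	logger, _ := ktesting.NewTestContext(t)
	doWork(logger, "foo")
}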
@@ -88,12 +88,12 @@ func (t *TopologyCache) GetOverloadedServices() []string {

 // AddHints adds or updates topology hints on EndpointSlices and returns updated
 // lists of EndpointSlices to create and update.
-func (t *TopologyCache) AddHints(si *SliceInfo) ([]*discovery.EndpointSlice, []*discovery.EndpointSlice, []*EventBuilder) {
+func (t *TopologyCache) AddHints(logger klog.Logger, si *SliceInfo) ([]*discovery.EndpointSlice, []*discovery.EndpointSlice, []*EventBuilder) {
 totalEndpoints := si.getTotalReadyEndpoints()
 allocations, allocationsEvent := t.getAllocations(totalEndpoints)
 events := []*EventBuilder{}
 if allocationsEvent != nil {
-klog.InfoS(allocationsEvent.Message+", removing hints", "serviceKey", si.ServiceKey, "addressType", si.AddressType)
+logger.Info(allocationsEvent.Message+", removing hints", "key", si.ServiceKey, "addressType", si.AddressType)
 allocationsEvent.Message = FormatWithAddressType(allocationsEvent.Message, si.AddressType)
 events = append(events, allocationsEvent)
 t.RemoveHints(si.ServiceKey, si.AddressType)

@@ -116,7 +116,7 @@ func (t *TopologyCache) AddHints(si *SliceInfo) ([]*discovery.EndpointSlice, []*
 continue
 }
 if endpoint.Zone == nil || *endpoint.Zone == "" {
-klog.InfoS("Endpoint found without zone specified, removing hints", "serviceKey", si.ServiceKey, "addressType", si.AddressType)
+logger.Info("Endpoint found without zone specified, removing hints", "key", si.ServiceKey, "addressType", si.AddressType)
 events = append(events, &EventBuilder{
 EventType: v1.EventTypeWarning,
 Reason: "TopologyAwareHintsDisabled",

@@ -136,14 +136,14 @@ func (t *TopologyCache) AddHints(si *SliceInfo) ([]*discovery.EndpointSlice, []*
 givingZones, receivingZones := getGivingAndReceivingZones(allocations, allocatedHintsByZone)

 // step 3. Redistribute endpoints based on data from step 2.
-redistributions := redistributeHints(allocatableSlices, givingZones, receivingZones)
+redistributions := redistributeHints(logger, allocatableSlices, givingZones, receivingZones)

 for zone, diff := range redistributions {
 allocatedHintsByZone[zone] += diff
 }

 if len(allocatedHintsByZone) == 0 {
-klog.V(2).InfoS("No hints allocated for zones, removing them", "serviceKey", si.ServiceKey, "addressType", si.AddressType)
+logger.V(2).Info("No hints allocated for zones, removing them", "key", si.ServiceKey, "addressType", si.AddressType)
 events = append(events, &EventBuilder{
 EventType: v1.EventTypeWarning,
 Reason: "TopologyAwareHintsDisabled",

@@ -159,7 +159,7 @@ func (t *TopologyCache) AddHints(si *SliceInfo) ([]*discovery.EndpointSlice, []*

 // if hints were not enabled before, we publish an event to indicate we enabled them.
 if !hintsEnabled {
-klog.InfoS("Topology Aware Hints has been enabled, adding hints.", "serviceKey", si.ServiceKey, "addressType", si.AddressType)
+logger.Info("Topology Aware Hints has been enabled, adding hints.", "key", si.ServiceKey, "addressType", si.AddressType)
 events = append(events, &EventBuilder{
 EventType: v1.EventTypeNormal,
 Reason: "TopologyAwareHintsEnabled",

@@ -201,7 +201,7 @@ func (t *TopologyCache) RemoveHints(serviceKey string, addrType discovery.Addres
 }

 // SetNodes updates the Node distribution for the TopologyCache.
-func (t *TopologyCache) SetNodes(nodes []*v1.Node) {
+func (t *TopologyCache) SetNodes(logger klog.Logger, nodes []*v1.Node) {
 cpuByZone := map[string]*resource.Quantity{}
 sufficientNodeInfo := true

@@ -209,11 +209,11 @@ func (t *TopologyCache) SetNodes(nodes []*v1.Node) {

 for _, node := range nodes {
 if hasExcludedLabels(node.Labels) {
-klog.V(2).Infof("Ignoring node %s because it has an excluded label", node.Name)
+logger.V(2).Info("Ignoring node because it has an excluded label", "node", klog.KObj(node))
 continue
 }
 if !isNodeReady(node) {
-klog.V(2).Infof("Ignoring node %s because it is not ready: %v", node.Name, node.Status.Conditions)
+logger.V(2).Info("Ignoring node because it is not ready", "node", klog.KObj(node))
 continue
 }

@@ -229,7 +229,7 @@ func (t *TopologyCache) SetNodes(nodes []*v1.Node) {
 if !ok || zone == "" || nodeCPU.IsZero() {
 cpuByZone = map[string]*resource.Quantity{}
 sufficientNodeInfo = false
-klog.Warningf("Can't get CPU or zone information for %s node", node.Name)
+logger.Info("Can't get CPU or zone information for node", "node", klog.KObj(node))
 break
 }

@@ -245,7 +245,7 @@ func (t *TopologyCache) SetNodes(nodes []*v1.Node) {
 defer t.lock.Unlock()

 if totalCPU.IsZero() || !sufficientNodeInfo || len(cpuByZone) < 2 {
-klog.V(2).Infof("Insufficient node info for topology hints (%d zones, %s CPU, %t)", len(cpuByZone), totalCPU.String(), sufficientNodeInfo)
+logger.V(2).Info("Insufficient node info for topology hints", "totalZones", len(cpuByZone), "totalCPU", totalCPU.String(), "sufficientNodeInfo", sufficientNodeInfo)
 t.sufficientNodeInfo = false
 t.cpuByZone = nil
 t.cpuRatiosByZone = nil
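Condensed, the production-code change in the hunks above swaps package-level klog calls for a caller-supplied klog.Logger with structured key/value pairs. The sketch below is a simplified stand-in for that pattern, not the actual topologycache code; ignoreNotReadyNodes and isReady are hypothetical names.

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/klog/v2"
)

// ignoreNotReadyNodes shows the shape of the migration: before, a function
// would call klog.V(2).Infof("Ignoring node %s ...", node.Name); after, the
// logger is an explicit parameter and values are structured key/value pairs.
func ignoreNotReadyNodes(logger klog.Logger, nodes []*v1.Node) []*v1.Node {
	ready := make([]*v1.Node, 0, len(nodes))
	for _, node := range nodes {
		if !isReady(node) {
			logger.V(2).Info("Ignoring node because it is not ready", "node", klog.KObj(node))
			continue
		}
		ready = append(ready, node)
	}
	return ready
}

// isReady is a stand-in for the controller's real readiness check.
func isReady(node *v1.Node) bool {
	for _, cond := range node.Status.Conditions {
		if cond.Type == v1.NodeReady {
			return cond.Status == v1.ConditionTrue
		}
	}
	return false
}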
@@ -27,6 +27,7 @@ import (
 discovery "k8s.io/api/discovery/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+"k8s.io/klog/v2/ktesting"
 "k8s.io/utils/pointer"
 )

@@ -382,7 +383,8 @@ func TestAddHints(t *testing.T) {
 cache := NewTopologyCache()
 cache.cpuRatiosByZone = tc.cpuRatiosByZone

-slicesToCreate, slicesToUpdate, events := cache.AddHints(tc.sliceInfo)
+logger, _ := ktesting.NewTestContext(t)
+slicesToCreate, slicesToUpdate, events := cache.AddHints(logger, tc.sliceInfo)

 expectEquivalentSlices(t, slicesToCreate, tc.expectedSlicesToCreate)
 expectEquivalentSlices(t, slicesToUpdate, tc.expectedSlicesToUpdate)

@@ -580,7 +582,8 @@ func TestSetNodes(t *testing.T) {
 })
 }

-cache.SetNodes(nodes)
+logger, _ := ktesting.NewTestContext(t)
+cache.SetNodes(logger, nodes)

 if cache.sufficientNodeInfo != tc.expectSufficientNodeInfo {
 t.Errorf("Expected sufficientNodeInfo to be %t, got %t", tc.expectSufficientNodeInfo, cache.sufficientNodeInfo)

@@ -680,11 +683,12 @@ func TestTopologyCacheRace(t *testing.T) {
 })
 }

+logger, _ := ktesting.NewTestContext(t)
 go func() {
-cache.SetNodes(nodes)
+cache.SetNodes(logger, nodes)
 }()
 go func() {
-cache.AddHints(sliceInfo)
+cache.AddHints(logger, sliceInfo)
 }()
 }
@@ -77,7 +77,7 @@ func FormatWithAddressType(s string, addressType discovery.AddressType) string {
 // It allocates endpoints from the provided givingZones to the provided
 // receivingZones. This returns a map that represents the changes in allocated
 // endpoints by zone.
-func redistributeHints(slices []*discovery.EndpointSlice, givingZones, receivingZones map[string]int) map[string]int {
+func redistributeHints(logger klog.Logger, slices []*discovery.EndpointSlice, givingZones, receivingZones map[string]int) map[string]int {
 redistributions := map[string]int{}

 for _, slice := range slices {

@@ -91,7 +91,7 @@ func redistributeHints(slices []*discovery.EndpointSlice, givingZones, receiving
 }
 if endpoint.Zone == nil || *endpoint.Zone == "" {
 // This should always be caught earlier in AddHints()
-klog.Warningf("Endpoint found without zone specified")
+logger.Info("Endpoint found without zone specified")
 continue
 }
@@ -21,6 +21,7 @@ import (
 "testing"

 discovery "k8s.io/api/discovery/v1"
+"k8s.io/klog/v2/ktesting"
 "k8s.io/utils/pointer"
 )

@@ -73,7 +74,8 @@ func Test_redistributeHints(t *testing.T) {

 for _, tc := range testCases {
 t.Run(tc.name, func(t *testing.T) {
-actualRedistributions := redistributeHints(tc.slices, tc.givingZones, tc.receivingZones)
+logger, _ := ktesting.NewTestContext(t)
+actualRedistributions := redistributeHints(logger, tc.slices, tc.givingZones, tc.receivingZones)

 if len(actualRedistributions) != len(tc.expectedRedistributions) {
 t.Fatalf("Expected redistributions for %d zones, got %d (%+v)", len(tc.expectedRedistributions), len(actualRedistributions), actualRedistributions)
@@ -77,7 +77,7 @@ func podToEndpoint(pod *v1.Pod, node *v1.Node, service *v1.Service, addressType

 // getEndpointPorts returns a list of EndpointPorts generated from a Service
 // and Pod.
-func getEndpointPorts(service *v1.Service, pod *v1.Pod) []discovery.EndpointPort {
+func getEndpointPorts(logger klog.Logger, service *v1.Service, pod *v1.Pod) []discovery.EndpointPort {
 endpointPorts := []discovery.EndpointPort{}

 // Allow headless service not to have ports.

@@ -92,7 +92,7 @@ func getEndpointPorts(service *v1.Service, pod *v1.Pod) []discovery.EndpointPort
 portProto := servicePort.Protocol
 portNum, err := podutil.FindPort(pod, servicePort)
 if err != nil {
-klog.V(4).Infof("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
+logger.V(4).Info("Failed to find port for service", "service", klog.KObj(service), "err", err)
 continue
 }

@@ -128,7 +128,7 @@ func getEndpointAddresses(podStatus v1.PodStatus, service *v1.Service, addressTy

 // newEndpointSlice returns an EndpointSlice generated from a service and
 // endpointMeta.
-func newEndpointSlice(service *v1.Service, endpointMeta *endpointMeta) *discovery.EndpointSlice {
+func newEndpointSlice(logger klog.Logger, service *v1.Service, endpointMeta *endpointMeta) *discovery.EndpointSlice {
 gvk := schema.GroupVersionKind{Version: "v1", Kind: "Service"}
 ownerRef := metav1.NewControllerRef(service, gvk)
 epSlice := &discovery.EndpointSlice{

@@ -143,7 +143,7 @@ func newEndpointSlice(service *v1.Service, endpointMeta *endpointMeta) *discover
 Endpoints: []discovery.Endpoint{},
 }
 // add parent service labels
-epSlice.Labels, _ = setEndpointSliceLabels(epSlice, service)
+epSlice.Labels, _ = setEndpointSliceLabels(logger, epSlice, service)

 return epSlice
 }

@@ -238,7 +238,7 @@ func serviceControllerKey(endpointSlice *discovery.EndpointSlice) (string, error
 // setEndpointSliceLabels returns a map with the new endpoint slices labels and true if there was an update.
 // Slices labels must be equivalent to the Service labels except for the reserved IsHeadlessService, LabelServiceName and LabelManagedBy labels
 // Changes to IsHeadlessService, LabelServiceName and LabelManagedBy labels on the Service do not result in updates to EndpointSlice labels.
-func setEndpointSliceLabels(epSlice *discovery.EndpointSlice, service *v1.Service) (map[string]string, bool) {
+func setEndpointSliceLabels(logger klog.Logger, epSlice *discovery.EndpointSlice, service *v1.Service) (map[string]string, bool) {
 updated := false
 epLabels := make(map[string]string)
 svcLabels := make(map[string]string)

@@ -255,7 +255,7 @@ func setEndpointSliceLabels(epSlice *discovery.EndpointSlice, service *v1.Servic

 for key, value := range service.Labels {
 if isReservedLabelKey(key) {
-klog.Warningf("Service %s/%s using reserved endpoint slices label, skipping label %s: %s", service.Namespace, service.Name, key, value)
+logger.Info("Service using reserved endpoint slices label", "service", klog.KObj(service), "skipping", key, "label", value)
 continue
 }
 // copy service labels

@@ -302,7 +302,7 @@ func (sl endpointSliceEndpointLen) Less(i, j int) bool {
 }

 // returns a map of address types used by a service
-func getAddressTypesForService(service *v1.Service) sets.Set[discovery.AddressType] {
+func getAddressTypesForService(logger klog.Logger, service *v1.Service) sets.Set[discovery.AddressType] {
 serviceSupportedAddresses := sets.New[discovery.AddressType]()
 // TODO: (khenidak) when address types are removed in favor of
 // v1.IPFamily this will need to be removed, and work directly with

@@ -345,7 +345,7 @@ func getAddressTypesForService(service *v1.Service) sets.Set[discovery.AddressTy
 addrType = discovery.AddressTypeIPv6
 }
 serviceSupportedAddresses.Insert(addrType)
-klog.V(2).Infof("couldn't find ipfamilies for service: %v/%v. This could happen if controller manager is connected to an old apiserver that does not support ip families yet. EndpointSlices for this Service will use %s as the IP Family based on familyOf(ClusterIP:%v).", service.Namespace, service.Name, addrType, service.Spec.ClusterIP)
+logger.V(2).Info("Couldn't find ipfamilies for service. This could happen if controller manager is connected to an old apiserver that does not support ip families yet. EndpointSlices for this Service will use addressType as the IP Family based on familyOf(ClusterIP).", "service", klog.KObj(service), "addressType", addrType, "clusterIP", service.Spec.ClusterIP)
 return serviceSupportedAddresses
 }

@@ -356,7 +356,7 @@ func getAddressTypesForService(service *v1.Service) sets.Set[discovery.AddressTy
 // since kubelet will need to restart in order to start patching pod status with multiple ips
 serviceSupportedAddresses.Insert(discovery.AddressTypeIPv4)
 serviceSupportedAddresses.Insert(discovery.AddressTypeIPv6)
-klog.V(2).Infof("couldn't find ipfamilies for headless service: %v/%v likely because controller manager is likely connected to an old apiserver that does not support ip families yet. The service endpoint slice will use dual stack families until api-server default it correctly", service.Namespace, service.Name)
+logger.V(2).Info("Couldn't find ipfamilies for headless service, likely because controller manager is likely connected to an old apiserver that does not support ip families yet. The service endpoint slice will use dual stack families until api-server default it correctly", "service", klog.KObj(service))
 return serviceSupportedAddresses
 }
@@ -33,6 +33,7 @@ import (
 "k8s.io/apimachinery/pkg/util/rand"
 "k8s.io/client-go/kubernetes/fake"
 k8stesting "k8s.io/client-go/testing"
+"k8s.io/klog/v2/ktesting"
 "k8s.io/utils/pointer"
 )

@@ -206,8 +207,9 @@ func TestNewEndpointSlice(t *testing.T) {

 for _, tc := range testCases {
 t.Run(tc.name, func(t *testing.T) {
+logger, _ := ktesting.NewTestContext(t)
 svc := tc.updateSvc(service)
-generatedSlice := newEndpointSlice(&svc, &endpointMeta)
+generatedSlice := newEndpointSlice(logger, &svc, &endpointMeta)
 assert.EqualValues(t, tc.expectedSlice, generatedSlice)
 })
 }

@@ -587,7 +589,8 @@ func TestGetEndpointPorts(t *testing.T) {

 for name, tc := range testCases {
 t.Run(name, func(t *testing.T) {
-actualPorts := getEndpointPorts(tc.service, tc.pod)
+logger, _ := ktesting.NewTestContext(t)
+actualPorts := getEndpointPorts(logger, tc.service, tc.pod)

 if len(actualPorts) != len(tc.expectedPorts) {
 t.Fatalf("Expected %d ports, got %d", len(tc.expectedPorts), len(actualPorts))

@@ -875,8 +878,9 @@ func TestSetEndpointSliceLabels(t *testing.T) {

 for _, tc := range testCases {
 t.Run(tc.name, func(t *testing.T) {
+logger, _ := ktesting.NewTestContext(t)
 svc := tc.updateSvc(service)
-labels, updated := setEndpointSliceLabels(tc.epSlice, &svc)
+labels, updated := setEndpointSliceLabels(logger, tc.epSlice, &svc)
 assert.EqualValues(t, updated, tc.expectedUpdate)
 assert.EqualValues(t, tc.expectedLabels, labels)
 })

@@ -1113,7 +1117,8 @@ func TestSupportedServiceAddressType(t *testing.T) {

 for _, testCase := range testCases {
 t.Run(testCase.name, func(t *testing.T) {
-addressTypes := getAddressTypesForService(&testCase.service)
+logger, _ := ktesting.NewTestContext(t)
+addressTypes := getAddressTypesForService(logger, &testCase.service)
 if len(addressTypes) != len(testCase.expectedAddressTypes) {
 t.Fatalf("expected count address types %v got %v", len(testCase.expectedAddressTypes), len(addressTypes))
 }
|
|||||||
1*time.Second)
|
1*time.Second)
|
||||||
|
|
||||||
epsController := endpointslice.NewController(
|
epsController := endpointslice.NewController(
|
||||||
|
ctx,
|
||||||
informers.Core().V1().Pods(),
|
informers.Core().V1().Pods(),
|
||||||
informers.Core().V1().Services(),
|
informers.Core().V1().Services(),
|
||||||
informers.Core().V1().Nodes(),
|
informers.Core().V1().Nodes(),
|
||||||
@ -112,7 +113,7 @@ func TestDualStackEndpoints(t *testing.T) {
|
|||||||
informers.Start(ctx.Done())
|
informers.Start(ctx.Done())
|
||||||
// use only one worker to serialize the updates
|
// use only one worker to serialize the updates
|
||||||
go epController.Run(ctx, 1)
|
go epController.Run(ctx, 1)
|
||||||
go epsController.Run(1, ctx.Done())
|
go epsController.Run(ctx, 1)
|
||||||
|
|
||||||
var testcases = []struct {
|
var testcases = []struct {
|
||||||
name string
|
name string
|
||||||
|
@@ -47,6 +47,7 @@ func TestEndpointSliceMirroring(t *testing.T) {
 t.Fatalf("Error creating clientset: %v", err)
 }

+ctx, cancel := context.WithCancel(context.Background())
 resyncPeriod := 12 * time.Hour
 informers := informers.NewSharedInformerFactory(client, resyncPeriod)

@@ -58,6 +59,7 @@ func TestEndpointSliceMirroring(t *testing.T) {
 1*time.Second)

 epsController := endpointslice.NewController(
+ctx,
 informers.Core().V1().Pods(),
 informers.Core().V1().Services(),
 informers.Core().V1().Nodes(),

@@ -75,11 +77,10 @@ func TestEndpointSliceMirroring(t *testing.T) {
 1*time.Second)

 // Start informer and controllers
-ctx, cancel := context.WithCancel(context.Background())
 defer cancel()
 informers.Start(ctx.Done())
 go epController.Run(ctx, 5)
-go epsController.Run(5, ctx.Done())
+go epsController.Run(ctx, 5)
 go epsmController.Run(5, ctx.Done())

 testCases := []struct {
@@ -29,6 +29,7 @@ import (
 "k8s.io/apimachinery/pkg/util/wait"
 "k8s.io/client-go/informers"
 clientset "k8s.io/client-go/kubernetes"
+"k8s.io/klog/v2/ktesting"
 kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
 "k8s.io/kubernetes/pkg/controller/endpointslice"
 "k8s.io/kubernetes/test/integration/framework"

@@ -115,7 +116,9 @@ func TestEndpointSliceTerminating(t *testing.T) {
 resyncPeriod := 12 * time.Hour
 informers := informers.NewSharedInformerFactory(client, resyncPeriod)

+_, ctx := ktesting.NewTestContext(t)
 epsController := endpointslice.NewController(
+ctx,
 informers.Core().V1().Pods(),
 informers.Core().V1().Services(),
 informers.Core().V1().Nodes(),

@@ -125,10 +128,10 @@ func TestEndpointSliceTerminating(t *testing.T) {
 1*time.Second)

 // Start informer and controllers
-stopCh := make(chan struct{})
-defer close(stopCh)
-informers.Start(stopCh)
-go epsController.Run(1, stopCh)
+ctx, cancel := context.WithCancel(ctx)
+defer cancel()
+informers.Start(ctx.Done())
+go epsController.Run(ctx, 1)

 // Create namespace
 ns := framework.CreateNamespaceOrDie(client, "test-endpoints-terminating", t)
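The integration-test hunks above replace the old (workers, stopCh) calling convention with Run(ctx, workers). A controller written against that convention can recover the contextual logger from the context it is given; the sketch below is illustrative only (demoController is not the real endpointslice controller).

package example

import (
	"context"

	"k8s.io/klog/v2"
)

// demoController illustrates the Run(ctx, workers) convention used above.
type demoController struct{}

// Run pulls its logger out of the context (klog.FromContext falls back to
// the global logger if none was attached) and waits on ctx.Done() in place
// of the old stop channel.
func (c *demoController) Run(ctx context.Context, workers int) {
	logger := klog.FromContext(ctx)
	logger.Info("Starting workers", "count", workers)
	defer logger.Info("Shutting down workers")
	<-ctx.Done()
}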