Merge pull request #99997 from JornShen/extract_ep_and_epm_share_code_to_pkg

Extract the code shared by the endpointslice and endpointslicemirroring controllers into a new shared EndpointSlice utility package.

This commit is contained in:
commit 6acc62da75
@@ -45,6 +45,7 @@ import (
 	endpointslicemetrics "k8s.io/kubernetes/pkg/controller/endpointslice/metrics"
 	"k8s.io/kubernetes/pkg/controller/endpointslice/topologycache"
 	endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint"
+	endpointsliceutil "k8s.io/kubernetes/pkg/controller/util/endpointslice"
 	"k8s.io/kubernetes/pkg/features"
 )

@@ -141,7 +142,7 @@ func NewController(podInformer coreinformers.PodInformer,

 	c.endpointSliceLister = endpointSliceInformer.Lister()
 	c.endpointSlicesSynced = endpointSliceInformer.Informer().HasSynced
-	c.endpointSliceTracker = newEndpointSliceTracker()
+	c.endpointSliceTracker = endpointsliceutil.NewEndpointSliceTracker()

 	c.maxEndpointsPerSlice = maxEndpointsPerSlice

@@ -204,7 +205,7 @@ type Controller struct {
 	// endpointSliceTracker tracks the list of EndpointSlices and associated
 	// resource versions expected for each Service. It can help determine if a
 	// cached EndpointSlice is out of date.
-	endpointSliceTracker *endpointSliceTracker
+	endpointSliceTracker *endpointsliceutil.EndpointSliceTracker

 	// nodeLister is able to list/get nodes and is populated by the
 	// shared informer passed to NewController
@@ -368,7 +369,7 @@ func (c *Controller) syncService(key string) error {
 	}

 	if c.endpointSliceTracker.StaleSlices(service, endpointSlices) {
-		return &StaleInformerCache{"EndpointSlice informer cache is out of date"}
+		return endpointsliceutil.NewStaleInformerCache("EndpointSlice informer cache is out of date")
 	}

 	// We call ComputeEndpointLastChangeTriggerTime here to make sure that the
@@ -550,7 +551,7 @@ func (c *Controller) checkNodeTopologyDistribution() {
 func trackSync(err error) {
 	metricLabel := "success"
 	if err != nil {
-		if isStaleInformerCacheErr(err) {
+		if endpointsliceutil.IsStaleInformerCacheErr(err) {
 			metricLabel = "stale"
 		} else {
 			metricLabel = "error"
@@ -44,6 +44,7 @@ import (
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/endpointslice/topologycache"
 	endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint"
+	endpointsliceutil "k8s.io/kubernetes/pkg/controller/util/endpointslice"
 	"k8s.io/kubernetes/pkg/features"
 	utilpointer "k8s.io/utils/pointer"
 )
@@ -1522,7 +1523,7 @@ func TestSyncServiceStaleInformer(t *testing.T) {

 			err = esController.syncService(fmt.Sprintf("%s/%s", ns, serviceName))
 			// Check if we got a StaleInformerCache error
-			if isStaleInformerCacheErr(err) != testcase.expectError {
+			if endpointsliceutil.IsStaleInformerCacheErr(err) != testcase.expectError {
 				t.Fatalf("Expected error because informer cache is outdated")
 			}
@@ -36,6 +36,7 @@ import (
 	"k8s.io/kubernetes/pkg/controller/endpointslice/metrics"
 	"k8s.io/kubernetes/pkg/controller/endpointslice/topologycache"
 	endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint"
+	endpointsliceutil "k8s.io/kubernetes/pkg/controller/util/endpointslice"
 	"k8s.io/kubernetes/pkg/features"
 )

@@ -45,7 +46,7 @@ type reconciler struct {
 	client               clientset.Interface
 	nodeLister           corelisters.NodeLister
 	maxEndpointsPerSlice int32
-	endpointSliceTracker *endpointSliceTracker
+	endpointSliceTracker *endpointsliceutil.EndpointSliceTracker
 	metricsCache         *metrics.Cache
 	// topologyCache tracks the distribution of Nodes and endpoints across zones
 	// to enable TopologyAwareHints.
@@ -148,7 +149,7 @@ func (r *reconciler) reconcileByAddressType(service *corev1.Service, pods []*cor

 	// Build data structures for desired state.
 	desiredMetaByPortMap := map[endpointutil.PortMapKey]*endpointMeta{}
-	desiredEndpointsByPortMap := map[endpointutil.PortMapKey]endpointSet{}
+	desiredEndpointsByPortMap := map[endpointutil.PortMapKey]endpointsliceutil.EndpointSet{}
 	numDesiredEndpoints := 0

 	for _, pod := range pods {
@@ -160,7 +161,7 @@ func (r *reconciler) reconcileByAddressType(service *corev1.Service, pods []*cor
 		endpointPorts := getEndpointPorts(service, pod)
 		epHash := endpointutil.NewPortMapKey(endpointPorts)
 		if _, ok := desiredEndpointsByPortMap[epHash]; !ok {
-			desiredEndpointsByPortMap[epHash] = endpointSet{}
+			desiredEndpointsByPortMap[epHash] = endpointsliceutil.EndpointSet{}
 		}

 		if _, ok := desiredMetaByPortMap[epHash]; !ok {
@@ -349,7 +350,7 @@ func (r *reconciler) finalize(
 func (r *reconciler) reconcileByPortMapping(
 	service *corev1.Service,
 	existingSlices []*discovery.EndpointSlice,
-	desiredSet endpointSet,
+	desiredSet endpointsliceutil.EndpointSet,
 	endpointMeta *endpointMeta,
 ) ([]*discovery.EndpointSlice, []*discovery.EndpointSlice, []*discovery.EndpointSlice, int, int) {
 	slicesByName := map[string]*discovery.EndpointSlice{}
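Editor's note: the desired-state build above groups endpoints by a hash of their port list, so each distinct port mapping gets its own set. A minimal standalone sketch of that grouping, assuming it compiles inside the kubernetes repo where these internal packages are importable; the port values are illustrative, not taken from the diff:

package main

import (
	"fmt"

	discovery "k8s.io/api/discovery/v1"
	endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint"
	endpointsliceutil "k8s.io/kubernetes/pkg/controller/util/endpointslice"
	utilpointer "k8s.io/utils/pointer"
)

func main() {
	// Illustrative port list; in the controller this comes from getEndpointPorts.
	ports := []discovery.EndpointPort{{
		Name: utilpointer.StringPtr("http"),
		Port: utilpointer.Int32Ptr(80),
	}}

	// One EndpointSet per distinct port mapping, keyed by the hash of the ports.
	desired := map[endpointutil.PortMapKey]endpointsliceutil.EndpointSet{}
	key := endpointutil.NewPortMapKey(ports)
	if _, ok := desired[key]; !ok {
		desired[key] = endpointsliceutil.EndpointSet{}
	}
	desired[key].Insert(&discovery.Endpoint{Addresses: []string{"10.1.2.3"}})

	fmt.Println(desired[key].Len()) // 1
}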
@@ -43,6 +43,7 @@ import (
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/endpointslice/metrics"
 	"k8s.io/kubernetes/pkg/controller/endpointslice/topologycache"
+	endpointsliceutil "k8s.io/kubernetes/pkg/controller/util/endpointslice"
 	"k8s.io/kubernetes/pkg/features"
 	utilpointer "k8s.io/utils/pointer"
 )
@@ -1597,7 +1598,7 @@ func newReconciler(client *fake.Clientset, nodes []*corev1.Node, maxEndpointsPer
 		client:               client,
 		nodeLister:           corelisters.NewNodeLister(indexer),
 		maxEndpointsPerSlice: maxEndpointsPerSlice,
-		endpointSliceTracker: newEndpointSliceTracker(),
+		endpointSliceTracker: endpointsliceutil.NewEndpointSliceTracker(),
 		metricsCache:         metrics.NewCache(maxEndpointsPerSlice),
 	}
 }
@@ -1670,8 +1671,8 @@ func expectActions(t *testing.T, actions []k8stesting.Action, num int, verb, res
 	}
 }

-func expectTrackedGeneration(t *testing.T, tracker *endpointSliceTracker, slice *discovery.EndpointSlice, expectedGeneration int64) {
-	gfs, ok := tracker.generationsForSliceUnsafe(slice)
+func expectTrackedGeneration(t *testing.T, tracker *endpointsliceutil.EndpointSliceTracker, slice *discovery.EndpointSlice, expectedGeneration int64) {
+	gfs, ok := tracker.GenerationsForSliceUnsafe(slice)
 	if !ok {
 		t.Fatalf("Expected Service to be tracked for EndpointSlices %s", slice.Name)
 	}
@@ -394,3 +394,16 @@ func hintsEnabled(annotations map[string]string) bool {
 	}
 	return val == "Auto" || val == "auto"
 }
+
+// managedByChanged returns true if one of the provided EndpointSlices is
+// managed by the EndpointSlice controller while the other is not.
+func managedByChanged(endpointSlice1, endpointSlice2 *discovery.EndpointSlice) bool {
+	return managedByController(endpointSlice1) != managedByController(endpointSlice2)
+}
+
+// managedByController returns true if the controller of the provided
+// EndpointSlices is the EndpointSlice controller.
+func managedByController(endpointSlice *discovery.EndpointSlice) bool {
+	managedBy, _ := endpointSlice.Labels[discovery.LabelManagedBy]
+	return managedBy == controllerName
+}
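Editor's note: these helpers decide ownership purely from the well-known managed-by label. A standalone sketch of the same check; the "endpointslice-controller.k8s.io" value is an assumption standing in for the controllerName constant referenced in the diff:

package main

import (
	"fmt"

	discovery "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Assumed value of the controllerName constant the real code compares against.
const controllerName = "endpointslice-controller.k8s.io"

func managedByController(endpointSlice *discovery.EndpointSlice) bool {
	return endpointSlice.Labels[discovery.LabelManagedBy] == controllerName
}

func main() {
	ours := &discovery.EndpointSlice{ObjectMeta: metav1.ObjectMeta{
		Labels: map[string]string{discovery.LabelManagedBy: controllerName},
	}}
	theirs := &discovery.EndpointSlice{ObjectMeta: metav1.ObjectMeta{
		Labels: map[string]string{discovery.LabelManagedBy: "some-other-controller"},
	}}

	// Ownership differs between the two slices, so the controller must not
	// treat a transition from one to the other as its own update.
	fmt.Println(managedByController(ours) != managedByController(theirs)) // true
}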
@@ -1,96 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package endpointslicemirroring
-
-import (
-	"sort"
-
-	discovery "k8s.io/api/discovery/v1"
-	endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint"
-)
-
-// endpointHash is used to uniquely identify endpoints. Only including addresses
-// and hostnames as unique identifiers allows us to do more in place updates
-// should attributes such as topology, conditions, or targetRef change.
-type endpointHash string
-type endpointHashObj struct {
-	Addresses []string
-	Hostname  string
-}
-
-func hashEndpoint(endpoint *discovery.Endpoint) endpointHash {
-	sort.Strings(endpoint.Addresses)
-	hashObj := endpointHashObj{Addresses: endpoint.Addresses}
-	if endpoint.Hostname != nil {
-		hashObj.Hostname = *endpoint.Hostname
-	}
-
-	return endpointHash(endpointutil.DeepHashObjectToString(hashObj))
-}
-
-// endpointSet provides simple methods for comparing sets of Endpoints.
-type endpointSet map[endpointHash]*discovery.Endpoint
-
-// Insert adds items to the set.
-func (s endpointSet) Insert(items ...*discovery.Endpoint) endpointSet {
-	for _, item := range items {
-		s[hashEndpoint(item)] = item
-	}
-	return s
-}
-
-// Delete removes all items from the set.
-func (s endpointSet) Delete(items ...*discovery.Endpoint) endpointSet {
-	for _, item := range items {
-		delete(s, hashEndpoint(item))
-	}
-	return s
-}
-
-// Has returns true if and only if item is contained in the set.
-func (s endpointSet) Has(item *discovery.Endpoint) bool {
-	_, contained := s[hashEndpoint(item)]
-	return contained
-}
-
-// Returns an endpoint matching the hash if contained in the set.
-func (s endpointSet) Get(item *discovery.Endpoint) *discovery.Endpoint {
-	return s[hashEndpoint(item)]
-}
-
-// UnsortedList returns the slice with contents in random order.
-func (s endpointSet) UnsortedList() []*discovery.Endpoint {
-	endpoints := make([]*discovery.Endpoint, 0, len(s))
-	for _, endpoint := range s {
-		endpoints = append(endpoints, endpoint)
-	}
-	return endpoints
-}
-
-// Returns a single element from the set.
-func (s endpointSet) PopAny() (*discovery.Endpoint, bool) {
-	for _, endpoint := range s {
-		s.Delete(endpoint)
-		return endpoint, true
-	}
-	return nil, false
-}
-
-// Len returns the size of the set.
-func (s endpointSet) Len() int {
-	return len(s)
-}
@@ -1,204 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package endpointslicemirroring
-
-import (
-	"sync"
-
-	v1 "k8s.io/api/core/v1"
-	discovery "k8s.io/api/discovery/v1"
-	"k8s.io/apimachinery/pkg/types"
-)
-
-const (
-	deletionExpected = -1
-)
-
-// generationsBySlice tracks expected EndpointSlice generations by EndpointSlice
-// uid. A value of deletionExpected (-1) may be used here to indicate that we
-// expect this EndpointSlice to be deleted.
-type generationsBySlice map[types.UID]int64
-
-// endpointSliceTracker tracks EndpointSlices and their associated generation to
-// help determine if a change to an EndpointSlice has been processed by the
-// EndpointSlice controller.
-type endpointSliceTracker struct {
-	// lock protects generationsByService.
-	lock sync.Mutex
-	// generationsByService tracks the generations of EndpointSlices for each
-	// Service.
-	generationsByService map[types.NamespacedName]generationsBySlice
-}
-
-// newEndpointSliceTracker creates and initializes a new endpointSliceTracker.
-func newEndpointSliceTracker() *endpointSliceTracker {
-	return &endpointSliceTracker{
-		generationsByService: map[types.NamespacedName]generationsBySlice{},
-	}
-}
-
-// Has returns true if the endpointSliceTracker has a generation for the
-// provided EndpointSlice.
-func (est *endpointSliceTracker) Has(endpointSlice *discovery.EndpointSlice) bool {
-	est.lock.Lock()
-	defer est.lock.Unlock()
-
-	gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
-	if !ok {
-		return false
-	}
-	_, ok = gfs[endpointSlice.UID]
-	return ok
-}
-
-// ShouldSync returns true if this endpointSliceTracker does not have a
-// generation for the provided EndpointSlice or it is greater than the
-// generation of the tracked EndpointSlice.
-func (est *endpointSliceTracker) ShouldSync(endpointSlice *discovery.EndpointSlice) bool {
-	est.lock.Lock()
-	defer est.lock.Unlock()
-
-	gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
-	if !ok {
-		return true
-	}
-	g, ok := gfs[endpointSlice.UID]
-	return !ok || endpointSlice.Generation > g
-}
-
-// StaleSlices returns true if any of the following are true:
-// 1. One or more of the provided EndpointSlices have older generations than the
-//    corresponding tracked ones.
-// 2. The tracker is expecting one or more of the provided EndpointSlices to be
-//    deleted.
-// 3. The tracker is tracking EndpointSlices that have not been provided.
-func (est *endpointSliceTracker) StaleSlices(service *v1.Service, endpointSlices []*discovery.EndpointSlice) bool {
-	est.lock.Lock()
-	defer est.lock.Unlock()
-
-	nn := types.NamespacedName{Name: service.Name, Namespace: service.Namespace}
-	gfs, ok := est.generationsByService[nn]
-	if !ok {
-		return false
-	}
-	providedSlices := map[types.UID]int64{}
-	for _, endpointSlice := range endpointSlices {
-		providedSlices[endpointSlice.UID] = endpointSlice.Generation
-		g, ok := gfs[endpointSlice.UID]
-		if ok && (g == deletionExpected || g > endpointSlice.Generation) {
-			return true
-		}
-	}
-	for uid, generation := range gfs {
-		if generation == deletionExpected {
-			continue
-		}
-		_, ok := providedSlices[uid]
-		if !ok {
-			return true
-		}
-	}
-	return false
-}
-
-// Update adds or updates the generation in this endpointSliceTracker for the
-// provided EndpointSlice.
-func (est *endpointSliceTracker) Update(endpointSlice *discovery.EndpointSlice) {
-	est.lock.Lock()
-	defer est.lock.Unlock()
-
-	gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
-
-	if !ok {
-		gfs = generationsBySlice{}
-		est.generationsByService[getServiceNN(endpointSlice)] = gfs
-	}
-	gfs[endpointSlice.UID] = endpointSlice.Generation
-}
-
-// DeleteService removes the set of generations tracked for the Service.
-func (est *endpointSliceTracker) DeleteService(namespace, name string) {
-	est.lock.Lock()
-	defer est.lock.Unlock()
-
-	serviceNN := types.NamespacedName{Name: name, Namespace: namespace}
-	delete(est.generationsByService, serviceNN)
-}
-
-// ExpectDeletion sets the generation to deletionExpected in this
-// endpointSliceTracker for the provided EndpointSlice.
-func (est *endpointSliceTracker) ExpectDeletion(endpointSlice *discovery.EndpointSlice) {
-	est.lock.Lock()
-	defer est.lock.Unlock()
-
-	gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
-
-	if !ok {
-		gfs = generationsBySlice{}
-		est.generationsByService[getServiceNN(endpointSlice)] = gfs
-	}
-	gfs[endpointSlice.UID] = deletionExpected
-}
-
-// HandleDeletion removes the generation in this endpointSliceTracker for the
-// provided EndpointSlice. This returns true if the tracker expected this
-// EndpointSlice to be deleted and false if not.
-func (est *endpointSliceTracker) HandleDeletion(endpointSlice *discovery.EndpointSlice) bool {
-	est.lock.Lock()
-	defer est.lock.Unlock()
-
-	gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
-
-	if ok {
-		g, ok := gfs[endpointSlice.UID]
-		delete(gfs, endpointSlice.UID)
-		if ok && g != deletionExpected {
-			return false
-		}
-	}
-
-	return true
-}
-
-// generationsForSliceUnsafe returns the generations for the Service
-// corresponding to the provided EndpointSlice, and a bool to indicate if it
-// exists. A lock must be applied before calling this function.
-func (est *endpointSliceTracker) generationsForSliceUnsafe(endpointSlice *discovery.EndpointSlice) (generationsBySlice, bool) {
-	serviceNN := getServiceNN(endpointSlice)
-	generations, ok := est.generationsByService[serviceNN]
-	return generations, ok
-}
-
-// getServiceNN returns a namespaced name for the Service corresponding to the
-// provided EndpointSlice.
-func getServiceNN(endpointSlice *discovery.EndpointSlice) types.NamespacedName {
-	serviceName, _ := endpointSlice.Labels[discovery.LabelServiceName]
-	return types.NamespacedName{Name: serviceName, Namespace: endpointSlice.Namespace}
-}
-
-// managedByChanged returns true if one of the provided EndpointSlices is
-// managed by the EndpointSlice controller while the other is not.
-func managedByChanged(endpointSlice1, endpointSlice2 *discovery.EndpointSlice) bool {
-	return managedByController(endpointSlice1) != managedByController(endpointSlice2)
-}
-
-// managedByController returns true if the controller of the provided
-// EndpointSlices is the EndpointSlice controller.
-func managedByController(endpointSlice *discovery.EndpointSlice) bool {
-	managedBy, _ := endpointSlice.Labels[discovery.LabelManagedBy]
-	return managedBy == controllerName
-}
@@ -1,401 +0,0 @@
-/*
-Copyright 2020 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package endpointslicemirroring
-
-import (
-	"testing"
-
-	v1 "k8s.io/api/core/v1"
-	discovery "k8s.io/api/discovery/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-)
-
-func TestEndpointSliceTrackerUpdate(t *testing.T) {
-	epSlice1 := &discovery.EndpointSlice{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:       "example-1",
-			Namespace:  "ns1",
-			UID:        "original",
-			Generation: 1,
-			Labels:     map[string]string{discovery.LabelServiceName: "svc1"},
-		},
-	}
-
-	epSlice1DifferentNS := epSlice1.DeepCopy()
-	epSlice1DifferentNS.Namespace = "ns2"
-	epSlice1DifferentNS.UID = "diff-ns"
-
-	epSlice1DifferentService := epSlice1.DeepCopy()
-	epSlice1DifferentService.Labels[discovery.LabelServiceName] = "svc2"
-	epSlice1DifferentService.UID = "diff-svc"
-
-	epSlice1NewerGen := epSlice1.DeepCopy()
-	epSlice1NewerGen.Generation = 2
-
-	testCases := map[string]struct {
-		updateParam      *discovery.EndpointSlice
-		checksParam      *discovery.EndpointSlice
-		expectHas        bool
-		expectShouldSync bool
-		expectGeneration int64
-	}{
-		"same slice": {
-			updateParam:      epSlice1,
-			checksParam:      epSlice1,
-			expectHas:        true,
-			expectShouldSync: false,
-			expectGeneration: epSlice1.Generation,
-		},
-		"different namespace": {
-			updateParam:      epSlice1,
-			checksParam:      epSlice1DifferentNS,
-			expectHas:        false,
-			expectShouldSync: true,
-			expectGeneration: epSlice1.Generation,
-		},
-		"different service": {
-			updateParam:      epSlice1,
-			checksParam:      epSlice1DifferentService,
-			expectHas:        false,
-			expectShouldSync: true,
-			expectGeneration: epSlice1.Generation,
-		},
-		"newer generation": {
-			updateParam:      epSlice1,
-			checksParam:      epSlice1NewerGen,
-			expectHas:        true,
-			expectShouldSync: true,
-			expectGeneration: epSlice1.Generation,
-		},
-	}
-
-	for name, tc := range testCases {
-		t.Run(name, func(t *testing.T) {
-			esTracker := newEndpointSliceTracker()
-			esTracker.Update(tc.updateParam)
-			if esTracker.Has(tc.checksParam) != tc.expectHas {
-				t.Errorf("tc.tracker.Has(%+v) == %t, expected %t", tc.checksParam, esTracker.Has(tc.checksParam), tc.expectHas)
-			}
-			if esTracker.ShouldSync(tc.checksParam) != tc.expectShouldSync {
-				t.Errorf("tc.tracker.ShouldSync(%+v) == %t, expected %t", tc.checksParam, esTracker.ShouldSync(tc.checksParam), tc.expectShouldSync)
-			}
-			serviceNN := types.NamespacedName{Namespace: epSlice1.Namespace, Name: "svc1"}
-			gfs, ok := esTracker.generationsByService[serviceNN]
-			if !ok {
-				t.Fatalf("expected tracker to have generations for %s Service", serviceNN.Name)
-			}
-			generation, ok := gfs[epSlice1.UID]
-			if !ok {
-				t.Fatalf("expected tracker to have generation for %s EndpointSlice", epSlice1.Name)
-			}
-			if tc.expectGeneration != generation {
-				t.Fatalf("expected generation to be %d, got %d", tc.expectGeneration, generation)
-			}
-		})
-	}
-}
-
-func TestEndpointSliceTrackerStaleSlices(t *testing.T) {
-	epSlice1 := &discovery.EndpointSlice{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:       "example-1",
-			Namespace:  "ns1",
-			UID:        "original",
-			Generation: 1,
-			Labels:     map[string]string{discovery.LabelServiceName: "svc1"},
-		},
-	}
-
-	epSlice1NewerGen := epSlice1.DeepCopy()
-	epSlice1NewerGen.Generation = 2
-
-	testCases := []struct {
-		name         string
-		tracker      *endpointSliceTracker
-		serviceParam *v1.Service
-		slicesParam  []*discovery.EndpointSlice
-		expectNewer  bool
-	}{{
-		name: "empty tracker",
-		tracker: &endpointSliceTracker{
-			generationsByService: map[types.NamespacedName]generationsBySlice{},
-		},
-		serviceParam: &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"}},
-		slicesParam:  []*discovery.EndpointSlice{},
-		expectNewer:  false,
-	}, {
-		name: "empty slices",
-		tracker: &endpointSliceTracker{
-			generationsByService: map[types.NamespacedName]generationsBySlice{
-				{Name: "svc1", Namespace: "ns1"}: {},
-			},
-		},
-		serviceParam: &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"}},
-		slicesParam:  []*discovery.EndpointSlice{},
-		expectNewer:  false,
-	}, {
-		name: "matching slices",
-		tracker: &endpointSliceTracker{
-			generationsByService: map[types.NamespacedName]generationsBySlice{
-				{Name: "svc1", Namespace: "ns1"}: {
-					epSlice1.UID: epSlice1.Generation,
-				},
-			},
-		},
-		serviceParam: &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"}},
-		slicesParam:  []*discovery.EndpointSlice{epSlice1},
-		expectNewer:  false,
-	}, {
-		name: "newer slice in tracker",
-		tracker: &endpointSliceTracker{
-			generationsByService: map[types.NamespacedName]generationsBySlice{
-				{Name: "svc1", Namespace: "ns1"}: {
-					epSlice1.UID: epSlice1NewerGen.Generation,
-				},
-			},
-		},
-		serviceParam: &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"}},
-		slicesParam:  []*discovery.EndpointSlice{epSlice1},
-		expectNewer:  true,
-	}, {
-		name: "newer slice in params",
-		tracker: &endpointSliceTracker{
-			generationsByService: map[types.NamespacedName]generationsBySlice{
-				{Name: "svc1", Namespace: "ns1"}: {
-					epSlice1.UID: epSlice1.Generation,
-				},
-			},
-		},
-		serviceParam: &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"}},
-		slicesParam:  []*discovery.EndpointSlice{epSlice1NewerGen},
-		expectNewer:  false,
-	}, {
-		name: "slice in params is expected to be deleted",
-		tracker: &endpointSliceTracker{
-			generationsByService: map[types.NamespacedName]generationsBySlice{
-				{Name: "svc1", Namespace: "ns1"}: {
-					epSlice1.UID: deletionExpected,
-				},
-			},
-		},
-		serviceParam: &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"}},
-		slicesParam:  []*discovery.EndpointSlice{epSlice1},
-		expectNewer:  true,
-	}, {
-		name: "slice in tracker but not in params",
-		tracker: &endpointSliceTracker{
-			generationsByService: map[types.NamespacedName]generationsBySlice{
-				{Name: "svc1", Namespace: "ns1"}: {
-					epSlice1.UID: epSlice1.Generation,
-				},
-			},
-		},
-		serviceParam: &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"}},
-		slicesParam:  []*discovery.EndpointSlice{},
-		expectNewer:  true,
-	}}
-
-	for _, tc := range testCases {
-		t.Run(tc.name, func(t *testing.T) {
-			actualNewer := tc.tracker.StaleSlices(tc.serviceParam, tc.slicesParam)
-			if actualNewer != tc.expectNewer {
-				t.Errorf("Expected %t, got %t", tc.expectNewer, actualNewer)
-			}
-		})
-	}
-}
-func TestEndpointSliceTrackerDeletion(t *testing.T) {
-	epSlice1 := &discovery.EndpointSlice{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:       "example-1",
-			Namespace:  "ns1",
-			UID:        "original",
-			Generation: 1,
-			Labels:     map[string]string{discovery.LabelServiceName: "svc1"},
-		},
-	}
-
-	epSlice1DifferentNS := epSlice1.DeepCopy()
-	epSlice1DifferentNS.Namespace = "ns2"
-	epSlice1DifferentNS.UID = "diff-ns"
-
-	epSlice1DifferentService := epSlice1.DeepCopy()
-	epSlice1DifferentService.Labels[discovery.LabelServiceName] = "svc2"
-	epSlice1DifferentService.UID = "diff-svc"
-
-	epSlice1NewerGen := epSlice1.DeepCopy()
-	epSlice1NewerGen.Generation = 2
-
-	testCases := map[string]struct {
-		expectDeletionParam        *discovery.EndpointSlice
-		checksParam                *discovery.EndpointSlice
-		deleteParam                *discovery.EndpointSlice
-		expectHas                  bool
-		expectShouldSync           bool
-		expectedHandleDeletionResp bool
-	}{
-		"same slice": {
-			expectDeletionParam:        epSlice1,
-			checksParam:                epSlice1,
-			deleteParam:                epSlice1,
-			expectHas:                  true,
-			expectShouldSync:           true,
-			expectedHandleDeletionResp: true,
-		},
-		"different namespace": {
-			expectDeletionParam:        epSlice1DifferentNS,
-			checksParam:                epSlice1DifferentNS,
-			deleteParam:                epSlice1DifferentNS,
-			expectHas:                  true,
-			expectShouldSync:           true,
-			expectedHandleDeletionResp: false,
-		},
-		"different namespace, check original ep slice": {
-			expectDeletionParam:        epSlice1DifferentNS,
-			checksParam:                epSlice1,
-			deleteParam:                epSlice1DifferentNS,
-			expectHas:                  true,
-			expectShouldSync:           false,
-			expectedHandleDeletionResp: false,
-		},
-		"different service": {
-			expectDeletionParam:        epSlice1DifferentService,
-			checksParam:                epSlice1DifferentService,
-			deleteParam:                epSlice1DifferentService,
-			expectHas:                  true,
-			expectShouldSync:           true,
-			expectedHandleDeletionResp: false,
-		},
-		"expectDelete different service, check original ep slice, delete original": {
-			expectDeletionParam:        epSlice1DifferentService,
-			checksParam:                epSlice1,
-			deleteParam:                epSlice1,
-			expectHas:                  true,
-			expectShouldSync:           false,
-			expectedHandleDeletionResp: false,
-		},
-		"different generation": {
-			expectDeletionParam:        epSlice1NewerGen,
-			checksParam:                epSlice1NewerGen,
-			deleteParam:                epSlice1NewerGen,
-			expectHas:                  true,
-			expectShouldSync:           true,
-			expectedHandleDeletionResp: true,
-		},
-		"expectDelete different generation, check original ep slice, delete original": {
-			expectDeletionParam:        epSlice1NewerGen,
-			checksParam:                epSlice1,
-			deleteParam:                epSlice1,
-			expectHas:                  true,
-			expectShouldSync:           true,
-			expectedHandleDeletionResp: true,
-		},
-	}
-
-	for name, tc := range testCases {
-		t.Run(name, func(t *testing.T) {
-			esTracker := newEndpointSliceTracker()
-			esTracker.Update(epSlice1)
-
-			esTracker.ExpectDeletion(tc.expectDeletionParam)
-			if esTracker.Has(tc.checksParam) != tc.expectHas {
-				t.Errorf("esTracker.Has(%+v) == %t, expected %t", tc.checksParam, esTracker.Has(tc.checksParam), tc.expectHas)
-			}
-			if esTracker.ShouldSync(tc.checksParam) != tc.expectShouldSync {
-				t.Errorf("esTracker.ShouldSync(%+v) == %t, expected %t", tc.checksParam, esTracker.ShouldSync(tc.checksParam), tc.expectShouldSync)
-			}
-			if esTracker.HandleDeletion(epSlice1) != tc.expectedHandleDeletionResp {
-				t.Errorf("esTracker.ShouldSync(%+v) == %t, expected %t", epSlice1, esTracker.HandleDeletion(epSlice1), tc.expectedHandleDeletionResp)
-			}
-			if esTracker.Has(epSlice1) != false {
-				t.Errorf("esTracker.Has(%+v) == %t, expected false", epSlice1, esTracker.Has(epSlice1))
-			}
-
-		})
-	}
-}
-
-func TestEndpointSliceTrackerDeleteService(t *testing.T) {
-	svcName1, svcNS1 := "svc1", "ns1"
-	svcName2, svcNS2 := "svc2", "ns2"
-	epSlice1 := &discovery.EndpointSlice{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:       "example-1",
-			Namespace:  svcNS1,
-			Generation: 1,
-			Labels:     map[string]string{discovery.LabelServiceName: svcName1},
-		},
-	}
-
-	testCases := map[string]struct {
-		updateParam        *discovery.EndpointSlice
-		deleteServiceParam *types.NamespacedName
-		expectHas          bool
-		expectShouldSync   bool
-		expectGeneration   int64
-	}{
-		"same service": {
-			updateParam:        epSlice1,
-			deleteServiceParam: &types.NamespacedName{Namespace: svcNS1, Name: svcName1},
-			expectHas:          false,
-			expectShouldSync:   true,
-		},
-		"different namespace": {
-			updateParam:        epSlice1,
-			deleteServiceParam: &types.NamespacedName{Namespace: svcNS2, Name: svcName1},
-			expectHas:          true,
-			expectShouldSync:   false,
-			expectGeneration:   epSlice1.Generation,
-		},
-		"different service": {
-			updateParam:        epSlice1,
-			deleteServiceParam: &types.NamespacedName{Namespace: svcNS1, Name: svcName2},
-			expectHas:          true,
-			expectShouldSync:   false,
-			expectGeneration:   epSlice1.Generation,
-		},
-	}
-
-	for name, tc := range testCases {
-		t.Run(name, func(t *testing.T) {
-			esTracker := newEndpointSliceTracker()
-			esTracker.Update(tc.updateParam)
-			esTracker.DeleteService(tc.deleteServiceParam.Namespace, tc.deleteServiceParam.Name)
-			if esTracker.Has(tc.updateParam) != tc.expectHas {
-				t.Errorf("tc.tracker.Has(%+v) == %t, expected %t", tc.updateParam, esTracker.Has(tc.updateParam), tc.expectHas)
-			}
-			if esTracker.ShouldSync(tc.updateParam) != tc.expectShouldSync {
-				t.Errorf("tc.tracker.ShouldSync(%+v) == %t, expected %t", tc.updateParam, esTracker.ShouldSync(tc.updateParam), tc.expectShouldSync)
-			}
-			if tc.expectGeneration != 0 {
-				serviceNN := types.NamespacedName{Namespace: epSlice1.Namespace, Name: "svc1"}
-				gfs, ok := esTracker.generationsByService[serviceNN]
-				if !ok {
-					t.Fatalf("expected tracker to have status for %s Service", serviceNN.Name)
-				}
-				generation, ok := gfs[epSlice1.UID]
-				if !ok {
-					t.Fatalf("expected tracker to have generation for %s EndpointSlice", epSlice1.Name)
-				}
-				if tc.expectGeneration != generation {
-					t.Fatalf("expected generation to be %d, got %d", tc.expectGeneration, generation)
-				}
-			}
-		})
-	}
-}
@@ -42,6 +42,7 @@ import (
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/endpointslicemirroring/metrics"
+	endpointsliceutil "k8s.io/kubernetes/pkg/controller/util/endpointslice"
 )

 const (
@@ -116,7 +117,7 @@ func NewController(endpointsInformer coreinformers.EndpointsInformer,

 	c.endpointSliceLister = endpointSliceInformer.Lister()
 	c.endpointSlicesSynced = endpointSliceInformer.Informer().HasSynced
-	c.endpointSliceTracker = newEndpointSliceTracker()
+	c.endpointSliceTracker = endpointsliceutil.NewEndpointSliceTracker()

 	c.serviceLister = serviceInformer.Lister()
 	c.servicesSynced = serviceInformer.Informer().HasSynced
@@ -169,7 +170,7 @@ type Controller struct {
 	// endpointSliceTracker tracks the list of EndpointSlices and associated
 	// resource versions expected for each Endpoints resource. It can help
 	// determine if a cached EndpointSlice is out of date.
-	endpointSliceTracker *endpointSliceTracker
+	endpointSliceTracker *endpointsliceutil.EndpointSliceTracker

 	// serviceLister is able to list/get services and is populated by the shared
 	// informer passed to NewController.
@@ -317,7 +318,7 @@ func (c *Controller) syncEndpoints(key string) error {
 	}

 	if c.endpointSliceTracker.StaleSlices(svc, endpointSlices) {
-		return &StaleInformerCache{"EndpointSlice informer cache is out of date"}
+		return endpointsliceutil.NewStaleInformerCache("EndpointSlice informer cache is out of date")
 	}

 	err = c.reconciler.reconcile(endpoints, endpointSlices)
@@ -1,25 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package endpointslicemirroring
-
-// StaleInformerCache errors indicate that the informer cache includes out of
-// date resources.
-type StaleInformerCache struct {
-	msg string
-}
-
-func (e *StaleInformerCache) Error() string { return e.msg }
@@ -31,6 +31,7 @@ import (
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/controller/endpointslicemirroring/metrics"
 	endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint"
+	endpointsliceutil "k8s.io/kubernetes/pkg/controller/util/endpointslice"
 )

 // reconciler is responsible for transforming current EndpointSlice state into
@@ -41,7 +42,7 @@ type reconciler struct {
 	// endpointSliceTracker tracks the list of EndpointSlices and associated
 	// resource versions expected for each Endpoints resource. It can help
 	// determine if a cached EndpointSlice is out of date.
-	endpointSliceTracker *endpointSliceTracker
+	endpointSliceTracker *endpointsliceutil.EndpointSliceTracker

 	// eventRecorder allows reconciler to record an event if it finds an invalid
 	// IP address in an Endpoints resource.
@@ -173,7 +174,7 @@ func (r *reconciler) reconcile(endpoints *corev1.Endpoints, existingSlices []*di
 func (r *reconciler) reconcileByPortMapping(
 	endpoints *corev1.Endpoints,
 	existingSlices []*discovery.EndpointSlice,
-	desiredSet endpointSet,
+	desiredSet endpointsliceutil.EndpointSet,
 	endpointPorts []discovery.EndpointPort,
 	addressType discovery.AddressType,
 ) (slicesByAction, totalsByAction) {
@@ -306,7 +307,7 @@ func endpointSlicesByKey(existingSlices []*discovery.EndpointSlice) map[addrType

 // totalChanges returns the total changes that will be required for an
 // EndpointSlice to match a desired set of endpoints.
-func totalChanges(existingSlice *discovery.EndpointSlice, desiredSet endpointSet) totalsByAction {
+func totalChanges(existingSlice *discovery.EndpointSlice, desiredSet endpointsliceutil.EndpointSet) totalsByAction {
 	totals := totalsByAction{}
 	existingMatches := 0
@@ -19,6 +19,7 @@ package endpointslicemirroring
 import (
 	v1 "k8s.io/api/core/v1"
 	discovery "k8s.io/api/discovery/v1"
+	endpointsliceutil "k8s.io/kubernetes/pkg/controller/util/endpointslice"
 )

 // slicesByAction includes lists of slices to create, update, or delete.
@@ -49,7 +50,7 @@ func (t *totalsByAction) add(totals totalsByAction) {
 func newDesiredCalc() *desiredCalc {
 	return &desiredCalc{
 		portsByKey:          map[addrTypePortMapKey][]discovery.EndpointPort{},
-		endpointsByKey:      map[addrTypePortMapKey]endpointSet{},
+		endpointsByKey:      map[addrTypePortMapKey]endpointsliceutil.EndpointSet{},
 		numDesiredEndpoints: 0,
 	}
 }
@@ -57,7 +58,7 @@ func newDesiredCalc() *desiredCalc {
 // desiredCalc helps calculate desired endpoints and ports.
 type desiredCalc struct {
 	portsByKey          map[addrTypePortMapKey][]discovery.EndpointPort
-	endpointsByKey      map[addrTypePortMapKey]endpointSet
+	endpointsByKey      map[addrTypePortMapKey]endpointsliceutil.EndpointSet
 	numDesiredEndpoints int
 }

@@ -75,7 +76,7 @@ func (d *desiredCalc) initPorts(subsetPorts []v1.EndpointPort) multiAddrTypePort
 	for _, addrType := range addrTypes {
 		multiKey[addrType] = newAddrTypePortMapKey(endpointPorts, addrType)
 		if _, ok := d.endpointsByKey[multiKey[addrType]]; !ok {
-			d.endpointsByKey[multiKey[addrType]] = endpointSet{}
+			d.endpointsByKey[multiKey[addrType]] = endpointsliceutil.EndpointSet{}
 		}
 		d.portsByKey[multiKey[addrType]] = endpointPorts
 	}
@@ -29,6 +29,7 @@ import (
 	"k8s.io/client-go/tools/record"
 	"k8s.io/component-base/metrics/testutil"
 	"k8s.io/kubernetes/pkg/controller/endpointslicemirroring/metrics"
+	endpointsliceutil "k8s.io/kubernetes/pkg/controller/util/endpointslice"
 	utilpointer "k8s.io/utils/pointer"
 )

@@ -965,7 +966,7 @@ func newReconciler(client *fake.Clientset, maxEndpointsPerSubset int32) *reconci
 	return &reconciler{
 		client:                client,
 		maxEndpointsPerSubset: maxEndpointsPerSubset,
-		endpointSliceTracker:  newEndpointSliceTracker(),
+		endpointSliceTracker:  endpointsliceutil.NewEndpointSliceTracker(),
 		metricsCache:          metrics.NewCache(maxEndpointsPerSubset),
 		eventRecorder:         recorder,
 	}
@@ -258,3 +258,16 @@ func cloneAndRemoveKeys(a map[string]string, keys ...string) map[string]string {
 	}
 	return newMap
 }
+
+// managedByChanged returns true if one of the provided EndpointSlices is
+// managed by the EndpointSlice controller while the other is not.
+func managedByChanged(endpointSlice1, endpointSlice2 *discovery.EndpointSlice) bool {
+	return managedByController(endpointSlice1) != managedByController(endpointSlice2)
+}
+
+// managedByController returns true if the controller of the provided
+// EndpointSlices is the EndpointSlice controller.
+func managedByController(endpointSlice *discovery.EndpointSlice) bool {
+	managedBy, _ := endpointSlice.Labels[discovery.LabelManagedBy]
+	return managedBy == controllerName
+}
pkg/controller/util/endpointslice/OWNERS (new file, 13 lines)
@@ -0,0 +1,13 @@
+# See the OWNERS docs at https://go.k8s.io/owners
+
+approvers:
+- robscott
+- freehan
+- sig-network-approvers
+reviewers:
+- robscott
+- freehan
+- sig-network-reviewers
+labels:
+- sig/network
@@ -42,11 +42,11 @@ func hashEndpoint(endpoint *discovery.Endpoint) endpointHash {
 	return endpointHash(endpointutil.DeepHashObjectToString(hashObj))
 }

-// endpointSet provides simple methods for comparing sets of Endpoints.
-type endpointSet map[endpointHash]*discovery.Endpoint
+// EndpointSet provides simple methods for comparing sets of Endpoints.
+type EndpointSet map[endpointHash]*discovery.Endpoint

 // Insert adds items to the set.
-func (s endpointSet) Insert(items ...*discovery.Endpoint) endpointSet {
+func (s EndpointSet) Insert(items ...*discovery.Endpoint) EndpointSet {
 	for _, item := range items {
 		s[hashEndpoint(item)] = item
 	}
@@ -54,7 +54,7 @@ func (s endpointSet) Insert(items ...*discovery.Endpoint) endpointSet {
 }

 // Delete removes all items from the set.
-func (s endpointSet) Delete(items ...*discovery.Endpoint) endpointSet {
+func (s EndpointSet) Delete(items ...*discovery.Endpoint) EndpointSet {
 	for _, item := range items {
 		delete(s, hashEndpoint(item))
 	}
@@ -62,18 +62,18 @@ func (s endpointSet) Delete(items ...*discovery.Endpoint) endpointSet {
 }

 // Has returns true if and only if item is contained in the set.
-func (s endpointSet) Has(item *discovery.Endpoint) bool {
+func (s EndpointSet) Has(item *discovery.Endpoint) bool {
 	_, contained := s[hashEndpoint(item)]
 	return contained
 }

 // Returns an endpoint matching the hash if contained in the set.
-func (s endpointSet) Get(item *discovery.Endpoint) *discovery.Endpoint {
+func (s EndpointSet) Get(item *discovery.Endpoint) *discovery.Endpoint {
 	return s[hashEndpoint(item)]
 }

 // UnsortedList returns the slice with contents in random order.
-func (s endpointSet) UnsortedList() []*discovery.Endpoint {
+func (s EndpointSet) UnsortedList() []*discovery.Endpoint {
 	endpoints := make([]*discovery.Endpoint, 0, len(s))
 	for _, endpoint := range s {
 		endpoints = append(endpoints, endpoint)
@@ -82,7 +82,7 @@ func (s endpointSet) UnsortedList() []*discovery.Endpoint {
 }

 // Returns a single element from the set.
-func (s endpointSet) PopAny() (*discovery.Endpoint, bool) {
+func (s EndpointSet) PopAny() (*discovery.Endpoint, bool) {
 	for _, endpoint := range s {
 		s.Delete(endpoint)
 		return endpoint, true
@@ -91,6 +91,6 @@ func (s endpointSet) PopAny() (*discovery.Endpoint, bool) {
 }

 // Len returns the size of the set.
-func (s endpointSet) Len() int {
+func (s EndpointSet) Len() int {
 	return len(s)
 }
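Editor's note: EndpointSet deduplicates endpoints by a hash of addresses and hostname only, so changes to topology, conditions, or targetRef do not break set membership. A minimal usage sketch, assuming it compiles inside the kubernetes repo where this internal package is importable:

package main

import (
	"fmt"

	discovery "k8s.io/api/discovery/v1"
	endpointsliceutil "k8s.io/kubernetes/pkg/controller/util/endpointslice"
)

func main() {
	set := endpointsliceutil.EndpointSet{}
	set.Insert(&discovery.Endpoint{Addresses: []string{"10.0.0.1"}})

	// Membership is keyed on addresses/hostname only, so an endpoint with the
	// same address but different conditions still counts as "the same".
	ready := true
	probe := &discovery.Endpoint{
		Addresses:  []string{"10.0.0.1"},
		Conditions: discovery.EndpointConditions{Ready: &ready},
	}
	fmt.Println(set.Has(probe)) // true

	// PopAny drains the set one arbitrary element at a time.
	for ep, ok := set.PopAny(); ok; ep, ok = set.PopAny() {
		fmt.Println(ep.Addresses)
	}
	fmt.Println(set.Len()) // 0
}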
@@ -28,36 +28,36 @@ const (
 	deletionExpected = -1
 )

-// generationsBySlice tracks expected EndpointSlice generations by EndpointSlice
+// GenerationsBySlice tracks expected EndpointSlice generations by EndpointSlice
 // uid. A value of deletionExpected (-1) may be used here to indicate that we
 // expect this EndpointSlice to be deleted.
-type generationsBySlice map[types.UID]int64
+type GenerationsBySlice map[types.UID]int64

-// endpointSliceTracker tracks EndpointSlices and their associated generation to
+// EndpointSliceTracker tracks EndpointSlices and their associated generation to
 // help determine if a change to an EndpointSlice has been processed by the
 // EndpointSlice controller.
-type endpointSliceTracker struct {
+type EndpointSliceTracker struct {
 	// lock protects generationsByService.
 	lock sync.Mutex
 	// generationsByService tracks the generations of EndpointSlices for each
 	// Service.
-	generationsByService map[types.NamespacedName]generationsBySlice
+	generationsByService map[types.NamespacedName]GenerationsBySlice
 }

-// newEndpointSliceTracker creates and initializes a new endpointSliceTracker.
-func newEndpointSliceTracker() *endpointSliceTracker {
-	return &endpointSliceTracker{
-		generationsByService: map[types.NamespacedName]generationsBySlice{},
+// NewEndpointSliceTracker creates and initializes a new endpointSliceTracker.
+func NewEndpointSliceTracker() *EndpointSliceTracker {
+	return &EndpointSliceTracker{
+		generationsByService: map[types.NamespacedName]GenerationsBySlice{},
 	}
 }

 // Has returns true if the endpointSliceTracker has a generation for the
 // provided EndpointSlice.
-func (est *endpointSliceTracker) Has(endpointSlice *discovery.EndpointSlice) bool {
+func (est *EndpointSliceTracker) Has(endpointSlice *discovery.EndpointSlice) bool {
 	est.lock.Lock()
 	defer est.lock.Unlock()

-	gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
+	gfs, ok := est.GenerationsForSliceUnsafe(endpointSlice)
 	if !ok {
 		return false
 	}
@@ -68,11 +68,11 @@ func (est *endpointSliceTracker) Has(endpointSlice *discovery.EndpointSlice) boo
 // ShouldSync returns true if this endpointSliceTracker does not have a
 // generation for the provided EndpointSlice or it is greater than the
 // generation of the tracked EndpointSlice.
-func (est *endpointSliceTracker) ShouldSync(endpointSlice *discovery.EndpointSlice) bool {
+func (est *EndpointSliceTracker) ShouldSync(endpointSlice *discovery.EndpointSlice) bool {
 	est.lock.Lock()
 	defer est.lock.Unlock()

-	gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
+	gfs, ok := est.GenerationsForSliceUnsafe(endpointSlice)
 	if !ok {
 		return true
 	}
@@ -86,7 +86,7 @@ func (est *endpointSliceTracker) ShouldSync(endpointSlice *discovery.EndpointSli
 // 2. The tracker is expecting one or more of the provided EndpointSlices to be
 //    deleted.
 // 3. The tracker is tracking EndpointSlices that have not been provided.
-func (est *endpointSliceTracker) StaleSlices(service *v1.Service, endpointSlices []*discovery.EndpointSlice) bool {
+func (est *EndpointSliceTracker) StaleSlices(service *v1.Service, endpointSlices []*discovery.EndpointSlice) bool {
 	est.lock.Lock()
 	defer est.lock.Unlock()

@@ -117,21 +117,21 @@ func (est *endpointSliceTracker) StaleSlices(service *v1.Service, endpointSlices

 // Update adds or updates the generation in this endpointSliceTracker for the
 // provided EndpointSlice.
-func (est *endpointSliceTracker) Update(endpointSlice *discovery.EndpointSlice) {
+func (est *EndpointSliceTracker) Update(endpointSlice *discovery.EndpointSlice) {
 	est.lock.Lock()
 	defer est.lock.Unlock()

-	gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
+	gfs, ok := est.GenerationsForSliceUnsafe(endpointSlice)

 	if !ok {
-		gfs = generationsBySlice{}
+		gfs = GenerationsBySlice{}
 		est.generationsByService[getServiceNN(endpointSlice)] = gfs
 	}
 	gfs[endpointSlice.UID] = endpointSlice.Generation
 }

 // DeleteService removes the set of generations tracked for the Service.
-func (est *endpointSliceTracker) DeleteService(namespace, name string) {
+func (est *EndpointSliceTracker) DeleteService(namespace, name string) {
 	est.lock.Lock()
 	defer est.lock.Unlock()

@@ -141,14 +141,14 @@ func (est *endpointSliceTracker) DeleteService(namespace, name string) {

 // ExpectDeletion sets the generation to deletionExpected in this
 // endpointSliceTracker for the provided EndpointSlice.
-func (est *endpointSliceTracker) ExpectDeletion(endpointSlice *discovery.EndpointSlice) {
+func (est *EndpointSliceTracker) ExpectDeletion(endpointSlice *discovery.EndpointSlice) {
 	est.lock.Lock()
 	defer est.lock.Unlock()

-	gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
+	gfs, ok := est.GenerationsForSliceUnsafe(endpointSlice)

 	if !ok {
-		gfs = generationsBySlice{}
+		gfs = GenerationsBySlice{}
 		est.generationsByService[getServiceNN(endpointSlice)] = gfs
 	}
 	gfs[endpointSlice.UID] = deletionExpected
@@ -157,11 +157,11 @@ func (est *endpointSliceTracker) ExpectDeletion(endpointSlice *discovery.Endpoin
 // HandleDeletion removes the generation in this endpointSliceTracker for the
 // provided EndpointSlice. This returns true if the tracker expected this
 // EndpointSlice to be deleted and false if not.
-func (est *endpointSliceTracker) HandleDeletion(endpointSlice *discovery.EndpointSlice) bool {
+func (est *EndpointSliceTracker) HandleDeletion(endpointSlice *discovery.EndpointSlice) bool {
 	est.lock.Lock()
 	defer est.lock.Unlock()

-	gfs, ok := est.generationsForSliceUnsafe(endpointSlice)
+	gfs, ok := est.GenerationsForSliceUnsafe(endpointSlice)

 	if ok {
 		g, ok := gfs[endpointSlice.UID]
@@ -174,10 +174,10 @@ func (est *endpointSliceTracker) HandleDeletion(endpointSlice *discovery.Endpoin
 	return true
 }

-// generationsForSliceUnsafe returns the generations for the Service
+// GenerationsForSliceUnsafe returns the generations for the Service
 // corresponding to the provided EndpointSlice, and a bool to indicate if it
 // exists. A lock must be applied before calling this function.
-func (est *endpointSliceTracker) generationsForSliceUnsafe(endpointSlice *discovery.EndpointSlice) (generationsBySlice, bool) {
+func (est *EndpointSliceTracker) GenerationsForSliceUnsafe(endpointSlice *discovery.EndpointSlice) (GenerationsBySlice, bool) {
 	serviceNN := getServiceNN(endpointSlice)
 	generations, ok := est.generationsByService[serviceNN]
 	return generations, ok
@@ -189,16 +189,3 @@ func getServiceNN(endpointSlice *discovery.EndpointSlice) types.NamespacedName {
 	serviceName, _ := endpointSlice.Labels[discovery.LabelServiceName]
 	return types.NamespacedName{Name: serviceName, Namespace: endpointSlice.Namespace}
 }
-
-// managedByChanged returns true if one of the provided EndpointSlices is
-// managed by the EndpointSlice controller while the other is not.
-func managedByChanged(endpointSlice1, endpointSlice2 *discovery.EndpointSlice) bool {
-	return managedByController(endpointSlice1) != managedByController(endpointSlice2)
-}
-
-// managedByController returns true if the controller of the provided
-// EndpointSlices is the EndpointSlice controller.
-func managedByController(endpointSlice *discovery.EndpointSlice) bool {
-	managedBy, _ := endpointSlice.Labels[discovery.LabelManagedBy]
-	return managedBy == controllerName
-}
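Editor's note: the now-exported tracker records the last EndpointSlice generation each controller wrote, so informer events for the controller's own writes can be skipped. A minimal sketch of the Update/ShouldSync/HandleDeletion flow, assuming it compiles inside the kubernetes repo where this internal package is importable:

package main

import (
	"fmt"

	discovery "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	endpointsliceutil "k8s.io/kubernetes/pkg/controller/util/endpointslice"
)

func main() {
	slice := &discovery.EndpointSlice{ObjectMeta: metav1.ObjectMeta{
		Name:       "example-1",
		Namespace:  "ns1",
		UID:        "uid-1",
		Generation: 1,
		Labels:     map[string]string{discovery.LabelServiceName: "svc1"},
	}}

	tracker := endpointsliceutil.NewEndpointSliceTracker()

	fmt.Println(tracker.ShouldSync(slice)) // true: nothing tracked yet

	tracker.Update(slice)                  // record the generation we just wrote
	fmt.Println(tracker.ShouldSync(slice)) // false: this update is our own

	newer := slice.DeepCopy()
	newer.Generation = 2
	fmt.Println(tracker.ShouldSync(newer)) // true: someone else changed it

	// Deletions the controller itself initiated are expected; others are not.
	tracker.ExpectDeletion(slice)
	fmt.Println(tracker.HandleDeletion(slice)) // true: deletion was expected
}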
@@ -86,7 +86,7 @@ func TestEndpointSliceTrackerUpdate(t *testing.T) {

 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
-			esTracker := newEndpointSliceTracker()
+			esTracker := NewEndpointSliceTracker()
 			esTracker.Update(tc.updateParam)
 			if esTracker.Has(tc.checksParam) != tc.expectHas {
 				t.Errorf("tc.tracker.Has(%+v) == %t, expected %t", tc.checksParam, esTracker.Has(tc.checksParam), tc.expectHas)
@@ -126,22 +126,22 @@ func TestEndpointSliceTrackerStaleSlices(t *testing.T) {

 	testCases := []struct {
 		name         string
-		tracker      *endpointSliceTracker
+		tracker      *EndpointSliceTracker
 		serviceParam *v1.Service
 		slicesParam  []*discovery.EndpointSlice
 		expectNewer  bool
 	}{{
 		name: "empty tracker",
-		tracker: &endpointSliceTracker{
-			generationsByService: map[types.NamespacedName]generationsBySlice{},
+		tracker: &EndpointSliceTracker{
+			generationsByService: map[types.NamespacedName]GenerationsBySlice{},
 		},
 		serviceParam: &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"}},
 		slicesParam:  []*discovery.EndpointSlice{},
 		expectNewer:  false,
 	}, {
 		name: "empty slices",
-		tracker: &endpointSliceTracker{
-			generationsByService: map[types.NamespacedName]generationsBySlice{
+		tracker: &EndpointSliceTracker{
+			generationsByService: map[types.NamespacedName]GenerationsBySlice{
 				{Name: "svc1", Namespace: "ns1"}: {},
 			},
 		},
@@ -150,8 +150,8 @@ func TestEndpointSliceTrackerStaleSlices(t *testing.T) {
 		expectNewer:  false,
 	}, {
 		name: "matching slices",
-		tracker: &endpointSliceTracker{
-			generationsByService: map[types.NamespacedName]generationsBySlice{
+		tracker: &EndpointSliceTracker{
+			generationsByService: map[types.NamespacedName]GenerationsBySlice{
 				{Name: "svc1", Namespace: "ns1"}: {
 					epSlice1.UID: epSlice1.Generation,
 				},
@@ -162,8 +162,8 @@ func TestEndpointSliceTrackerStaleSlices(t *testing.T) {
 		expectNewer:  false,
 	}, {
 		name: "newer slice in tracker",
-		tracker: &endpointSliceTracker{
-			generationsByService: map[types.NamespacedName]generationsBySlice{
+		tracker: &EndpointSliceTracker{
+			generationsByService: map[types.NamespacedName]GenerationsBySlice{
 				{Name: "svc1", Namespace: "ns1"}: {
 					epSlice1.UID: epSlice1NewerGen.Generation,
 				},
@@ -174,8 +174,8 @@ func TestEndpointSliceTrackerStaleSlices(t *testing.T) {
 		expectNewer:  true,
 	}, {
 		name: "newer slice in params",
-		tracker: &endpointSliceTracker{
-			generationsByService: map[types.NamespacedName]generationsBySlice{
+		tracker: &EndpointSliceTracker{
+			generationsByService: map[types.NamespacedName]GenerationsBySlice{
 				{Name: "svc1", Namespace: "ns1"}: {
 					epSlice1.UID: epSlice1.Generation,
 				},
@@ -186,8 +186,8 @@ func TestEndpointSliceTrackerStaleSlices(t *testing.T) {
 		expectNewer:  false,
 	}, {
 		name: "slice in params is expected to be deleted",
-		tracker: &endpointSliceTracker{
-			generationsByService: map[types.NamespacedName]generationsBySlice{
+		tracker: &EndpointSliceTracker{
+			generationsByService: map[types.NamespacedName]GenerationsBySlice{
 				{Name: "svc1", Namespace: "ns1"}: {
 					epSlice1.UID: deletionExpected,
 				},
@@ -198,8 +198,8 @@ func TestEndpointSliceTrackerStaleSlices(t *testing.T) {
 		expectNewer:  true,
 	}, {
 		name: "slice in tracker but not in params",
-		tracker: &endpointSliceTracker{
-			generationsByService: map[types.NamespacedName]generationsBySlice{
+		tracker: &EndpointSliceTracker{
+			generationsByService: map[types.NamespacedName]GenerationsBySlice{
 				{Name: "svc1", Namespace: "ns1"}: {
 					epSlice1.UID: epSlice1.Generation,
 				},
@@ -309,7 +309,7 @@ func TestEndpointSliceTrackerDeletion(t *testing.T) {

 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
-			esTracker := newEndpointSliceTracker()
+			esTracker := NewEndpointSliceTracker()
 			esTracker.Update(epSlice1)

 			esTracker.ExpectDeletion(tc.expectDeletionParam)
@@ -373,7 +373,7 @@ func TestEndpointSliceTrackerDeleteService(t *testing.T) {

 	for name, tc := range testCases {
 		t.Run(name, func(t *testing.T) {
-			esTracker := newEndpointSliceTracker()
+			esTracker := NewEndpointSliceTracker()
 			esTracker.Update(tc.updateParam)
 			esTracker.DeleteService(tc.deleteServiceParam.Namespace, tc.deleteServiceParam.Name)
 			if esTracker.Has(tc.updateParam) != tc.expectHas {
@@ -22,9 +22,14 @@ type StaleInformerCache struct {
 	msg string
 }

+// NewStaleInformerCache returns a StaleInformerCache with the provided error message.
+func NewStaleInformerCache(msg string) *StaleInformerCache {
+	return &StaleInformerCache{msg}
+}
+
 func (e *StaleInformerCache) Error() string { return e.msg }

-func isStaleInformerCacheErr(err error) bool {
+func IsStaleInformerCacheErr(err error) bool {
 	_, ok := err.(*StaleInformerCache)
 	return ok
 }
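Editor's note: a short sketch of how a sync loop can use these exported helpers to treat a stale informer cache as a retriable condition rather than a hard failure; the syncOnce function is illustrative, and the classification mirrors trackSync in the endpointslice controller above:

package main

import (
	"fmt"

	endpointsliceutil "k8s.io/kubernetes/pkg/controller/util/endpointslice"
)

// syncOnce stands in for a controller sync that may observe a stale cache.
func syncOnce() error {
	return endpointsliceutil.NewStaleInformerCache("EndpointSlice informer cache is out of date")
}

func main() {
	err := syncOnce()

	// A stale cache is labeled "stale" for metrics and requeued; anything
	// else counts as a real error.
	metricLabel := "success"
	if err != nil {
		if endpointsliceutil.IsStaleInformerCacheErr(err) {
			metricLabel = "stale"
		} else {
			metricLabel = "error"
		}
	}
	fmt.Println(metricLabel) // stale
}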