Mirror of https://github.com/k3s-io/kubernetes.git, synced 2026-01-05 07:27:21 +00:00
dual stack services (#91824)
* api: structure change
* api: defaulting, conversion, and validation
* [FIX] validation: auto remove second IP/family when service changes to SingleStack
* [FIX] api: defaulting, conversion, and validation
* api-server: clusterIPs alloc, printers, storage and strategy
* [FIX] clusterIPs default on read
* alloc: auto remove second IP/family when service changes to SingleStack
* api-server: repair loop handling for clusterIPs
* api-server: force kubernetes default service into single stack
* api-server: tie dual-stack feature flag to the endpoint feature flag
* controller-manager: feature flag, endpoint, and endpointSlice controllers handling multi-family services
* [FIX] controller-manager: feature flag, endpoint, and endpointSlice controllers handling multi-family services
* kube-proxy: feature flag, utils, proxier, and meta proxier
* [FIX] kube-proxy: call both proxiers at the same time
* kubenet: remove forced pod IP sorting
* kubectl: modify describe to include ClusterIPs, IPFamilies, and IPFamilyPolicy
* e2e: fix tests that depend on the IPFamily field AND add dual-stack tests
* e2e: fix expected error message for ClusterIP immutability
* add integration tests for dual stack

The third phase of dual stack is a very complex change to the API; it basically introduces dual-stack Services. The main changes are (a sketch of the new fields follows this header):

- It pluralizes the Service IPFamily field to IPFamilies, and removes the singular field.
- It introduces a new field, IPFamilyPolicy (of type IPFamilyPolicyType), that can take 3 values to express the "dual-stack(mad)ness" of the cluster: SingleStack, PreferDualStack, and RequireDualStack.
- It pluralizes ClusterIP to ClusterIPs.

The goal is to add coverage to the Services API operations, taking into account the 6 different modes a cluster can have:

- single stack: IPv4 or IPv6 (as of today)
- dual stack: IPv4 only, IPv6 only, IPv4 then IPv6, and IPv6 then IPv4

* [FIX] add integration tests for dual stack
* generated data
* generated files

Co-authored-by: Antonio Ojea <aojea@redhat.com>
Committed by GitHub
parent d0e06cf3e0
commit 6675eba3ef
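For context, a minimal sketch (not part of this commit) of what the pluralized fields look like in Go, assuming the k8s.io/api/core/v1 types described in the message above; the Service name, namespace, and port are placeholders:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// RequireDualStack asks the apiserver to allocate one ClusterIP per IP family.
	policy := corev1.IPFamilyPolicyRequireDualStack

	svc := corev1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
		Spec: corev1.ServiceSpec{
			// Pluralized field: order expresses preference, and the first
			// family determines the primary ClusterIP.
			IPFamilies:     []corev1.IPFamily{corev1.IPv4Protocol, corev1.IPv6Protocol},
			IPFamilyPolicy: &policy,
			// ClusterIPs (the pluralized ClusterIP) is left unset here so the
			// apiserver allocates one address per requested family.
			Ports: []corev1.ServicePort{{Port: 80}},
		},
	}

	fmt.Println(svc.Spec.IPFamilies, *svc.Spec.IPFamilyPolicy)
}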
@@ -27,6 +27,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/sets"
 	clientset "k8s.io/client-go/kubernetes"
 	corelisters "k8s.io/client-go/listers/core/v1"
@@ -56,12 +57,62 @@ type endpointMeta struct {
 // slices for the given service. It creates, updates, or deletes endpoint slices
 // to ensure the desired set of pods are represented by endpoint slices.
 func (r *reconciler) reconcile(service *corev1.Service, pods []*corev1.Pod, existingSlices []*discovery.EndpointSlice, triggerTime time.Time) error {
-	addressType := discovery.AddressTypeIPv4
+	slicesToDelete := []*discovery.EndpointSlice{}                                    // slices that no longer match any address type the service has
+	errs := []error{}                                                                 // all errors generated in the process of reconciling
+	slicesByAddressType := make(map[discovery.AddressType][]*discovery.EndpointSlice) // slices by address type
 
-	if endpointutil.IsIPv6Service(service) {
-		addressType = discovery.AddressTypeIPv6
-	}
+	// address types that this service supports [O(1) find]
+	serviceSupportedAddressesTypes := getAddressTypesForService(service)
+
+	// loop through slices identifying their address type.
+	// slices that no longer match an address type supported by the service
+	// go to delete; other slices go to the reconciler machinery
+	// for further adjustment
+	for _, existingSlice := range existingSlices {
+		// service no longer supports that address type, add it to deleted slices
+		if _, ok := serviceSupportedAddressesTypes[existingSlice.AddressType]; !ok {
+			slicesToDelete = append(slicesToDelete, existingSlice)
+			continue
+		}
+
+		// add a list if it is not in our map yet
+		if _, ok := slicesByAddressType[existingSlice.AddressType]; !ok {
+			slicesByAddressType[existingSlice.AddressType] = make([]*discovery.EndpointSlice, 0, 1)
+		}
+
+		slicesByAddressType[existingSlice.AddressType] = append(slicesByAddressType[existingSlice.AddressType], existingSlice)
+	}
+
+	// reconcile the existing slices for each supported address type.
+	for addressType := range serviceSupportedAddressesTypes {
+		existingSlices := slicesByAddressType[addressType]
+		err := r.reconcileByAddressType(service, pods, existingSlices, triggerTime, addressType)
+		if err != nil {
+			errs = append(errs, err)
+		}
+	}
+
+	// delete the slices whose addressType is no longer supported
+	// by the service
+	for _, sliceToDelete := range slicesToDelete {
+		err := r.client.DiscoveryV1beta1().EndpointSlices(service.Namespace).Delete(context.TODO(), sliceToDelete.Name, metav1.DeleteOptions{})
+		if err != nil {
+			errs = append(errs, fmt.Errorf("Error deleting %s EndpointSlice for Service %s/%s: %v", sliceToDelete.Name, service.Namespace, service.Name, err))
+		} else {
+			r.endpointSliceTracker.Delete(sliceToDelete)
+			metrics.EndpointSliceChanges.WithLabelValues("delete").Inc()
+		}
+	}
+
+	return utilerrors.NewAggregate(errs)
+}
+
+// reconcileByAddressType takes a set of pods currently matching a service selector and
+// compares them with the endpoints already present in any existing endpoint
+// slices (by address type) for the given service. It creates, updates, or deletes endpoint slices
+// to ensure the desired set of pods are represented by endpoint slices.
+func (r *reconciler) reconcileByAddressType(service *corev1.Service, pods []*corev1.Pod, existingSlices []*discovery.EndpointSlice, triggerTime time.Time, addressType discovery.AddressType) error {
+
 	slicesToCreate := []*discovery.EndpointSlice{}
 	slicesToUpdate := []*discovery.EndpointSlice{}
 	slicesToDelete := []*discovery.EndpointSlice{}
@@ -70,7 +121,7 @@ func (r *reconciler) reconcile(service *corev1.Service, pods []*corev1.Pod, exis
 	existingSlicesByPortMap := map[endpointutil.PortMapKey][]*discovery.EndpointSlice{}
 	numExistingEndpoints := 0
 	for _, existingSlice := range existingSlices {
-		if ownedBy(existingSlice, service) {
+		if existingSlice.AddressType == addressType && ownedBy(existingSlice, service) {
 			epHash := endpointutil.NewPortMapKey(existingSlice.Ports)
 			existingSlicesByPortMap[epHash] = append(existingSlicesByPortMap[epHash], existingSlice)
 			numExistingEndpoints += len(existingSlice.Endpoints)
@@ -106,7 +157,7 @@ func (r *reconciler) reconcile(service *corev1.Service, pods []*corev1.Pod, exis
 		if err != nil {
 			return err
 		}
-		endpoint := podToEndpoint(pod, node, service)
+		endpoint := podToEndpoint(pod, node, service, addressType)
 		if len(endpoint.Addresses) > 0 {
 			desiredEndpointsByPortMap[epHash].Insert(&endpoint)
 			numDesiredEndpoints++
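An aside on getAddressTypesForService: it appears in this diff only as a call site, and the helper itself lives outside these hunks. As a hedged sketch of the shape its two uses imply (an O(1) membership test plus a range loop, hence a map keyed by discovery.AddressType), one plausible implementation driven by the new Spec.IPFamilies field might look like this; the real helper in the commit may differ:

package endpointslice

import (
	corev1 "k8s.io/api/core/v1"
	discovery "k8s.io/api/discovery/v1beta1"
)

// getAddressTypesForService returns the set of EndpointSlice address types
// the given Service supports. Hedged sketch: inferred from the call sites in
// the diff above, not copied from the commit.
func getAddressTypesForService(service *corev1.Service) map[discovery.AddressType]struct{} {
	supported := make(map[discovery.AddressType]struct{})
	// The pluralized Spec.IPFamilies drives which address types the service
	// supports: one entry per declared IP family.
	for _, family := range service.Spec.IPFamilies {
		switch family {
		case corev1.IPv4Protocol:
			supported[discovery.AddressTypeIPv4] = struct{}{}
		case corev1.IPv6Protocol:
			supported[discovery.AddressTypeIPv6] = struct{}{}
		}
	}
	return supported
}

Using a map as a set keeps the per-slice check in the reconcile loop constant-time, which is what the "[O(1) find]" comment in the new code is getting at.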