dual stack services (#91824)

* api: structure change

* api: defaulting, conversion, and validation

* [FIX] validation: auto remove second ip/family when service changes to SingleStack

* [FIX] api: defaulting, conversion, and validation

* api-server: clusterIPs alloc, printers, storage and strategy

* [FIX] clusterIPs default on read

* alloc: auto remove second ip/family when service changes to SingleStack

* api-server: repair loop handling for clusterIPs

* api-server: force kubernetes default service into single stack

* api-server: tie dualstack feature flag with endpoint feature flag

* controller-manager: feature flag, endpoint, and endpointSlice controllers handling multi family service

* [FIX] controller-manager: feature flag, endpoint, and endpointSlice controllers handling multi family service

* kube-proxy: feature-flag, utils, proxier, and meta proxier

* [FIX] kube-proxy: call both proxiers at the same time (see the sketch after this list)

* kubenet: remove forced pod IP sorting

* kubectl: modify describe to include ClusterIPs, IPFamilies, and IPFamilyPolicy

* e2e: fix tests that depend on the IPFamily field AND add dual stack tests

* e2e: fix expected error message for ClusterIP immutability

* add integration tests for dualstack
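
The kube-proxy items above rely on a "meta proxier" that drives one backend proxier per IP family. The sketch below is a heavily simplified illustration of that fan-out; the `proxier` interface, `metaProxier`, and `loggingProxier` names are made up for the example, while the real meta proxier in pkg/proxy/metaproxier implements the full provider interface and also routes endpoint events by address family.

```go
package main

import "fmt"

// proxier is a trimmed-down, made-up stand-in for kube-proxy's provider
// interface; the real interface has many more event handlers.
type proxier interface {
	OnServiceAdd(namespace, name string)
}

// metaProxier fans each service event out to a per-family proxier, which is
// the essence of "call both proxiers at the same time".
type metaProxier struct {
	ipv4Proxier proxier
	ipv6Proxier proxier
}

func (p *metaProxier) OnServiceAdd(namespace, name string) {
	p.ipv4Proxier.OnServiceAdd(namespace, name)
	p.ipv6Proxier.OnServiceAdd(namespace, name)
}

// loggingProxier is a fake backend that only exists to make the sketch runnable.
type loggingProxier struct{ family string }

func (l loggingProxier) OnServiceAdd(namespace, name string) {
	fmt.Printf("[%s] add service %s/%s\n", l.family, namespace, name)
}

func main() {
	meta := &metaProxier{
		ipv4Proxier: loggingProxier{family: "IPv4"},
		ipv6Proxier: loggingProxier{family: "IPv6"},
	}
	meta.OnServiceAdd("default", "demo")
}
```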

The third phase of dual stack is a very complex change to the API; essentially,
it introduces dual-stack Services. The main changes are (a spec sketch follows this list):

- It pluralizes the Service IPFamily field to IPFamilies,
and removes the singular field.
- It introduces a new field, IPFamilyPolicy (of type IPFamilyPolicyType), that can take
3 values to express the "dual-stack(mad)ness" of the cluster:
SingleStack, PreferDualStack and RequireDualStack.
- It pluralizes ClusterIP to ClusterIPs.
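
For illustration, this is roughly what a Service spec requesting dual stack looks like with the new fields. It is a minimal sketch using the k8s.io/api/core/v1 types; the name, selector, and port are made up for the example, and ClusterIPs is left unset because the apiserver allocates it.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Request a dual-stack Service using the pluralized fields.
	policy := v1.IPFamilyPolicyRequireDualStack
	svc := v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"}, // illustrative name
		Spec: v1.ServiceSpec{
			Selector: map[string]string{"app": "demo"},
			Ports:    []v1.ServicePort{{Port: 80}},
			// IPFamilyPolicy: SingleStack, PreferDualStack or RequireDualStack.
			IPFamilyPolicy: &policy,
			// IPFamilies is ordered; the first entry is the primary family.
			IPFamilies: []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol},
			// ClusterIPs is allocated by the apiserver, one IP per requested
			// family; ClusterIPs[0] stays in sync with the legacy ClusterIP field.
		},
	}
	fmt.Printf("%+v\n", svc.Spec)
}
```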

The goal is to add coverage for the Services API operations,
taking into account the six different modes a cluster can have (a test-matrix sketch follows this list):

- single stack: IPv4 or IPv6 (as of today)
- dual stack: IPv4 only, IPv6 only, IPv4-IPv6, IPv6-IPv4
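
As a rough sketch, that matrix can be written as a table-driven list. The `clusterMode` struct and the CIDRs below are made up for illustration; they are not the upstream test fixtures.

```go
package main

import "fmt"

// clusterMode is a made-up description of one configuration the tests need to
// cover; it is not the upstream test fixture.
type clusterMode struct {
	dualStackCapable bool
	serviceCIDRs     []string // ordered, primary family first
}

func main() {
	modes := []clusterMode{
		// single stack (as of today)
		{dualStackCapable: false, serviceCIDRs: []string{"10.0.0.0/16"}},
		{dualStackCapable: false, serviceCIDRs: []string{"2001:db8::/108"}},
		// dual-stack capable clusters
		{dualStackCapable: true, serviceCIDRs: []string{"10.0.0.0/16"}},                   // IPv4 only
		{dualStackCapable: true, serviceCIDRs: []string{"2001:db8::/108"}},                // IPv6 only
		{dualStackCapable: true, serviceCIDRs: []string{"10.0.0.0/16", "2001:db8::/108"}}, // IPv4,IPv6
		{dualStackCapable: true, serviceCIDRs: []string{"2001:db8::/108", "10.0.0.0/16"}}, // IPv6,IPv4
	}
	for _, m := range modes {
		fmt.Printf("dualStackCapable=%v serviceCIDRs=%v\n", m.dualStackCapable, m.serviceCIDRs)
	}
}
```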

* [FIX] add integration tests for dualstack

* generated data

* generated files

Co-authored-by: Antonio Ojea <aojea@redhat.com>
Khaled Henidak (Kal), 2020-10-26 13:15:59 -07:00, committed by GitHub
parent d0e06cf3e0
commit 6675eba3ef
84 changed files with 11170 additions and 3514 deletions

@@ -32,7 +32,6 @@ import (
apiservice "k8s.io/kubernetes/pkg/api/v1/service"
"k8s.io/kubernetes/pkg/proxy/metrics"
utilproxy "k8s.io/kubernetes/pkg/proxy/util"
utilnet "k8s.io/utils/net"
)
// BaseServiceInfo contains base information that defines a service.
@@ -135,8 +134,10 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic
// Kube-apiserver side guarantees SessionAffinityConfig won't be nil when session affinity type is ClientIP
stickyMaxAgeSeconds = int(*service.Spec.SessionAffinityConfig.ClientIP.TimeoutSeconds)
}
clusterIP := utilproxy.GetClusterIPByFamily(sct.ipFamily, service)
info := &BaseServiceInfo{
clusterIP: net.ParseIP(service.Spec.ClusterIP),
clusterIP: net.ParseIP(clusterIP),
port: int(port.Port),
protocol: port.Protocol,
nodePort: int(port.NodePort),
@@ -150,41 +151,38 @@ func (sct *ServiceChangeTracker) newBaseServiceInfo(port *v1.ServicePort, servic
for i, sourceRange := range service.Spec.LoadBalancerSourceRanges {
loadBalancerSourceRanges[i] = strings.TrimSpace(sourceRange)
}
// filter external ips, source ranges and ingress ips
// prior to dual stack services, this was considered an error, but with dual stack
// services, this is actually expected. Hence we downgraded from reporting by events
// to just log lines with high verbosity
if sct.isIPv6Mode == nil {
info.externalIPs = make([]string, len(service.Spec.ExternalIPs))
info.loadBalancerSourceRanges = loadBalancerSourceRanges
copy(info.externalIPs, service.Spec.ExternalIPs)
// Deep-copy in case the service instance changes
info.loadBalancerStatus = *service.Status.LoadBalancer.DeepCopy()
} else {
// Filter out the incorrect IP version case.
// If ExternalIPs, LoadBalancerSourceRanges and LoadBalancerStatus Ingress on service contains incorrect IP versions,
// only filter out the incorrect ones.
var incorrectIPs []string
info.externalIPs, incorrectIPs = utilproxy.FilterIncorrectIPVersion(service.Spec.ExternalIPs, *sct.isIPv6Mode)
if len(incorrectIPs) > 0 {
utilproxy.LogAndEmitIncorrectIPVersionEvent(sct.recorder, "externalIPs", strings.Join(incorrectIPs, ","), service.Namespace, service.Name, service.UID)
}
info.loadBalancerSourceRanges, incorrectIPs = utilproxy.FilterIncorrectCIDRVersion(loadBalancerSourceRanges, *sct.isIPv6Mode)
if len(incorrectIPs) > 0 {
utilproxy.LogAndEmitIncorrectIPVersionEvent(sct.recorder, "loadBalancerSourceRanges", strings.Join(incorrectIPs, ","), service.Namespace, service.Name, service.UID)
}
// Obtain Load Balancer Ingress IPs
var ips []string
for _, ing := range service.Status.LoadBalancer.Ingress {
ips = append(ips, ing.IP)
}
if len(ips) > 0 {
correctIPs, incorrectIPs := utilproxy.FilterIncorrectIPVersion(ips, *sct.isIPv6Mode)
if len(incorrectIPs) > 0 {
utilproxy.LogAndEmitIncorrectIPVersionEvent(sct.recorder, "Load Balancer ingress IPs", strings.Join(incorrectIPs, ","), service.Namespace, service.Name, service.UID)
}
// Create the LoadBalancerStatus with the filtererd IPs
for _, ip := range correctIPs {
info.loadBalancerStatus.Ingress = append(info.loadBalancerStatus.Ingress, v1.LoadBalancerIngress{IP: ip})
var incorrectIPs []string
info.externalIPs, incorrectIPs = utilproxy.FilterIncorrectIPVersion(service.Spec.ExternalIPs, sct.ipFamily)
if len(incorrectIPs) > 0 {
klog.V(4).Infof("service change tracker(%v) ignored the following external IPs(%s) for service %v/%v as they don't match IPFamily", sct.ipFamily, strings.Join(incorrectIPs, ","), service.Namespace, service.Name)
}
}
info.loadBalancerSourceRanges, incorrectIPs = utilproxy.FilterIncorrectCIDRVersion(loadBalancerSourceRanges, sct.ipFamily)
if len(incorrectIPs) > 0 {
klog.V(4).Infof("service change tracker(%v) ignored the following load balancer source ranges(%s) for service %v/%v as they don't match IPFamily", sct.ipFamily, strings.Join(incorrectIPs, ","), service.Namespace, service.Name)
}
// Obtain Load Balancer Ingress IPs
var ips []string
for _, ing := range service.Status.LoadBalancer.Ingress {
ips = append(ips, ing.IP)
}
if len(ips) > 0 {
correctIPs, incorrectIPs := utilproxy.FilterIncorrectIPVersion(ips, sct.ipFamily)
if len(incorrectIPs) > 0 {
klog.V(4).Infof("service change tracker(%v) ignored the following load balancer(%s) ingress ips for service %v/%v as they don't match IPFamily", sct.ipFamily, strings.Join(incorrectIPs, ","), service.Namespace, service.Name)
}
// Create the LoadBalancerStatus with the filtered IPs
for _, ip := range correctIPs {
info.loadBalancerStatus.Ingress = append(info.loadBalancerStatus.Ingress, v1.LoadBalancerIngress{IP: ip})
}
}
@@ -224,18 +222,18 @@ type ServiceChangeTracker struct {
// makeServiceInfo allows proxier to inject customized information when processing service.
makeServiceInfo makeServicePortFunc
processServiceMapChange processServiceMapChangeFunc
// isIPv6Mode indicates if change tracker is under IPv6/IPv4 mode. Nil means not applicable.
isIPv6Mode *bool
recorder record.EventRecorder
ipFamily v1.IPFamily
recorder record.EventRecorder
}
// NewServiceChangeTracker initializes a ServiceChangeTracker
func NewServiceChangeTracker(makeServiceInfo makeServicePortFunc, isIPv6Mode *bool, recorder record.EventRecorder, processServiceMapChange processServiceMapChangeFunc) *ServiceChangeTracker {
func NewServiceChangeTracker(makeServiceInfo makeServicePortFunc, ipFamily v1.IPFamily, recorder record.EventRecorder, processServiceMapChange processServiceMapChangeFunc) *ServiceChangeTracker {
return &ServiceChangeTracker{
items: make(map[types.NamespacedName]*serviceChange),
makeServiceInfo: makeServiceInfo,
isIPv6Mode: isIPv6Mode,
recorder: recorder,
ipFamily: ipFamily,
processServiceMapChange: processServiceMapChange,
}
}
@@ -322,13 +320,9 @@ func (sct *ServiceChangeTracker) serviceToServiceMap(service *v1.Service) Servic
return nil
}
if len(service.Spec.ClusterIP) != 0 {
// Filter out the incorrect IP version case.
// If ClusterIP on service has incorrect IP version, service itself will be ignored.
if sct.isIPv6Mode != nil && utilnet.IsIPv6String(service.Spec.ClusterIP) != *sct.isIPv6Mode {
utilproxy.LogAndEmitIncorrectIPVersionEvent(sct.recorder, "clusterIP", service.Spec.ClusterIP, service.Namespace, service.Name, service.UID)
return nil
}
clusterIP := utilproxy.GetClusterIPByFamily(sct.ipFamily, service)
if clusterIP == "" {
return nil
}
serviceMap := make(ServiceMap)
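
For context on the proxier hunks above: the change tracker now picks the cluster IP that matches its own IP family instead of consulting a nullable isIPv6Mode flag. The snippet below is a simplified, self-contained sketch of what utilproxy.GetClusterIPByFamily does, not the upstream implementation (which also falls back to the legacy singular ClusterIP field for services created before IPFamilies defaulting); `getClusterIPByFamily`, `ipFamily`, and `isIPv6String` here are local stand-ins.

```go
package main

import (
	"fmt"
	"net"
)

// ipFamily mirrors the notion of v1.IPFamily ("IPv4" / "IPv6") without
// pulling in the Kubernetes API types.
type ipFamily string

const (
	ipv4 ipFamily = "IPv4"
	ipv6 ipFamily = "IPv6"
)

func isIPv6String(ip string) bool {
	parsed := net.ParseIP(ip)
	return parsed != nil && parsed.To4() == nil
}

// getClusterIPByFamily returns the first cluster IP of the requested family,
// or "" if the service carries none; the proxier then skips the service.
// clusterIPs is ordered like Service.Spec.ClusterIPs.
func getClusterIPByFamily(family ipFamily, clusterIPs []string) string {
	for _, ip := range clusterIPs {
		if (family == ipv6) == isIPv6String(ip) {
			return ip
		}
	}
	return ""
}

func main() {
	ips := []string{"10.96.0.10", "2001:db8::10"}
	fmt.Println(getClusterIPByFamily(ipv4, ips)) // 10.96.0.10
	fmt.Println(getClusterIPByFamily(ipv6, ips)) // 2001:db8::10
}
```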