Mirror of https://github.com/k3s-io/kubernetes.git
Allow single-to-dual-stack reconfiguration for ServiceCIDR
This change modifies the validation logic for ServiceCIDR updates (ValidateServiceCIDRUpdate) to specifically permit upgrading a single-stack ServiceCIDR (either IPv4 or IPv6) to a dual-stack configuration. This reconfiguration path is considered safe because it only adds a new CIDR range without altering the existing primary CIDR, so existing Service IP allocations are not disrupted.

Other modifications remain disallowed by the validation:

- downgrading from dual-stack to single-stack
- reordering the CIDRs of a dual-stack configuration
- changing the primary CIDR during a single-to-dual-stack reconfiguration

These operations carry a higher risk of breaking existing Services or cluster networking configurations. Blocking these automatic updates encourages administrators to perform such changes manually, after carefully assessing the potential impact on their specific cluster environment. The validation errors and controller logs provide guidance when such disallowed changes are attempted.

Change-Id: I41dc09dfddb05f277925da2262f8114d6accbd1d
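To make the rule concrete, here is a minimal, self-contained Go sketch of the accept/reject decision described above (illustrative only; it is not the validation code from this commit, the helper name is hypothetical, and it omits the secondary checks the real code performs on the added CIDR, such as syntactic validity and one CIDR per IP family):

package main

import "fmt"

// allowedCIDRUpdate mirrors the update rule sketched above: equal-length
// spec.cidrs lists must be identical; a 1 -> 2 transition may only append a
// secondary CIDR while keeping the primary in place; everything else
// (downgrade, reorder, more than two entries) is rejected.
func allowedCIDRUpdate(old, update []string) bool {
	switch {
	case len(old) == len(update):
		for i := range old {
			if old[i] != update[i] {
				return false
			}
		}
		return true
	case len(old) == 1 && len(update) == 2:
		return update[0] == old[0]
	default:
		return false
	}
}

func main() {
	fmt.Println(allowedCIDRUpdate([]string{"10.96.0.0/16"}, []string{"10.96.0.0/16", "fd00::/108"})) // true: single-to-dual upgrade
	fmt.Println(allowedCIDRUpdate([]string{"10.96.0.0/16", "fd00::/108"}, []string{"10.96.0.0/16"})) // false: downgrade
	fmt.Println(allowedCIDRUpdate([]string{"10.96.0.0/16"}, []string{"fd00::/108", "10.96.0.0/16"})) // false: primary changed
}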
parent: b15dfce6cb
commit: 0266d3bcb3
@@ -765,28 +765,31 @@ var ValidateServiceCIDRName = apimachineryvalidation.NameIsDNSSubdomain
 func ValidateServiceCIDR(cidrConfig *networking.ServiceCIDR) field.ErrorList {
 	allErrs := apivalidation.ValidateObjectMeta(&cidrConfig.ObjectMeta, false, ValidateServiceCIDRName, field.NewPath("metadata"))
-	fieldPath := field.NewPath("spec", "cidrs")
+	allErrs = append(allErrs, validateServiceCIDRSpec(&cidrConfig.Spec, field.NewPath("spec", "cidrs"))...)
+	return allErrs
+}
 
-	if len(cidrConfig.Spec.CIDRs) == 0 {
+func validateServiceCIDRSpec(cidrConfigSpec *networking.ServiceCIDRSpec, fieldPath *field.Path) field.ErrorList {
+	var allErrs field.ErrorList
+	if len(cidrConfigSpec.CIDRs) == 0 {
 		allErrs = append(allErrs, field.Required(fieldPath, "at least one CIDR required"))
 		return allErrs
 	}
 
-	if len(cidrConfig.Spec.CIDRs) > 2 {
-		allErrs = append(allErrs, field.Invalid(fieldPath, cidrConfig.Spec, "may only hold up to 2 values"))
+	if len(cidrConfigSpec.CIDRs) > 2 {
+		allErrs = append(allErrs, field.Invalid(fieldPath, cidrConfigSpec, "may only hold up to 2 values"))
 		return allErrs
 	}
-	// validate cidrs are dual stack, one of each IP family
-	if len(cidrConfig.Spec.CIDRs) == 2 {
-		isDual, err := netutils.IsDualStackCIDRStrings(cidrConfig.Spec.CIDRs)
-		if err != nil || !isDual {
-			allErrs = append(allErrs, field.Invalid(fieldPath, cidrConfig.Spec, "may specify no more than one IP for each IP family, i.e 192.168.0.0/24 and 2001:db8::/64"))
-			return allErrs
-		}
-	}
 
-	for i, cidr := range cidrConfig.Spec.CIDRs {
-		allErrs = append(allErrs, validation.IsValidCIDR(fieldPath.Index(i), cidr)...)
+	for i, cidr := range cidrConfigSpec.CIDRs {
+		allErrs = append(allErrs, validation.IsValidCIDR(fieldPath.Index(i), cidr)...)
+	}
+
+	// validate cidrs are dual stack, one of each IP family
+	if len(cidrConfigSpec.CIDRs) == 2 &&
+		netutils.IPFamilyOfCIDRString(cidrConfigSpec.CIDRs[0]) == netutils.IPFamilyOfCIDRString(cidrConfigSpec.CIDRs[1]) &&
+		netutils.IPFamilyOfCIDRString(cidrConfigSpec.CIDRs[0]) != netutils.IPFamilyUnknown {
+		allErrs = append(allErrs, field.Invalid(fieldPath, cidrConfigSpec.CIDRs, "may specify no more than one IP for each IP family, i.e 192.168.0.0/24 and 2001:db8::/64"))
 	}
 
 	return allErrs
@@ -796,8 +799,28 @@ func ValidateServiceCIDR(cidrConfig *networking.ServiceCIDR) field.ErrorList {
 func ValidateServiceCIDRUpdate(update, old *networking.ServiceCIDR) field.ErrorList {
 	var allErrs field.ErrorList
 	allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...)
-	allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.Spec.CIDRs, old.Spec.CIDRs, field.NewPath("spec").Child("cidrs"))...)
+	switch {
+	// no change in Spec.CIDRs length, fields must not change
+	case len(old.Spec.CIDRs) == len(update.Spec.CIDRs):
+		for i, ip := range old.Spec.CIDRs {
+			if ip != update.Spec.CIDRs[i] {
+				allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("cidrs").Index(i), update.Spec.CIDRs[i], apimachineryvalidation.FieldImmutableErrorMsg))
+			}
+		}
+	// adding a new CIDR to convert to dual stack is allowed
+	// ref: https://issues.k8s.io/131261
+	case len(old.Spec.CIDRs) == 1 && len(update.Spec.CIDRs) == 2:
+		// the existing CIDR can not change
+		if update.Spec.CIDRs[0] != old.Spec.CIDRs[0] {
+			allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("cidrs").Index(0), update.Spec.CIDRs[0], apimachineryvalidation.FieldImmutableErrorMsg))
+		}
+		// validate the newly added CIDR
+		allErrs = append(allErrs, validateServiceCIDRSpec(&update.Spec, field.NewPath("spec", "cidrs"))...)
+
+	// no other changes allowed
+	default:
+		allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("cidrs"), update.Spec.CIDRs, apimachineryvalidation.FieldImmutableErrorMsg))
+	}
 	return allErrs
 }
@@ -2373,6 +2373,17 @@ func TestValidateServiceCIDR(t *testing.T) {
 			},
 		},
 	},
+	"bad-iprange-ipv6-bad-ipv4": {
+		expectedErrors: 2,
+		ipRange: &networking.ServiceCIDR{
+			ObjectMeta: metav1.ObjectMeta{
+				Name: "test-name",
+			},
+			Spec: networking.ServiceCIDRSpec{
+				CIDRs: []string{"192.168.007.0/24", "MN00:1234::/64"},
+			},
+		},
+	},
 }
 
 for name, testCase := range testCases {
@@ -2386,9 +2397,27 @@ func TestValidateServiceCIDR(t *testing.T) {
 }
 
 func TestValidateServiceCIDRUpdate(t *testing.T) {
-	oldServiceCIDR := &networking.ServiceCIDR{
+	oldServiceCIDRv4 := &networking.ServiceCIDR{
 		ObjectMeta: metav1.ObjectMeta{
-			Name:            "mysvc",
+			Name:            "mysvc-v4",
 			ResourceVersion: "1",
 		},
 		Spec: networking.ServiceCIDRSpec{
+			CIDRs: []string{"192.168.0.0/24"},
+		},
+	}
+	oldServiceCIDRv6 := &networking.ServiceCIDR{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            "mysvc-v6",
+			ResourceVersion: "1",
+		},
+		Spec: networking.ServiceCIDRSpec{
+			CIDRs: []string{"fd00:1234::/64"},
+		},
+	}
+	oldServiceCIDRDual := &networking.ServiceCIDR{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:            "mysvc-dual",
+			ResourceVersion: "1",
+		},
+		Spec: networking.ServiceCIDRSpec{
@@ -2396,45 +2425,196 @@ func TestValidateServiceCIDRUpdate(t *testing.T) {
 		},
 	}
 
+	// Define the expected immutable field error paths for convenience
+	cidrsPath := field.NewPath("spec").Child("cidrs")
+	cidr0Path := cidrsPath.Index(0)
+	cidr1Path := cidrsPath.Index(1)
+
 	testCases := []struct {
-		name      string
-		svc       func(svc *networking.ServiceCIDR) *networking.ServiceCIDR
-		expectErr bool
+		name         string
+		old          *networking.ServiceCIDR
+		new          *networking.ServiceCIDR
+		expectedErrs field.ErrorList
 	}{
 		{
-			name: "Successful update, no changes",
-			svc: func(svc *networking.ServiceCIDR) *networking.ServiceCIDR {
-				out := svc.DeepCopy()
-				return out
-			},
-			expectErr: false,
+			name: "Successful update, no changes (dual)",
+			old:  oldServiceCIDRDual,
+			new:  oldServiceCIDRDual.DeepCopy(),
 		},
-
 		{
-			name: "Failed update, update spec.CIDRs single stack",
-			svc: func(svc *networking.ServiceCIDR) *networking.ServiceCIDR {
-				out := svc.DeepCopy()
-				out.Spec.CIDRs = []string{"10.0.0.0/16"}
-				return out
-			},
-			expectErr: true,
+			name: "Successful update, no changes (v4)",
+			old:  oldServiceCIDRv4,
+			new:  oldServiceCIDRv4.DeepCopy(),
 		},
 		{
-			name: "Failed update, update spec.CIDRs dual stack",
-			svc: func(svc *networking.ServiceCIDR) *networking.ServiceCIDR {
-				out := svc.DeepCopy()
-				out.Spec.CIDRs = []string{"10.0.0.0/24", "fd00:1234::/64"}
-				return out
-			},
-			expectErr: true,
+			name: "Successful update, single IPv4 to dual stack upgrade",
+			old:  oldServiceCIDRv4,
+			new: func() *networking.ServiceCIDR {
+				out := oldServiceCIDRv4.DeepCopy()
+				out.Spec.CIDRs = []string{"192.168.0.0/24", "fd00:1234::/64"} // Add IPv6
+				return out
+			}(),
+		},
+		{
+			name: "Successful update, single IPv6 to dual stack upgrade",
+			old:  oldServiceCIDRv6,
+			new: func() *networking.ServiceCIDR {
+				out := oldServiceCIDRv6.DeepCopy()
+				out.Spec.CIDRs = []string{"fd00:1234::/64", "192.168.0.0/24"} // Add IPv4
+				return out
+			}(),
+		},
+		{
+			name: "Failed update, change CIDRs (dual)",
+			old:  oldServiceCIDRDual,
+			new: func() *networking.ServiceCIDR {
+				out := oldServiceCIDRDual.DeepCopy()
+				out.Spec.CIDRs = []string{"10.0.0.0/16", "fd00:abcd::/64"}
+				return out
+			}(),
+			expectedErrs: field.ErrorList{
+				field.Invalid(cidr0Path, "10.0.0.0/16", apimachineryvalidation.FieldImmutableErrorMsg),
+				field.Invalid(cidr1Path, "fd00:abcd::/64", apimachineryvalidation.FieldImmutableErrorMsg),
+			},
+		},
+		{
+			name: "Failed update, change CIDRs (single)",
+			old:  oldServiceCIDRv4,
+			new: func() *networking.ServiceCIDR {
+				out := oldServiceCIDRv4.DeepCopy()
+				out.Spec.CIDRs = []string{"10.0.0.0/16"}
+				return out
+			}(),
+			expectedErrs: field.ErrorList{field.Invalid(cidr0Path, "10.0.0.0/16", apimachineryvalidation.FieldImmutableErrorMsg)},
+		},
+		{
+			name: "Failed update, single IPv4 to dual stack upgrade with primary change",
+			old:  oldServiceCIDRv4,
+			new: func() *networking.ServiceCIDR {
+				out := oldServiceCIDRv4.DeepCopy()
+				// Change primary CIDR during upgrade
+				out.Spec.CIDRs = []string{"10.0.0.0/16", "fd00:1234::/64"}
+				return out
+			}(),
+			expectedErrs: field.ErrorList{field.Invalid(cidr0Path, "10.0.0.0/16", apimachineryvalidation.FieldImmutableErrorMsg)},
+		},
+		{
+			name: "Failed update, single IPv6 to dual stack upgrade with primary change",
+			old:  oldServiceCIDRv6,
+			new: func() *networking.ServiceCIDR {
+				out := oldServiceCIDRv6.DeepCopy()
+				// Change primary CIDR during upgrade
+				out.Spec.CIDRs = []string{"fd00:abcd::/64", "192.168.0.0/24"}
+				return out
+			}(),
+			expectedErrs: field.ErrorList{field.Invalid(cidr0Path, "fd00:abcd::/64", apimachineryvalidation.FieldImmutableErrorMsg)},
+		},
+		{
+			name: "Failed update, dual stack downgrade to single",
+			old:  oldServiceCIDRDual,
+			new: func() *networking.ServiceCIDR {
+				out := oldServiceCIDRDual.DeepCopy()
+				out.Spec.CIDRs = []string{"192.168.0.0/24"} // Remove IPv6
+				return out
+			}(),
+			expectedErrs: field.ErrorList{field.Invalid(cidrsPath, []string{"192.168.0.0/24"}, apimachineryvalidation.FieldImmutableErrorMsg)},
+		},
+		{
+			name: "Failed update, dual stack reorder",
+			old:  oldServiceCIDRDual,
+			new: func() *networking.ServiceCIDR {
+				out := oldServiceCIDRDual.DeepCopy()
+				// Swap order
+				out.Spec.CIDRs = []string{"fd00:1234::/64", "192.168.0.0/24"}
+				return out
+			}(),
+			expectedErrs: field.ErrorList{
+				field.Invalid(cidr0Path, "fd00:1234::/64", apimachineryvalidation.FieldImmutableErrorMsg),
+				field.Invalid(cidr1Path, "192.168.0.0/24", apimachineryvalidation.FieldImmutableErrorMsg),
+			},
+		},
+		{
+			name: "Failed update, add invalid CIDR during upgrade",
+			old:  oldServiceCIDRv4,
+			new: func() *networking.ServiceCIDR {
+				out := oldServiceCIDRv4.DeepCopy()
+				out.Spec.CIDRs = []string{"192.168.0.0/24", "invalid-cidr"}
+				return out
+			}(),
+			expectedErrs: field.ErrorList{field.Invalid(cidrsPath.Index(1), "invalid-cidr", "must be a valid CIDR value, (e.g. 10.9.8.0/24 or 2001:db8::/64)")},
+		},
+		{
+			name: "Failed update, add duplicate family CIDR during upgrade",
+			old:  oldServiceCIDRv4,
+			new: func() *networking.ServiceCIDR {
+				out := oldServiceCIDRv4.DeepCopy()
+				out.Spec.CIDRs = []string{"192.168.0.0/24", "10.0.0.0/16"}
+				return out
+			}(),
+			expectedErrs: field.ErrorList{field.Invalid(cidrsPath, []string{"192.168.0.0/24", "10.0.0.0/16"}, "may specify no more than one IP for each IP family, i.e 192.168.0.0/24 and 2001:db8::/64")},
+		},
+		{
+			name: "Failed update, dual stack remove one cidr",
+			old:  oldServiceCIDRDual,
+			new: func() *networking.ServiceCIDR {
+				out := oldServiceCIDRDual.DeepCopy()
+				out.Spec.CIDRs = out.Spec.CIDRs[0:1]
+				return out
+			}(),
+			expectedErrs: field.ErrorList{
+				field.Invalid(cidrsPath, []string{"192.168.0.0/24"}, apimachineryvalidation.FieldImmutableErrorMsg),
+			},
+		},
+		{
+			name: "Failed update, dual stack remove all cidrs",
+			old:  oldServiceCIDRDual,
+			new: func() *networking.ServiceCIDR {
+				out := oldServiceCIDRDual.DeepCopy()
+				out.Spec.CIDRs = []string{}
+				return out
+			}(),
+			expectedErrs: field.ErrorList{
+				field.Invalid(cidrsPath, []string{}, apimachineryvalidation.FieldImmutableErrorMsg),
+			},
+		},
+		{
+			name: "Failed update, single stack remove cidr",
+			old:  oldServiceCIDRv4,
+			new: func() *networking.ServiceCIDR {
+				out := oldServiceCIDRv4.DeepCopy()
+				out.Spec.CIDRs = []string{}
+				return out
+			}(),
+			expectedErrs: field.ErrorList{
+				field.Invalid(cidrsPath, []string{}, apimachineryvalidation.FieldImmutableErrorMsg),
+			},
+		},
+		{
+			name: "Failed update, add additional cidrs",
+			old:  oldServiceCIDRDual,
+			new: func() *networking.ServiceCIDR {
+				out := oldServiceCIDRDual.DeepCopy()
+				out.Spec.CIDRs = append(out.Spec.CIDRs, "172.16.0.0/24")
+				return out
+			}(),
+			expectedErrs: field.ErrorList{
+				field.Invalid(cidrsPath, []string{"192.168.0.0/24", "fd00:1234::/64", "172.16.0.0/24"}, apimachineryvalidation.FieldImmutableErrorMsg),
+			},
 		},
 	}
-	for _, testCase := range testCases {
-		t.Run(testCase.name, func(t *testing.T) {
-			err := ValidateServiceCIDRUpdate(testCase.svc(oldServiceCIDR), oldServiceCIDR)
-			if !testCase.expectErr && err != nil {
-				t.Errorf("ValidateServiceCIDRUpdate must be successful for test '%s', got %v", testCase.name, err)
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			// Ensure ResourceVersion is set for update validation
+			tc.new.ResourceVersion = tc.old.ResourceVersion
+			errs := ValidateServiceCIDRUpdate(tc.new, tc.old)
+
+			if len(errs) != len(tc.expectedErrs) {
+				t.Fatalf("Expected %d errors, got %d errors: %v", len(tc.expectedErrs), len(errs), errs)
 			}
-			if testCase.expectErr && err == nil {
-				t.Errorf("ValidateServiceCIDRUpdate must return error for test: %s, but got nil", testCase.name)
+			for i, expectedErr := range tc.expectedErrs {
+				if errs[i].Error() != expectedErr.Error() {
+					t.Errorf("Expected error %d: %v, got: %v", i, expectedErr, errs[i])
+				}
 			}
 		})
 	}
@@ -91,7 +91,9 @@ type Controller struct {
 	serviceCIDRLister  networkingv1listers.ServiceCIDRLister
 	serviceCIDRsSynced cache.InformerSynced
 
-	interval time.Duration
+	interval                  time.Duration
+	reportedMismatchedCIDRs   bool
+	reportedNotReadyCondition bool
 }
 
 // Start will not return until the default ServiceCIDR exists or stopCh is closed.
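The two booleans added here give the warning paths report-once semantics: as the sync and syncStatus changes below show, the mismatch and not-ready warnings are logged and emitted as events only the first time they are observed, rather than on every sync interval.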
@@ -138,7 +140,19 @@ func (c *Controller) sync() error {
 	serviceCIDR, err := c.serviceCIDRLister.Get(DefaultServiceCIDRName)
 	// if exists
 	if err == nil {
-		c.syncStatus(serviceCIDR)
+		// single to dual stack upgrade
+		if len(c.cidrs) == 2 && len(serviceCIDR.Spec.CIDRs) == 1 && c.cidrs[0] == serviceCIDR.Spec.CIDRs[0] {
+			klog.Infof("Updating default ServiceCIDR from single-stack (%v) to dual-stack (%v)", serviceCIDR.Spec.CIDRs, c.cidrs)
+			serviceCIDRcopy := serviceCIDR.DeepCopy()
+			serviceCIDRcopy.Spec.CIDRs = c.cidrs
+			_, err := c.client.NetworkingV1().ServiceCIDRs().Update(context.Background(), serviceCIDRcopy, metav1.UpdateOptions{})
+			if err != nil {
+				klog.Infof("The default ServiceCIDR can not be updated from %s to dual stack %v : %v", c.cidrs[0], c.cidrs, err)
+				c.eventRecorder.Eventf(serviceCIDR, v1.EventTypeWarning, "KubernetesDefaultServiceCIDRError", "The default ServiceCIDR can not be upgraded from %s to dual stack %v : %v", c.cidrs[0], c.cidrs, err)
+			}
+		} else {
+			c.syncStatus(serviceCIDR)
+		}
 		return nil
 	}
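For example (illustrative values, not from this diff): restarting the kube-apiserver of a cluster whose default ServiceCIDR is ["10.96.0.0/16"] with --service-cluster-ip-range=10.96.0.0/16,fd00::/108 now makes this controller update the stored object in place to the dual-stack pair; any other mismatch between the flags and the stored CIDRs is only logged and reported as an event, as the syncStatus changes below show.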
@@ -175,18 +189,28 @@ func (c *Controller) syncStatus(serviceCIDR *networkingapiv1.ServiceCIDR) {
 
 	// This controller will set the Ready condition to true if the Ready condition
 	// does not exist and the CIDR values match this controller CIDR values.
+	sameConfig := reflect.DeepEqual(c.cidrs, serviceCIDR.Spec.CIDRs)
 	for _, condition := range serviceCIDR.Status.Conditions {
 		if condition.Type == networkingapiv1.ServiceCIDRConditionReady {
 			if condition.Status == metav1.ConditionTrue {
-				return
+				// ServiceCIDR is Ready and config matches this apiserver,
+				// nothing else is required
+				if sameConfig {
+					return
+				}
+			} else {
+				if !c.reportedNotReadyCondition {
+					klog.InfoS("default ServiceCIDR condition Ready is not True, please validate your cluster's network configuration for this ServiceCIDR", "status", condition.Status, "reason", condition.Reason, "message", condition.Message)
+					c.eventRecorder.Eventf(serviceCIDR, v1.EventTypeWarning, condition.Reason, condition.Message)
+					c.reportedNotReadyCondition = true
+				}
+				return
 			}
-			klog.Infof("default ServiceCIDR condition Ready is not True: %v", condition.Status)
-			c.eventRecorder.Eventf(serviceCIDR, v1.EventTypeWarning, condition.Reason, condition.Message)
-			return
 		}
 	}
-	// set status to ready if the ServiceCIDR matches this configuration
-	if reflect.DeepEqual(c.cidrs, serviceCIDR.Spec.CIDRs) {
+	// No condition set, set status to ready if the ServiceCIDR matches this configuration;
+	// otherwise, warn about it since the network configuration of the cluster is inconsistent
+	if sameConfig {
 		klog.Infof("Setting default ServiceCIDR condition Ready to True")
 		svcApplyStatus := networkingapiv1apply.ServiceCIDRStatus().WithConditions(
 			metav1apply.Condition().
@@ -199,5 +223,9 @@ func (c *Controller) syncStatus(serviceCIDR *networkingapiv1.ServiceCIDR) {
 			klog.Infof("error updating default ServiceCIDR status: %v", errApply)
 			c.eventRecorder.Eventf(serviceCIDR, v1.EventTypeWarning, "KubernetesDefaultServiceCIDRError", "The default ServiceCIDR Status can not be set to Ready=True")
 		}
-	}
+	} else if !c.reportedMismatchedCIDRs {
+		klog.Infof("inconsistent ServiceCIDR status, global configuration: %v local configuration: %v, configure the flags to match current ServiceCIDR or manually delete the default ServiceCIDR", serviceCIDR.Spec.CIDRs, c.cidrs)
+		c.eventRecorder.Eventf(serviceCIDR, v1.EventTypeWarning, "KubernetesDefaultServiceCIDRInconsistent", "The default ServiceCIDR %v does not match the flag configurations %s", serviceCIDR.Spec.CIDRs, c.cidrs)
+		c.reportedMismatchedCIDRs = true
+	}
 }
@@ -23,6 +23,7 @@ import (
 	"github.com/google/go-cmp/cmp"
 	networkingapiv1 "k8s.io/api/networking/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	k8stesting "k8s.io/client-go/testing"
@@ -35,8 +36,13 @@ const (
 	defaultIPv6CIDR = "2001:db8::/64"
 )
 
-func newController(t *testing.T, objects []*networkingapiv1.ServiceCIDR) (*fake.Clientset, *Controller) {
-	client := fake.NewSimpleClientset()
+func newController(t *testing.T, cidrsFromFlags []string, objects ...*networkingapiv1.ServiceCIDR) (*fake.Clientset, *Controller) {
+	var runtimeObjects []runtime.Object
+	for _, cidr := range objects {
+		runtimeObjects = append(runtimeObjects, cidr)
+	}
+
+	client := fake.NewSimpleClientset(runtimeObjects...)
 
 	informerFactory := informers.NewSharedInformerFactory(client, 0)
 	serviceCIDRInformer := informerFactory.Networking().V1().ServiceCIDRs()
@@ -47,12 +53,11 @@ func newController(t *testing.T, objects []*networkingapiv1.ServiceCIDR) (*fake.Clientset, *Controller) {
 		if err != nil {
 			t.Fatal(err)
 		}
-
 	}
 	c := &Controller{
 		client:             client,
 		interval:           time.Second,
-		cidrs:              []string{defaultIPv4CIDR, defaultIPv6CIDR},
+		cidrs:              cidrsFromFlags,
 		eventRecorder:      record.NewFakeRecorder(100),
 		serviceCIDRLister:  serviceCIDRInformer.Lister(),
 		serviceCIDRsSynced: func() bool { return true },
@@ -151,13 +156,195 @@ func TestControllerSync(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
-			client, controller := newController(t, tc.cidrs)
+			client, controller := newController(t, []string{defaultIPv4CIDR, defaultIPv6CIDR}, tc.cidrs...)
 			controller.sync()
 			expectAction(t, client.Actions(), tc.actions)
 		})
 	}
 }
 
+func TestControllerSyncConversions(t *testing.T) {
+	testCases := []struct {
+		name            string
+		controllerCIDRs []string
+		existingCIDR    *networkingapiv1.ServiceCIDR
+		expectedAction  [][]string // verb, resource, [subresource]
+	}{
+		{
+			name:            "flags match ServiceCIDRs",
+			controllerCIDRs: []string{defaultIPv4CIDR, defaultIPv6CIDR},
+			existingCIDR: &networkingapiv1.ServiceCIDR{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: DefaultServiceCIDRName,
+				},
+				Spec: networkingapiv1.ServiceCIDRSpec{
+					CIDRs: []string{defaultIPv4CIDR, defaultIPv6CIDR},
+				},
+				Status: networkingapiv1.ServiceCIDRStatus{}, // No conditions
+			},
+			expectedAction: [][]string{{"patch", "servicecidrs", "status"}},
+		},
+		{
+			name:            "existing Ready=False condition, cidrs match -> no patch",
+			controllerCIDRs: []string{defaultIPv4CIDR, defaultIPv6CIDR},
+			existingCIDR: &networkingapiv1.ServiceCIDR{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: DefaultServiceCIDRName,
+				},
+				Spec: networkingapiv1.ServiceCIDRSpec{
+					CIDRs: []string{defaultIPv4CIDR, defaultIPv6CIDR},
+				},
+				Status: networkingapiv1.ServiceCIDRStatus{
+					Conditions: []metav1.Condition{
+						{
+							Type:   networkingapiv1.ServiceCIDRConditionReady,
+							Status: metav1.ConditionFalse,
+							Reason: "SomeReason",
+						},
+					},
+				},
+			},
+			expectedAction: [][]string{}, // No patch expected, just logs/events
+		},
+		{
+			name:            "existing Ready=True condition -> no patch",
+			controllerCIDRs: []string{defaultIPv4CIDR, defaultIPv6CIDR},
+			existingCIDR: &networkingapiv1.ServiceCIDR{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: DefaultServiceCIDRName,
+				},
+				Spec: networkingapiv1.ServiceCIDRSpec{
+					CIDRs: []string{defaultIPv4CIDR, defaultIPv6CIDR},
+				},
+				Status: networkingapiv1.ServiceCIDRStatus{
+					Conditions: []metav1.Condition{
+						{
+							Type:   networkingapiv1.ServiceCIDRConditionReady,
+							Status: metav1.ConditionTrue,
+						},
+					},
+				},
+			},
+			expectedAction: [][]string{},
+		},
+		{
+			name:            "ServiceCIDR being deleted -> no patch",
+			controllerCIDRs: []string{defaultIPv4CIDR, defaultIPv6CIDR},
+			existingCIDR: &networkingapiv1.ServiceCIDR{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:              DefaultServiceCIDRName,
+					DeletionTimestamp: ptr.To(metav1.Now()),
+				},
+				Spec: networkingapiv1.ServiceCIDRSpec{
+					CIDRs: []string{defaultIPv4CIDR, defaultIPv6CIDR},
+				},
+				Status: networkingapiv1.ServiceCIDRStatus{},
+			},
+			expectedAction: [][]string{},
+		},
+		{
+			name:            "IPv4 to IPv4 IPv6 is ok",
+			controllerCIDRs: []string{defaultIPv4CIDR, defaultIPv6CIDR},
+			existingCIDR: &networkingapiv1.ServiceCIDR{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: DefaultServiceCIDRName,
+				},
+				Spec: networkingapiv1.ServiceCIDRSpec{
+					CIDRs: []string{defaultIPv4CIDR}, // Existing is single-stack IPv4
+				},
+				Status: networkingapiv1.ServiceCIDRStatus{},
+			},
+			expectedAction: [][]string{{"update", "servicecidrs"}},
+		},
+		{
+			name:            "IPv4 to IPv6 IPv4 - switching primary IP family leaves in inconsistent state",
+			controllerCIDRs: []string{defaultIPv6CIDR, defaultIPv4CIDR},
+			existingCIDR: &networkingapiv1.ServiceCIDR{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: DefaultServiceCIDRName,
+				},
+				Spec: networkingapiv1.ServiceCIDRSpec{
+					CIDRs: []string{defaultIPv4CIDR}, // Existing is single-stack IPv4
+				},
+				Status: networkingapiv1.ServiceCIDRStatus{},
+			},
+			expectedAction: [][]string{},
+		},
+		{
+			name:            "IPv6 to IPv6 IPv4",
+			controllerCIDRs: []string{defaultIPv6CIDR, defaultIPv4CIDR},
+			existingCIDR: &networkingapiv1.ServiceCIDR{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: DefaultServiceCIDRName,
+				},
+				Spec: networkingapiv1.ServiceCIDRSpec{
+					CIDRs: []string{defaultIPv6CIDR}, // Existing is single-stack IPv6
+				},
+				Status: networkingapiv1.ServiceCIDRStatus{},
+			},
+			expectedAction: [][]string{{"update", "servicecidrs"}},
+		},
+		{
+			name:            "IPv6 to IPv4 IPv6 - switching primary IP family leaves in inconsistent state",
+			controllerCIDRs: []string{defaultIPv4CIDR, defaultIPv6CIDR},
+			existingCIDR: &networkingapiv1.ServiceCIDR{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: DefaultServiceCIDRName,
+				},
+				Spec: networkingapiv1.ServiceCIDRSpec{
+					CIDRs: []string{defaultIPv6CIDR}, // Existing is single-stack IPv6
+				},
+				Status: networkingapiv1.ServiceCIDRStatus{},
+			},
+			expectedAction: [][]string{},
+		},
+		{
+			name:            "IPv6 IPv4 to IPv4 IPv6 - switching primary IP family leaves in inconsistent state",
+			controllerCIDRs: []string{defaultIPv4CIDR, defaultIPv6CIDR},
+			existingCIDR: &networkingapiv1.ServiceCIDR{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: DefaultServiceCIDRName,
+				},
+				Spec: networkingapiv1.ServiceCIDRSpec{
+					CIDRs: []string{defaultIPv6CIDR, defaultIPv4CIDR}, // Existing is dual stack, IPv6-primary
+				},
+				Status: networkingapiv1.ServiceCIDRStatus{},
+			},
+			expectedAction: [][]string{},
+		},
+		{
+			name:            "IPv4 IPv6 to IPv4 - needs operator attention for the remaining IPv6 Services",
+			controllerCIDRs: []string{defaultIPv4CIDR},
+			existingCIDR: &networkingapiv1.ServiceCIDR{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: DefaultServiceCIDRName,
+				},
+				Spec: networkingapiv1.ServiceCIDRSpec{
+					CIDRs: []string{defaultIPv4CIDR, defaultIPv6CIDR},
+				},
+				Status: networkingapiv1.ServiceCIDRStatus{},
+			},
+			expectedAction: [][]string{},
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.name, func(t *testing.T) {
+			// Initialize the controller and client with the existing ServiceCIDR
+			client, controller := newController(t, tc.controllerCIDRs, tc.existingCIDR)
+
+			// Run one sync iteration
+			err := controller.sync()
+			if err != nil {
+				t.Errorf("unexpected error: %v", err)
+			}
+
+			// Verify the expected actions
+			expectAction(t, client.Actions(), tc.expectedAction)
+		})
+	}
+}
+
 func expectAction(t *testing.T, actions []k8stesting.Action, expected [][]string) {
 	t.Helper()
 	if len(actions) != len(expected) {
@@ -106,8 +106,7 @@ func (serviceCIDRStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
 func (serviceCIDRStrategy) ValidateUpdate(ctx context.Context, new, old runtime.Object) field.ErrorList {
 	newServiceCIDR := new.(*networking.ServiceCIDR)
 	oldServiceCIDR := old.(*networking.ServiceCIDR)
-	errList := validation.ValidateServiceCIDR(newServiceCIDR)
-	errList = append(errList, validation.ValidateServiceCIDRUpdate(newServiceCIDR, oldServiceCIDR)...)
+	errList := validation.ValidateServiceCIDRUpdate(newServiceCIDR, oldServiceCIDR)
 	return errList
 }
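Note that ValidateUpdate now runs only ValidateServiceCIDRUpdate; full-object validation is no longer stacked on top, so an invalid CIDR in an otherwise-immutable update surfaces as a single error (the strategy test below is adjusted accordingly), while the spec of a legal single-to-dual upgrade is still checked via validateServiceCIDRSpec inside the update path.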
@@ -47,15 +47,15 @@ func TestServiceCIDRStrategy(t *testing.T) {
 
 	errors := Strategy.Validate(context.TODO(), obj)
 	if len(errors) != 2 {
-		t.Errorf("Expected 2 validation errors for invalid object, got %d", len(errors))
+		t.Errorf("Expected 2 validation errors for invalid object, got %d : %v", len(errors), errors)
 	}
 
 	oldObj := newServiceCIDR()
 	newObj := oldObj.DeepCopy()
 	newObj.Spec.CIDRs = []string{"bad cidr"}
 	errors = Strategy.ValidateUpdate(context.TODO(), newObj, oldObj)
-	if len(errors) != 2 {
-		t.Errorf("Expected 2 validation errors for invalid update, got %d", len(errors))
+	if len(errors) != 1 {
+		t.Errorf("Expected 1 validation error for invalid update, got %d : %v", len(errors), errors)
 	}
 }
@@ -1484,7 +1484,7 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) {
 	sharedEtcd := framework.SharedEtcd()
 	tCtx := ktesting.Init(t)
 
-	// Create an IPv4 only dual stack control-plane
+	// Create an IPv4 only control-plane
 	apiServerOptions := kubeapiservertesting.NewDefaultTestServerOptions()
 	s := kubeapiservertesting.StartTestServerOrDie(t,
 		apiServerOptions,
@@ -1532,6 +1532,10 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) {
 		},
 	}
 
+	// create a copy of the service so we can test creating it again after reconfiguring the control plane
+	svcDual := svc.DeepCopy()
+	svcDual.Name = "svc-dual"
+
 	// create the service
 	_, err = client.CoreV1().Services(metav1.NamespaceDefault).Create(tCtx, svc, metav1.CreateOptions{})
 	if err != nil {
@@ -1582,9 +1586,25 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) {
 	if err = validateServiceAndClusterIPFamily(svc, []v1.IPFamily{v1.IPv4Protocol}); err != nil {
 		t.Fatalf("Unexpected error validating the service %s %v", svc.Name, err)
 	}
+	// validate that new services created with prefer dual are now dual stack
+
+	// create the service
+	_, err = client.CoreV1().Services(metav1.NamespaceDefault).Create(tCtx, svcDual, metav1.CreateOptions{})
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	// validate the service was created correctly
+	svcDual, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svcDual.Name, metav1.GetOptions{})
+	if err != nil {
+		t.Fatalf("Unexpected error to get the service %s %v", svcDual.Name, err)
+	}
+	// service should be dual stack
+	if err = validateServiceAndClusterIPFamily(svcDual, []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol}); err != nil {
+		t.Fatalf("Unexpected error validating the service %s %v", svcDual.Name, err)
+	}
 }
 
-func TestDowngradeServicePreferToDualStack(t *testing.T) {
+func TestDowngradeServicePreferFromDualStack(t *testing.T) {
 	tCtx := ktesting.Init(t)
 
 	// Create a dual stack control-plane
@@ -1634,6 +1654,11 @@ func TestDowngradeServicePreferFromDualStack(t *testing.T) {
 			},
 		},
 	}
 
+	// create a copy of the service so we can test creating it again after reconfiguring the control plane
+	svcSingle := svc.DeepCopy()
+	svcSingle.Name = "svc-single"
+
 	// create the service
 	_, err = client.CoreV1().Services(metav1.NamespaceDefault).Create(tCtx, svc, metav1.CreateOptions{})
 	if err != nil {
@@ -1684,6 +1709,23 @@ func TestDowngradeServicePreferFromDualStack(t *testing.T) {
 	if err = validateServiceAndClusterIPFamily(svc, []v1.IPFamily{v1.IPv4Protocol, v1.IPv6Protocol}); err != nil {
 		t.Fatalf("Unexpected error validating the service %s %v", svc.Name, err)
 	}
+
+	// validate that new services created with prefer dual are now single stack
+
+	// create the service
+	_, err = client.CoreV1().Services(metav1.NamespaceDefault).Create(tCtx, svcSingle, metav1.CreateOptions{})
+	if err != nil {
+		t.Fatalf("Unexpected error: %v", err)
+	}
+	// validate the service was created correctly
+	svcSingle, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svcSingle.Name, metav1.GetOptions{})
+	if err != nil {
+		t.Fatalf("Unexpected error to get the service %s %v", svcSingle.Name, err)
+	}
+	// service should be single stack
+	if err = validateServiceAndClusterIPFamily(svcSingle, []v1.IPFamily{v1.IPv4Protocol}); err != nil {
+		t.Fatalf("Unexpected error validating the service %s %v", svcSingle.Name, err)
+	}
 }
 
 type serviceMergePatch struct {
@@ -19,6 +19,8 @@ package servicecidr
 
 import (
 	"context"
+	"fmt"
+	"reflect"
 	"strings"
 	"testing"
 	"time"
@@ -29,6 +31,7 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/informers"
 	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/klog/v2"
 	kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
 	"k8s.io/kubernetes/pkg/controller/servicecidrs"
 	"k8s.io/kubernetes/pkg/controlplane/controller/defaultservicecidr"
@@ -293,3 +296,319 @@ func TestMigrateServiceCIDR(t *testing.T) {
 	}
 
 }
+
+// TestServiceCIDRMigrationScenarios tests various migration paths for ServiceCIDRs.
+func TestServiceCIDRMigrationScenarios(t *testing.T) {
+	ipv4CIDRSmall := "10.0.0.0/29" // 6 IPs
+	ipv4CIDRBig := "10.1.0.0/16"
+	ipv6CIDRSmall := "2001:db8:1::/125" // 6 IPs
+	ipv6CIDRBig := "2001:db8:2::/112"
+
+	testCases := []struct {
+		name                          string
+		initialCIDRs                  []string
+		migratedCIDRs                 []string
+		preMigrationSvcName           string
+		postMigrationSvcName          string
+		expectedPostMigrationSvcError bool // changing the primary IP family while retaining the old allocator
+		expectInconsistentState       bool // the new ServiceCIDRs configured by flags are not applied
+	}{
+		// --- No Change ---
+		{
+			name:                 "IPv4 -> IPv4 (no change)",
+			initialCIDRs:         []string{ipv4CIDRSmall},
+			migratedCIDRs:        []string{ipv4CIDRSmall},
+			preMigrationSvcName:  "svc-pre-v4-v4",
+			postMigrationSvcName: "svc-post-v4-v4",
+		},
+		{
+			name:                 "IPv6 -> IPv6 (no change)",
+			initialCIDRs:         []string{ipv6CIDRSmall},
+			migratedCIDRs:        []string{ipv6CIDRSmall},
+			preMigrationSvcName:  "svc-pre-v6-v6",
+			postMigrationSvcName: "svc-post-v6-v6",
+		},
+		{
+			name:                 "IPv4,IPv6 -> IPv4,IPv6 (no change)",
+			initialCIDRs:         []string{ipv4CIDRSmall, ipv6CIDRSmall},
+			migratedCIDRs:        []string{ipv4CIDRSmall, ipv6CIDRSmall},
+			preMigrationSvcName:  "svc-pre-v4v6-v4v6",
+			postMigrationSvcName: "svc-post-v4v6-v4v6",
+		},
+		{
+			name:                 "IPv6,IPv4 -> IPv6,IPv4 (no change)",
+			initialCIDRs:         []string{ipv6CIDRSmall, ipv4CIDRSmall},
+			migratedCIDRs:        []string{ipv6CIDRSmall, ipv4CIDRSmall},
+			preMigrationSvcName:  "svc-pre-v6v4-v6v4",
+			postMigrationSvcName: "svc-post-v6v4-v6v4",
+		},
+		// --- Valid Upgrades ---
+		{
+			name:                 "IPv4 -> IPv4,IPv6 (upgrade)",
+			initialCIDRs:         []string{ipv4CIDRSmall},
+			migratedCIDRs:        []string{ipv4CIDRSmall, ipv6CIDRBig},
+			preMigrationSvcName:  "svc-pre-v4-v4v6",
+			postMigrationSvcName: "svc-post-v4-v4v6",
+		},
+		{
+			name:                 "IPv6 -> IPv6,IPv4 (upgrade)",
+			initialCIDRs:         []string{ipv6CIDRSmall},
+			migratedCIDRs:        []string{ipv6CIDRSmall, ipv4CIDRBig},
+			preMigrationSvcName:  "svc-pre-v6-v6v4",
+			postMigrationSvcName: "svc-post-v6-v6v4",
+		},
+		// --- Invalid Migrations (require manual intervention) ---
+		{
+			name:                          "IPv4,IPv6 -> IPv6,IPv4 (change primary)",
+			initialCIDRs:                  []string{ipv4CIDRSmall, ipv6CIDRSmall},
+			migratedCIDRs:                 []string{ipv6CIDRSmall, ipv4CIDRSmall},
+			preMigrationSvcName:           "svc-pre-v4v6-v6v4",
+			postMigrationSvcName:          "svc-post-v4v6-v6v4",
+			expectedPostMigrationSvcError: true,
+			expectInconsistentState:       true,
+		},
+		{
+			name:                          "IPv6,IPv4 -> IPv4,IPv6 (change primary)",
+			initialCIDRs:                  []string{ipv6CIDRSmall, ipv4CIDRSmall},
+			migratedCIDRs:                 []string{ipv4CIDRSmall, ipv6CIDRSmall},
+			preMigrationSvcName:           "svc-pre-v6v4-v4v6",
+			postMigrationSvcName:          "svc-post-v6v4-v4v6",
+			expectedPostMigrationSvcError: true,
+			expectInconsistentState:       true,
+		},
+		{
+			name:                    "IPv4,IPv6 -> IPv4 (downgrade)",
+			initialCIDRs:            []string{ipv4CIDRSmall, ipv6CIDRSmall},
+			migratedCIDRs:           []string{ipv4CIDRSmall},
+			preMigrationSvcName:     "svc-pre-v4v6-v4",
+			postMigrationSvcName:    "svc-post-v4v6-v4",
+			expectInconsistentState: true,
+		},
+		{
+			name:                          "IPv4,IPv6 -> IPv6 (downgrade)",
+			initialCIDRs:                  []string{ipv4CIDRSmall, ipv6CIDRSmall},
+			migratedCIDRs:                 []string{ipv6CIDRSmall},
+			preMigrationSvcName:           "svc-pre-v4v6-v6",
+			postMigrationSvcName:          "svc-post-v4v6-v6",
+			expectedPostMigrationSvcError: true,
+			expectInconsistentState:       true,
+		},
+		{
+			name:                          "IPv4 -> IPv6 (change family)",
+			initialCIDRs:                  []string{ipv4CIDRSmall},
+			migratedCIDRs:                 []string{ipv6CIDRSmall},
+			preMigrationSvcName:           "svc-pre-v4-v6",
+			postMigrationSvcName:          "svc-post-v4-v6",
+			expectedPostMigrationSvcError: true,
+			expectInconsistentState:       true,
+		},
+		{
+			name:                          "IPv6 -> IPv4 (change family)",
+			initialCIDRs:                  []string{ipv6CIDRSmall},
+			migratedCIDRs:                 []string{ipv4CIDRSmall},
+			preMigrationSvcName:           "svc-pre-v6-v4",
+			postMigrationSvcName:          "svc-post-v6-v4",
+			expectedPostMigrationSvcError: true,
+			expectInconsistentState:       true,
+		},
+		{
+			name:                          "IPv4 -> IPv6,IPv4 (upgrade, change primary)",
+			initialCIDRs:                  []string{ipv4CIDRSmall},
+			migratedCIDRs:                 []string{ipv6CIDRBig, ipv4CIDRSmall}, // change primary during upgrade
+			preMigrationSvcName:           "svc-pre-v4-v6v4",
+			postMigrationSvcName:          "svc-post-v4-v6v4",
+			expectedPostMigrationSvcError: true,
+			expectInconsistentState:       true,
+		},
+		{
+			name:                          "IPv6 -> IPv4,IPv6 (upgrade, change primary)",
+			initialCIDRs:                  []string{ipv6CIDRSmall},
+			migratedCIDRs:                 []string{ipv4CIDRBig, ipv6CIDRSmall}, // change primary during upgrade
+			preMigrationSvcName:           "svc-pre-v6-v4v6",
+			postMigrationSvcName:          "svc-post-v6-v4v6",
+			expectedPostMigrationSvcError: true,
+			expectInconsistentState:       true,
+		},
+	}
+
+	for i, tc := range testCases {
+		tc := tc
+		t.Run(tc.name, func(t *testing.T) {
+			tCtx := ktesting.Init(t)
+			etcdOptions := framework.SharedEtcd()
+			apiServerOptions := kubeapiservertesting.NewDefaultTestServerOptions()
+			resyncPeriod := 12 * time.Hour
+
+			// --- Initial Setup ---
+			initialFlags := []string{
+				"--service-cluster-ip-range=" + strings.Join(tc.initialCIDRs, ","),
+				"--advertise-address=" + strings.Split(tc.initialCIDRs[0], "/")[0], // the advertise address MUST match the cluster primary ip family
+				"--disable-admission-plugins=ServiceAccount",
+				// fmt.Sprintf("--feature-gates=%s=true,%s=true", features.MultiCIDRServiceAllocator, features.DisableAllocatorDualWrite),
+			}
+			t.Logf("Starting API server with CIDRs: %v", tc.initialCIDRs)
+			s1 := kubeapiservertesting.StartTestServerOrDie(t, apiServerOptions, initialFlags, etcdOptions)
+			client1, err := clientset.NewForConfig(s1.ClientConfig)
+			if err != nil {
+				s1.TearDownFn()
+				t.Fatalf("Failed to create client for initial server: %v", err)
+			}
+
+			ns := framework.CreateNamespaceOrDie(client1, fmt.Sprintf("migrate-%d", i), t)
+
+			informers1 := informers.NewSharedInformerFactory(client1, resyncPeriod)
+			controllerCtx1, cancelController1 := context.WithCancel(tCtx)
+			go servicecidrs.NewController(
+				controllerCtx1,
+				informers1.Networking().V1().ServiceCIDRs(),
+				informers1.Networking().V1().IPAddresses(),
+				client1,
+			).Run(controllerCtx1, 5)
+			informers1.Start(controllerCtx1.Done())
+			informers1.WaitForCacheSync(controllerCtx1.Done())
+
+			// Wait for the default ServiceCIDR to be ready
+			if err := waitForServiceCIDRState(tCtx, client1, tc.initialCIDRs, true); err != nil {
+				s1.TearDownFn()
+				cancelController1()
+				t.Fatalf("Initial default ServiceCIDR did not become ready: %v", err)
+			}
+
+			// Create the pre-migration service
+			preSvc, err := client1.CoreV1().Services(ns.Name).Create(tCtx, makeService(tc.preMigrationSvcName), metav1.CreateOptions{})
+			if err != nil {
+				s1.TearDownFn()
+				cancelController1()
+				t.Fatalf("Failed to create pre-migration service: %v", err)
+			}
+			t.Logf("Pre-migration service %s created with ClusterIPs: %v", preSvc.Name, preSvc.Spec.ClusterIPs)
+
+			// Basic verification of the pre-migration service IP
+			if len(preSvc.Spec.ClusterIPs) == 0 {
+				s1.TearDownFn()
+				cancelController1()
+				t.Fatalf("Pre-migration service %s has no ClusterIPs", preSvc.Name)
+			}
+			if !cidrContainsIP(tc.initialCIDRs[0], preSvc.Spec.ClusterIPs[0]) {
+				s1.TearDownFn()
+				cancelController1()
+				t.Fatalf("Pre-migration service %s primary IP %s not in expected range %s", preSvc.Name, preSvc.Spec.ClusterIPs[0], tc.initialCIDRs[0])
+			}
+
+			// --- Migration ---
+			t.Logf("Shutting down initial API server and controller")
+			cancelController1()
+			s1.TearDownFn()
+
+			t.Logf("Starting migrated API server with CIDRs: %v", tc.migratedCIDRs)
+			migratedFlags := []string{
+				"--service-cluster-ip-range=" + strings.Join(tc.migratedCIDRs, ","),
+				"--advertise-address=" + strings.Split(tc.migratedCIDRs[0], "/")[0], // the advertise address MUST match the cluster configured primary ip family
+				"--disable-admission-plugins=ServiceAccount",
+				// fmt.Sprintf("--feature-gates=%s=true,%s=true", features.MultiCIDRServiceAllocator, features.DisableAllocatorDualWrite),
+			}
+			s2 := kubeapiservertesting.StartTestServerOrDie(t, apiServerOptions, migratedFlags, etcdOptions)
+			defer s2.TearDownFn() // ensure cleanup even on test failure
+
+			client2, err := clientset.NewForConfig(s2.ClientConfig)
+			if err != nil {
+				t.Fatalf("Failed to create client for migrated server: %v", err)
+			}
+			defer framework.DeleteNamespaceOrDie(client2, ns, t)
+
+			informers2 := informers.NewSharedInformerFactory(client2, resyncPeriod)
+			controllerCtx2, cancelController2 := context.WithCancel(tCtx)
+			defer cancelController2() // ensure the controller context is cancelled
+			go servicecidrs.NewController(
+				controllerCtx2,
+				informers2.Networking().V1().ServiceCIDRs(),
+				informers2.Networking().V1().IPAddresses(),
+				client2,
+			).Run(controllerCtx2, 5)
+			informers2.Start(controllerCtx2.Done())
+			informers2.WaitForCacheSync(controllerCtx2.Done())
+
+			// Wait for the default ServiceCIDR to reflect the migrated state.
+			// For inconsistent states, we expect the existing CIDRs to be kept.
+			expectedCIDRs := tc.migratedCIDRs
+			if tc.expectInconsistentState {
+				expectedCIDRs = tc.initialCIDRs
+			}
+			if err := waitForServiceCIDRState(tCtx, client2, expectedCIDRs, true); err != nil {
+				t.Fatalf("Migrated default ServiceCIDR did not reach expected state: %v", err)
+			}
+
+			// --- Post-Migration Verification ---
+
+			// Verify the pre-migration service still exists and retains its IP(s)
+			preSvcMigrated, err := client2.CoreV1().Services(ns.Name).Get(tCtx, tc.preMigrationSvcName, metav1.GetOptions{})
+			if err != nil {
+				t.Fatalf("Failed to get pre-migration service after migration: %v", err)
+			}
+			if !reflect.DeepEqual(preSvcMigrated.Spec.ClusterIPs, preSvc.Spec.ClusterIPs) {
+				t.Errorf("Pre-migration service %s ClusterIPs changed after migration. Before: %v, After: %v",
+					preSvcMigrated.Name, preSvc.Spec.ClusterIPs, preSvcMigrated.Spec.ClusterIPs)
+			}
+			// Create the post-migration service
+			postSvc, err := client2.CoreV1().Services(ns.Name).Create(tCtx, makeService(tc.postMigrationSvcName), metav1.CreateOptions{})
+			if err != nil && !tc.expectedPostMigrationSvcError {
+				t.Fatalf("Failed to create post-migration service: %v", err)
+			} else if err == nil && tc.expectedPostMigrationSvcError {
+				return
+			}
+
+			t.Logf("Post-migration service %s created with ClusterIPs: %v, Families: %v", postSvc.Name, postSvc.Spec.ClusterIPs, postSvc.Spec.IPFamilies)
+			// Check that the IPs are within the migrated CIDR ranges
+			if len(postSvc.Spec.ClusterIPs) > 0 && !cidrContainsIP(expectedCIDRs[0], postSvc.Spec.ClusterIPs[0]) {
+				t.Errorf("Post-migration service %s primary IP %s not in expected range %s", postSvc.Name, postSvc.Spec.ClusterIPs[0], expectedCIDRs[0])
+			}
+		})
+	}
+}
+
+// waitForServiceCIDRState waits for the default ServiceCIDR to exist, match the expected CIDRs,
+// and have the specified Ready condition status.
+func waitForServiceCIDRState(ctx context.Context, client clientset.Interface, expectedCIDRs []string, expectReady bool) error {
+	pollCtx, cancel := context.WithTimeout(ctx, 60*time.Second)
+	defer cancel()
+
+	return wait.PollUntilContextCancel(pollCtx, 500*time.Millisecond, true, func(ctx context.Context) (bool, error) {
+		cidr, err := client.NetworkingV1().ServiceCIDRs().Get(ctx, defaultservicecidr.DefaultServiceCIDRName, metav1.GetOptions{})
+		if err != nil {
+			if apierrors.IsNotFound(err) {
+				return true, fmt.Errorf("default ServiceCIDR must exist")
+			}
+			return false, nil
+		}
+
+		// Check that the CIDRs match
+		if !reflect.DeepEqual(cidr.Spec.CIDRs, expectedCIDRs) {
+			klog.Infof("Waiting for ServiceCIDR %s CIDRs to match %v, current: %v", defaultservicecidr.DefaultServiceCIDRName, expectedCIDRs, cidr.Spec.CIDRs)
+			return false, nil
+		}
+
+		// Check the Ready condition
+		isReady := false
+		foundReadyCondition := false
+		for _, condition := range cidr.Status.Conditions {
+			if condition.Type == networkingv1.ServiceCIDRConditionReady {
+				foundReadyCondition = true
+				isReady = condition.Status == metav1.ConditionTrue
+				break
+			}
+		}
+
+		if !foundReadyCondition && expectReady {
+			klog.Infof("Waiting for ServiceCIDR %s Ready condition to be set...", defaultservicecidr.DefaultServiceCIDRName)
+			return false, nil // Ready condition not found yet
+		}
+
+		if isReady != expectReady {
+			klog.Infof("Waiting for ServiceCIDR %s Ready condition to be %v, current: %v", defaultservicecidr.DefaultServiceCIDRName, expectReady, isReady)
+			return false, nil // Ready condition doesn't match the expectation
+		}
+
+		klog.Infof("ServiceCIDR %s reached desired state (Ready: %v, CIDRs: %v)", defaultservicecidr.DefaultServiceCIDRName, expectReady, expectedCIDRs)
+		return true, nil // all conditions met
+	})
+}