move repair loop interval to a constant

use ExtraConfig to configure the repair interval

and add an integration test for services finalizers and
possible races with the services repair loop.
Antonio Ojea 2021-06-17 18:17:50 +02:00 committed by Antonio Ojea
parent dcfe8f5d5c
commit 7c12daed0f
4 changed files with 176 additions and 3 deletions


@ -42,7 +42,9 @@ import (
"k8s.io/kubernetes/pkg/util/async"
)
-const kubernetesServiceName = "kubernetes"
+const (
+	kubernetesServiceName = "kubernetes"
+)
// Controller is the controller manager for the core bootstrap Kubernetes
// controller loops, which manage creating the "kubernetes" service, the
@ -110,11 +112,11 @@ func (c *completedConfig) NewBootstrapController(legacyRESTStorage corerest.Lega
SecondaryServiceClusterIPRegistry: legacyRESTStorage.SecondaryServiceClusterIPAllocator,
SecondaryServiceClusterIPRange: c.ExtraConfig.SecondaryServiceIPRange,
-ServiceClusterIPInterval: 3 * time.Minute,
+ServiceClusterIPInterval: c.ExtraConfig.RepairServicesInterval,
ServiceNodePortRegistry: legacyRESTStorage.ServiceNodePortAllocator,
ServiceNodePortRange: c.ExtraConfig.ServiceNodePortRange,
-ServiceNodePortInterval: 3 * time.Minute,
+ServiceNodePortInterval: c.ExtraConfig.RepairServicesInterval,
PublicIP: c.GenericConfig.PublicAddress,
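
For context, the two intervals wired above drive the periodic repair passes for the Services ClusterIP and NodePort allocators; making them configurable through ExtraConfig is what lets the integration test below shrink the 3-minute default to a few seconds. Here is a minimal sketch of such a periodic runner using k8s.io/apimachinery's wait.Until; the rest of the names are hypothetical, and this is not the controller's actual repair code:

// A sketch of a repair pass driven by a configurable interval, similar in
// shape to how the bootstrap controller runs its repair loops.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	interval := 2 * time.Second // would come from ExtraConfig.RepairServicesInterval
	stopCh := make(chan struct{})

	go wait.Until(func() {
		// a real pass would reconcile allocated ClusterIPs/NodePorts against existing Services
		fmt.Println("repair pass at", time.Now().Format(time.RFC3339))
	}, interval, stopCh)

	time.Sleep(5 * time.Second) // let a couple of passes run
	close(stopCh)
}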


@ -120,6 +120,8 @@ const (
KubeAPIServer = "kube-apiserver"
// KubeAPIServerIdentityLeaseLabelSelector selects kube-apiserver identity leases
KubeAPIServerIdentityLeaseLabelSelector = IdentityLeaseComponentLabelKey + "=" + KubeAPIServer
+// repairLoopInterval defines the interval used to run the Services ClusterIP and NodePort repair loops
+repairLoopInterval = 3 * time.Minute
)
// ExtraConfig defines extra configuration for the master
@ -199,6 +201,10 @@ type ExtraConfig struct {
IdentityLeaseDurationSeconds int
IdentityLeaseRenewIntervalSeconds int
+// RepairServicesInterval is the interval used by the repair loops for
+// the Services NodePort and ClusterIP resources
+RepairServicesInterval time.Duration
}
// Config defines configuration for the master
@ -322,6 +328,10 @@ func (c *Config) Complete() CompletedConfig {
cfg.ExtraConfig.EndpointReconcilerConfig.Reconciler = c.createEndpointReconciler()
}
+if cfg.ExtraConfig.RepairServicesInterval == 0 {
+	cfg.ExtraConfig.RepairServicesInterval = repairLoopInterval
+}
return CompletedConfig{&cfg}
}
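
A standalone sketch of the defaulting added to Complete() above: a zero RepairServicesInterval falls back to the repairLoopInterval constant, while an explicit value, such as the 5-second interval the integration test below sets, is kept as-is. The types here are compressed stand-ins, not the real kube-apiserver config structs:

package main

import (
	"fmt"
	"time"
)

const repairLoopInterval = 3 * time.Minute // mirrors the constant added above

// extraConfig stands in for the relevant part of ExtraConfig.
type extraConfig struct {
	RepairServicesInterval time.Duration
}

// complete reproduces only the defaulting behavior from Complete().
func complete(cfg extraConfig) extraConfig {
	if cfg.RepairServicesInterval == 0 {
		cfg.RepairServicesInterval = repairLoopInterval
	}
	return cfg
}

func main() {
	fmt.Println(complete(extraConfig{}).RepairServicesInterval)                                        // 3m0s: default applied
	fmt.Println(complete(extraConfig{RepairServicesInterval: 5 * time.Second}).RepairServicesInterval) // 5s: explicit override kept
}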


@ -0,0 +1,27 @@
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"testing"
"k8s.io/kubernetes/test/integration/framework"
)
func TestMain(m *testing.M) {
framework.EtcdMain(m.Run)
}


@ -0,0 +1,134 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"context"
"testing"
"time"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
netutils "k8s.io/utils/net"
"k8s.io/kubernetes/test/integration/framework"
)
// TestServicesFinalizersRepairLoop tests that Services with finalizers participate
// in object deletion, and that the Services repair controller doesn't mistakenly
// repair the ClusterIP assigned to a Service that is being deleted.
// https://issues.k8s.io/87603
func TestServicesFinalizersRepairLoop(t *testing.T) {
serviceCIDR := "10.0.0.0/16"
clusterIP := "10.0.0.20"
interval := 5 * time.Second
cfg := framework.NewIntegrationTestControlPlaneConfig()
_, cidr, err := netutils.ParseCIDRSloppy(serviceCIDR)
if err != nil {
t.Fatalf("bad cidr: %v", err)
}
cfg.ExtraConfig.ServiceIPRange = *cidr
cfg.ExtraConfig.RepairServicesInterval = interval
_, s, closeFn := framework.RunAnAPIServer(cfg)
defer closeFn()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL})
// verify client is working
if err := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
_, err = client.CoreV1().Endpoints(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
if err != nil {
t.Logf("error fetching endpoints: %v", err)
return false, nil
}
return true, nil
}); err != nil {
t.Errorf("server without enabled endpoints failed to register: %v", err)
}
// Create a NodePort service with one finalizer
svcNodePort := v1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "svc",
Finalizers: []string{"foo.bar/some-finalizer"},
},
Spec: v1.ServiceSpec{
ClusterIP: clusterIP,
Ports: []v1.ServicePort{{
Port: 8443,
NodePort: 30443,
TargetPort: intstr.FromInt(8443),
Protocol: v1.ProtocolTCP,
}},
Type: v1.ServiceTypeNodePort,
},
}
// Create service
if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), &svcNodePort, metav1.CreateOptions{}); err != nil {
t.Errorf("unexpected error creating service: %v", err)
}
t.Logf("Created service: %s", svcNodePort.Name)
// Check the service has been created correctly
svc, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svcNodePort.Name, metav1.GetOptions{})
if err != nil || svc.Spec.ClusterIP != clusterIP {
t.Errorf("created service is not correct: %v", err)
}
t.Logf("Service created successfully: %v", svc)
// Delete service
if err := client.CoreV1().Services(metav1.NamespaceDefault).Delete(context.TODO(), svcNodePort.Name, metav1.DeleteOptions{}); err != nil {
t.Errorf("unexpected error deleting service: %v", err)
}
t.Logf("Deleted service: %s", svcNodePort.Name)
// wait long enough for the repair loops to run while the Service is pending deletion
time.Sleep(interval + 1*time.Second)
// Check that the service was not deleted and the ClusterIP is still allocated
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svcNodePort.Name, metav1.GetOptions{})
if err != nil || svc.Spec.ClusterIP != clusterIP {
t.Errorf("created service is not correct: %v", err)
}
t.Logf("Service after Delete: %v", svc)
// Remove the finalizer
if _, err = client.CoreV1().Services(metav1.NamespaceDefault).Patch(context.TODO(), svcNodePort.Name, types.JSONPatchType, []byte(`[{"op":"remove","path":"/metadata/finalizers"}]`), metav1.PatchOptions{}); err != nil {
t.Errorf("unexpected error removing finalizer: %v", err)
}
t.Logf("Removed service finalizer: %s", svcNodePort.Name)
// Check that the service was deleted
_, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), svcNodePort.Name, metav1.GetOptions{})
if err == nil {
t.Errorf("service was not delete: %v", err)
}
// Try to create service again
if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), &svcNodePort, metav1.CreateOptions{}); err != nil {
t.Errorf("unexpected error creating service: %v", err)
}
t.Logf("Created service: %s", svcNodePort.Name)
}