Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-09-08 20:50:24 +00:00)
Merge pull request #110362 from wojtek-t/fix_leaking_goroutines_5
Fix leaking goroutines in multiple integration tests
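All of the controller hunks below converge on the same lifecycle: the constructor only builds the controller (including its event broadcaster), while Run(ctx) starts the events pipeline and defers broadcaster.Shutdown() together with the workqueue ShutDown() calls, so cancelling the context tears down every background goroutine. The following is a minimal sketch of that lifecycle, assuming a hypothetical FooController rather than any of the real controllers touched in this PR:

package foo

import (
	"context"

	v1 "k8s.io/api/core/v1"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
	"k8s.io/client-go/util/workqueue"
)

// FooController is a hypothetical controller, used only to illustrate the pattern.
type FooController struct {
	client      clientset.Interface
	broadcaster record.EventBroadcaster
	recorder    record.EventRecorder
	queue       workqueue.RateLimitingInterface
}

// NewFooController only builds objects; nothing here spawns goroutines,
// so constructing the controller in a test is side-effect free.
func NewFooController(client clientset.Interface) *FooController {
	broadcaster := record.NewBroadcaster()
	return &FooController{
		client:      client,
		broadcaster: broadcaster,
		recorder:    broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "foo-controller"}),
		queue:       workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "foo"),
	}
}

// Run owns every background goroutine: the events pipeline is started here and
// shut down via defer, so cancelling ctx reliably cleans everything up.
func (c *FooController) Run(ctx context.Context) {
	defer utilruntime.HandleCrash()

	// Start events processing pipeline.
	c.broadcaster.StartStructuredLogging(0)
	c.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.client.CoreV1().Events("")})
	defer c.broadcaster.Shutdown()

	defer c.queue.ShutDown()

	// Workers would be started here, e.g. with wait.UntilWithContext(ctx, ...).
	<-ctx.Done()
}

The point of the split is that tests can construct a controller freely; goroutines only exist between the call to Run and the cancellation of its context.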
@@ -31,10 +31,8 @@ import (
 	"k8s.io/apimachinery/pkg/util/wait"
 	certificatesinformers "k8s.io/client-go/informers/certificates/v1"
 	clientset "k8s.io/client-go/kubernetes"
-	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	certificateslisters "k8s.io/client-go/listers/certificates/v1"
 	"k8s.io/client-go/tools/cache"
-	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/controller"
@@ -60,11 +58,6 @@ func NewCertificateController(
 	csrInformer certificatesinformers.CertificateSigningRequestInformer,
 	handler func(context.Context, *certificates.CertificateSigningRequest) error,
 ) *CertificateController {
-	// Send events to the apiserver
-	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartStructuredLogging(0)
-	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
-
 	cc := &CertificateController{
 		name: name,
 		kubeClient: kubeClient,
@@ -358,7 +358,18 @@ func verifyGroupKind(controllerRef *metav1.OwnerReference, expectedKind string,
 
 func (dc *DisruptionController) Run(ctx context.Context) {
 	defer utilruntime.HandleCrash()
+
+	// Start events processing pipeline.
+	if dc.kubeClient != nil {
+		klog.Infof("Sending events to api server.")
+		dc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: dc.kubeClient.CoreV1().Events("")})
+	} else {
+		klog.Infof("No api server defined - no events will be sent to API server.")
+	}
+	defer dc.broadcaster.Shutdown()
+
 	defer dc.queue.ShutDown()
+	defer dc.recheckQueue.ShutDown()
 
 	klog.Infof("Starting disruption controller")
 	defer klog.Infof("Shutting down disruption controller")
@@ -367,12 +378,6 @@ func (dc *DisruptionController) Run(ctx context.Context) {
 		return
 	}
 
-	if dc.kubeClient != nil {
-		klog.Infof("Sending events to api server.")
-		dc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: dc.kubeClient.CoreV1().Events("")})
-	} else {
-		klog.Infof("No api server defined - no events will be sent to API server.")
-	}
 	go wait.UntilWithContext(ctx, dc.worker, time.Second)
 	go wait.Until(dc.recheckWorker, time.Second, ctx.Done())
 
@@ -300,6 +300,7 @@ type Controller struct {
 
 	getPodsAssignedToNode func(nodeName string) ([]*v1.Pod, error)
 
+	broadcaster record.EventBroadcaster
 	recorder record.EventRecorder
 
 	// Value controlling Controller monitoring period, i.e. how often does Controller
@@ -372,13 +373,6 @@ func NewNodeLifecycleController(
 
 	eventBroadcaster := record.NewBroadcaster()
 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "node-controller"})
-	eventBroadcaster.StartStructuredLogging(0)
-
-	klog.Infof("Sending events to api server.")
-	eventBroadcaster.StartRecordingToSink(
-		&v1core.EventSinkImpl{
-			Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events(""),
-		})
 
 	if kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
 		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("node_lifecycle_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
@@ -390,6 +384,7 @@ func NewNodeLifecycleController(
 		knownNodeSet: make(map[string]*v1.Node),
 		nodeHealthMap: newNodeHealthMap(),
 		nodeEvictionMap: newNodeEvictionMap(),
+		broadcaster: eventBroadcaster,
 		recorder: recorder,
 		nodeMonitorPeriod: nodeMonitorPeriod,
 		nodeStartupGracePeriod: nodeStartupGracePeriod,
@@ -536,6 +531,19 @@ func NewNodeLifecycleController(
 func (nc *Controller) Run(ctx context.Context) {
 	defer utilruntime.HandleCrash()
 
+	// Start events processing pipeline.
+	nc.broadcaster.StartStructuredLogging(0)
+	klog.Infof("Sending events to api server.")
+	nc.broadcaster.StartRecordingToSink(
+		&v1core.EventSinkImpl{
+			Interface: v1core.New(nc.kubeClient.CoreV1().RESTClient()).Events(""),
+		})
+	defer nc.broadcaster.Shutdown()
+
+	// Close node update queue to cleanup go routine.
+	defer nc.nodeUpdateQueue.ShutDown()
+	defer nc.podUpdateQueue.ShutDown()
+
 	klog.Infof("Starting node controller")
 	defer klog.Infof("Shutting down node controller")
 
@@ -547,10 +555,6 @@ func (nc *Controller) Run(ctx context.Context) {
 		go nc.taintManager.Run(ctx)
 	}
 
-	// Close node update queue to cleanup go routine.
-	defer nc.nodeUpdateQueue.ShutDown()
-	defer nc.podUpdateQueue.ShutDown()
-
 	// Start workers to reconcile labels and/or update NoSchedule taint for nodes.
 	for i := 0; i < scheduler.UpdateWorkerSize; i++ {
 		// Thanks to "workqueue", each worker just need to get item from queue, because
@@ -82,6 +82,7 @@ type GetPodsByNodeNameFunc func(nodeName string) ([]*v1.Pod, error)
 // from Nodes tainted with NoExecute Taints.
 type NoExecuteTaintManager struct {
 	client clientset.Interface
+	broadcaster record.EventBroadcaster
 	recorder record.EventRecorder
 	getPod GetPodFunc
 	getNode GetNodeFunc
@@ -158,16 +159,10 @@ func getMinTolerationTime(tolerations []v1.Toleration) time.Duration {
 func NewNoExecuteTaintManager(ctx context.Context, c clientset.Interface, getPod GetPodFunc, getNode GetNodeFunc, getPodsAssignedToNode GetPodsByNodeNameFunc) *NoExecuteTaintManager {
 	eventBroadcaster := record.NewBroadcaster()
 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "taint-controller"})
-	eventBroadcaster.StartStructuredLogging(0)
-	if c != nil {
-		klog.InfoS("Sending events to api server")
-		eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.CoreV1().Events("")})
-	} else {
-		klog.Fatalf("kubeClient is nil when starting NodeController")
-	}
 
 	tm := &NoExecuteTaintManager{
 		client: c,
+		broadcaster: eventBroadcaster,
 		recorder: recorder,
 		getPod: getPod,
 		getNode: getNode,
@@ -184,8 +179,23 @@ func NewNoExecuteTaintManager(ctx context.Context, c clientset.Interface, getPod
 
 // Run starts NoExecuteTaintManager which will run in loop until `stopCh` is closed.
 func (tc *NoExecuteTaintManager) Run(ctx context.Context) {
+	defer utilruntime.HandleCrash()
+
 	klog.InfoS("Starting NoExecuteTaintManager")
 
+	// Start events processing pipeline.
+	tc.broadcaster.StartStructuredLogging(0)
+	if tc.client != nil {
+		klog.InfoS("Sending events to api server")
+		tc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: tc.client.CoreV1().Events("")})
+	} else {
+		klog.Fatalf("kubeClient is nil when starting NodeController")
+	}
+	defer tc.broadcaster.Shutdown()
+
+	defer tc.nodeUpdateQueue.ShutDown()
+	defer tc.podUpdateQueue.ShutDown()
+
 	for i := 0; i < UpdateWorkerSize; i++ {
 		tc.nodeUpdateChannels = append(tc.nodeUpdateChannels, make(chan nodeUpdateItem, NodeUpdateChannelSize))
 		tc.podUpdateChannels = append(tc.podUpdateChannels, make(chan podUpdateItem, podUpdateChannelSize))
@@ -58,7 +58,6 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	utilnet "k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/apiserver/pkg/endpoints/discovery"
 	apiserverfeatures "k8s.io/apiserver/pkg/features"
 	"k8s.io/apiserver/pkg/registry/generic"
@@ -477,7 +476,7 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget)
 			time.Duration(c.ExtraConfig.IdentityLeaseRenewIntervalSeconds)*time.Second,
 			metav1.NamespaceSystem,
 			labelAPIServerHeartbeat)
-		go controller.Run(wait.NeverStop)
+		go controller.Run(hookContext.StopCh)
 		return nil
 	})
 	m.GenericAPIServer.AddPostStartHookOrDie("start-kube-apiserver-identity-lease-garbage-collector", func(hookContext genericapiserver.PostStartHookContext) error {
@@ -490,7 +489,7 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget)
 			time.Duration(c.ExtraConfig.IdentityLeaseDurationSeconds)*time.Second,
 			metav1.NamespaceSystem,
 			KubeAPIServerIdentityLeaseLabelSelector,
-		).Run(wait.NeverStop)
+		).Run(hookContext.StopCh)
 		return nil
 	})
 	}
@@ -51,12 +51,12 @@ import (
 func TestCSRDuration(t *testing.T) {
 	t.Parallel()
 
-	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
-	t.Cleanup(cancel)
-
 	s := kubeapiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
 	t.Cleanup(s.TearDownFn)
 
+	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
+	t.Cleanup(cancel)
+
 	// assert that the metrics we collect during the test run match expectations
 	// we have 7 valid test cases below that request a duration of which 6 should have their duration honored
 	wantMetricStrings := []string{
@@ -101,7 +101,9 @@ func setup(t *testing.T) (*kubeapiservertesting.TestServer, *disruption.Disrupti
 func TestPDBWithScaleSubresource(t *testing.T) {
 	s, pdbc, informers, clientSet, apiExtensionClient, dynamicClient := setup(t)
 	defer s.TearDownFn()
-	ctx := context.TODO()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	nsName := "pdb-scale-subresource"
 	createNs(ctx, t, nsName, clientSet)
 
@@ -187,16 +189,14 @@ func TestPDBWithScaleSubresource(t *testing.T) {
 }
 
 func TestEmptySelector(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
 	testcases := []struct {
 		name string
-		createPDBFunc func(clientSet clientset.Interface, name, nsName string, minAvailable intstr.IntOrString) error
+		createPDBFunc func(ctx context.Context, clientSet clientset.Interface, name, nsName string, minAvailable intstr.IntOrString) error
 		expectedCurrentHealthy int32
 	}{
 		{
 			name: "v1beta1 should not target any pods",
-			createPDBFunc: func(clientSet clientset.Interface, name, nsName string, minAvailable intstr.IntOrString) error {
+			createPDBFunc: func(ctx context.Context, clientSet clientset.Interface, name, nsName string, minAvailable intstr.IntOrString) error {
 				pdb := &v1beta1.PodDisruptionBudget{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: name,
@@ -213,7 +213,7 @@ func TestEmptySelector(t *testing.T) {
 		},
 		{
 			name: "v1 should target all pods",
-			createPDBFunc: func(clientSet clientset.Interface, name, nsName string, minAvailable intstr.IntOrString) error {
+			createPDBFunc: func(ctx context.Context, clientSet clientset.Interface, name, nsName string, minAvailable intstr.IntOrString) error {
 				pdb := &policyv1.PodDisruptionBudget{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: name,
@@ -235,6 +235,9 @@ func TestEmptySelector(t *testing.T) {
 			s, pdbc, informers, clientSet, _, _ := setup(t)
 			defer s.TearDownFn()
 
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
 			nsName := fmt.Sprintf("pdb-empty-selector-%d", i)
 			createNs(ctx, t, nsName, clientSet)
 
@@ -252,7 +255,7 @@ func TestEmptySelector(t *testing.T) {
 			waitToObservePods(t, informers.Core().V1().Pods().Informer(), 4, v1.PodRunning)
 
 			pdbName := "test-pdb"
-			if err := tc.createPDBFunc(clientSet, pdbName, nsName, minAvailable); err != nil {
+			if err := tc.createPDBFunc(ctx, clientSet, pdbName, nsName, minAvailable); err != nil {
 				t.Errorf("Error creating PodDisruptionBudget: %v", err)
 			}
 
@@ -271,16 +274,14 @@ func TestEmptySelector(t *testing.T) {
 }
 
 func TestSelectorsForPodsWithoutLabels(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
 	testcases := []struct {
 		name string
-		createPDBFunc func(clientSet clientset.Interface, name, nsName string, minAvailable intstr.IntOrString) error
+		createPDBFunc func(ctx context.Context, clientSet clientset.Interface, name, nsName string, minAvailable intstr.IntOrString) error
 		expectedCurrentHealthy int32
 	}{
 		{
 			name: "pods with no labels can be targeted by v1 PDBs with empty selector",
-			createPDBFunc: func(clientSet clientset.Interface, name, nsName string, minAvailable intstr.IntOrString) error {
+			createPDBFunc: func(ctx context.Context, clientSet clientset.Interface, name, nsName string, minAvailable intstr.IntOrString) error {
 				pdb := &policyv1.PodDisruptionBudget{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: name,
@@ -297,7 +298,7 @@ func TestSelectorsForPodsWithoutLabels(t *testing.T) {
 		},
 		{
 			name: "pods with no labels can be targeted by v1 PDBs with DoesNotExist selector",
-			createPDBFunc: func(clientSet clientset.Interface, name, nsName string, minAvailable intstr.IntOrString) error {
+			createPDBFunc: func(ctx context.Context, clientSet clientset.Interface, name, nsName string, minAvailable intstr.IntOrString) error {
 				pdb := &policyv1.PodDisruptionBudget{
 					ObjectMeta: metav1.ObjectMeta{
 						Name: name,
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
name: "pods with no labels can be targeted by v1beta1 PDBs with DoesNotExist selector",
|
name: "pods with no labels can be targeted by v1beta1 PDBs with DoesNotExist selector",
|
||||||
createPDBFunc: func(clientSet clientset.Interface, name, nsName string, minAvailable intstr.IntOrString) error {
|
createPDBFunc: func(ctx context.Context, clientSet clientset.Interface, name, nsName string, minAvailable intstr.IntOrString) error {
|
||||||
pdb := &v1beta1.PodDisruptionBudget{
|
pdb := &v1beta1.PodDisruptionBudget{
|
||||||
ObjectMeta: metav1.ObjectMeta{
|
ObjectMeta: metav1.ObjectMeta{
|
||||||
Name: name,
|
Name: name,
|
||||||
@@ -350,6 +351,9 @@ func TestSelectorsForPodsWithoutLabels(t *testing.T) {
 			s, pdbc, informers, clientSet, _, _ := setup(t)
 			defer s.TearDownFn()
 
+			ctx, cancel := context.WithCancel(context.Background())
+			defer cancel()
+
 			nsName := fmt.Sprintf("pdb-selectors-%d", i)
 			createNs(ctx, t, nsName, clientSet)
 
@@ -360,7 +364,7 @@ func TestSelectorsForPodsWithoutLabels(t *testing.T) {
 
 			// Create the PDB first and wait for it to settle.
 			pdbName := "test-pdb"
-			if err := tc.createPDBFunc(clientSet, pdbName, nsName, minAvailable); err != nil {
+			if err := tc.createPDBFunc(ctx, clientSet, pdbName, nsName, minAvailable); err != nil {
 				t.Errorf("Error creating PodDisruptionBudget: %v", err)
 			}
 			waitPDBStable(ctx, t, clientSet, 0, nsName, pdbName)
@@ -498,9 +502,15 @@ func waitToObservePods(t *testing.T, podInformer cache.SharedIndexInformer, podN
 }
 
 func TestPatchCompatibility(t *testing.T) {
-	s, _, _, clientSet, _, _ := setup(t)
+	s, pdbc, _, clientSet, _, _ := setup(t)
 	defer s.TearDownFn()
 
+	// Even though pdbc isn't used in this test, its creation is already
+	// spawning some goroutines. So we need to run it to ensure they won't leak.
+	ctx, cancel := context.WithCancel(context.Background())
+	cancel()
+	pdbc.Run(ctx)
+
 	testcases := []struct {
 		name string
 		version string
@@ -634,5 +644,4 @@ func TestPatchCompatibility(t *testing.T) {
 			}
 		})
 	}
-
 }
@@ -58,13 +58,17 @@ func TestEventCompatibility(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
 
 	stopCh := make(chan struct{})
+	defer close(stopCh)
 	oldBroadcaster := record.NewBroadcaster()
+	defer oldBroadcaster.Shutdown()
 	oldRecorder := oldBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "integration"})
 	oldBroadcaster.StartRecordingToSink(&typedv1.EventSinkImpl{Interface: client.CoreV1().Events("")})
 	oldRecorder.Eventf(regarding, v1.EventTypeNormal, "started", "note")
+
 	newBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
+	defer newBroadcaster.Shutdown()
 	newRecorder := newBroadcaster.NewRecorder(scheme.Scheme, "k8s.io/kube-scheduler")
 	newBroadcaster.StartRecordingToSink(stopCh)
 	newRecorder.Eventf(regarding, related, v1.EventTypeNormal, "memoryPressure", "killed", "memory pressure")
@@ -26,9 +26,8 @@ import (
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/wait"
-	clientset "k8s.io/client-go/kubernetes"
-	restclient "k8s.io/client-go/rest"
-	netutils "k8s.io/utils/net"
+	"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
+	"k8s.io/kubernetes/pkg/controlplane"
 
 	"k8s.io/kubernetes/test/integration/framework"
 )
@@ -38,26 +37,23 @@ import (
 // mistakenly, repair the ClusterIP assigned to the Service that is being deleted.
 // https://issues.k8s.io/87603
 func TestServicesFinalizersRepairLoop(t *testing.T) {
 
 	serviceCIDR := "10.0.0.0/16"
 	clusterIP := "10.0.0.20"
 	interval := 5 * time.Second
 
-	cfg := framework.NewIntegrationTestControlPlaneConfig()
-	_, cidr, err := netutils.ParseCIDRSloppy(serviceCIDR)
-	if err != nil {
-		t.Fatalf("bad cidr: %v", err)
-	}
-	cfg.ExtraConfig.ServiceIPRange = *cidr
-	cfg.ExtraConfig.RepairServicesInterval = interval
-	_, s, closeFn := framework.RunAnAPIServer(cfg)
-	defer closeFn()
-
-	client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL})
-
+	client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
+		ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
+			opts.ServiceClusterIPRanges = serviceCIDR
+		},
+		ModifyServerConfig: func(cfg *controlplane.Config) {
+			cfg.ExtraConfig.RepairServicesInterval = interval
+		},
+	})
+	defer tearDownFn()
 	// verify client is working
 	if err := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
-		_, err = client.CoreV1().Endpoints(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
+		_, err := client.CoreV1().Endpoints(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
 		if err != nil {
 			t.Logf("error fetching endpoints: %v", err)
 			return false, nil
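The test-side hunks apply a complementary pattern: create the cancellable context after the test server so that t.Cleanup, which runs in LIFO order, cancels it before the server is torn down, and drive the Run method of any constructed controller with that context even when the test does not otherwise need it (as in TestPatchCompatibility). A minimal sketch of that shape, using placeholder helpers rather than the real kubeapiservertesting and framework APIs:

package example

import (
	"context"
	"testing"
)

// startTestServer and runController are placeholders standing in for helpers
// such as kubeapiservertesting.StartTestServerOrDie and a controller's Run
// method; they are not the real APIs.
func startTestServer(t *testing.T) (teardown func()) { return func() {} }

func runController(ctx context.Context) { <-ctx.Done() }

func TestNoLeakedGoroutines(t *testing.T) {
	// Start the server first and register its teardown.
	teardown := startTestServer(t)
	t.Cleanup(teardown)

	// Create the cancellable context after the server: t.Cleanup runs in LIFO
	// order, so cancel() fires first and stops the controller's goroutines
	// before the server teardown (and any leak checks it performs) runs.
	ctx, cancel := context.WithCancel(context.Background())
	t.Cleanup(cancel)

	go runController(ctx)

	// ... test body ...
}

This ordering is consistent with the reordering in TestCSRDuration above, where the context and its cleanup now come after StartTestServerOrDie and t.Cleanup(s.TearDownFn).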