Merge pull request #110264 from wojtek-t/fix_leaking_goroutines_4
Fix leaking goroutines in multiple integration tests
Commit 03d0e2c338
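The change follows one pattern across the controllers touched below: the constructor only creates the event broadcaster, while Run() starts the events pipeline and defers its shutdown, so stopping a controller (as the integration tests now do) also stops the broadcaster goroutine. A minimal, hypothetical sketch of that lifecycle using the client-go calls visible in the diff — the controller type, field names and component name here are illustrative, not the real ones:

package controllerexample

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// controller is an illustrative stand-in for the endpoints / endpointslice controllers.
type controller struct {
	client           kubernetes.Interface
	eventBroadcaster record.EventBroadcaster
	recorder         record.EventRecorder
}

// newController only constructs the broadcaster; it no longer starts it, so building
// a controller (as many tests do) spawns no goroutines.
func newController(client kubernetes.Interface) *controller {
	broadcaster := record.NewBroadcaster()
	return &controller{
		client:           client,
		eventBroadcaster: broadcaster,
		recorder:         broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"}),
	}
}

// Run owns the broadcaster's lifecycle: it starts the events pipeline on entry and
// shuts it down when Run returns, which is what prevents the goroutine leak.
func (c *controller) Run(stopCh <-chan struct{}) {
	c.eventBroadcaster.StartStructuredLogging(0)
	c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.client.CoreV1().Events("")})
	defer c.eventBroadcaster.Shutdown()

	<-stopCh
}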
@@ -73,8 +73,6 @@ const (
 func NewEndpointController(podInformer coreinformers.PodInformer, serviceInformer coreinformers.ServiceInformer,
     endpointsInformer coreinformers.EndpointsInformer, client clientset.Interface, endpointUpdatesBatchPeriod time.Duration) *Controller {
     broadcaster := record.NewBroadcaster()
-    broadcaster.StartStructuredLogging(0)
-    broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
     recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-controller"})
 
     if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
@@ -173,6 +171,12 @@ type Controller struct {
 // endpoints will be handled in parallel.
 func (e *Controller) Run(ctx context.Context, workers int) {
     defer utilruntime.HandleCrash()
 
+    // Start events processing pipeline.
+    e.eventBroadcaster.StartStructuredLogging(0)
+    e.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: e.client.CoreV1().Events("")})
+    defer e.eventBroadcaster.Shutdown()
+
     defer e.queue.ShutDown()
 
     klog.Infof("Starting endpoint controller")
@@ -86,8 +86,6 @@ func NewController(podInformer coreinformers.PodInformer,
     endpointUpdatesBatchPeriod time.Duration,
 ) *Controller {
     broadcaster := record.NewBroadcaster()
-    broadcaster.StartStructuredLogging(0)
-    broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
     recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-slice-controller"})
 
     if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
@@ -252,6 +250,12 @@ type Controller struct {
 // Run will not return until stopCh is closed.
 func (c *Controller) Run(workers int, stopCh <-chan struct{}) {
     defer utilruntime.HandleCrash()
 
+    // Start events processing pipeline.
+    c.eventBroadcaster.StartLogging(klog.Infof)
+    c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.client.CoreV1().Events("")})
+    defer c.eventBroadcaster.Shutdown()
+
     defer c.queue.ShutDown()
 
     klog.Infof("Starting endpoint slice controller")
@@ -75,8 +75,6 @@ func NewController(endpointsInformer coreinformers.EndpointsInformer,
     endpointUpdatesBatchPeriod time.Duration,
 ) *Controller {
     broadcaster := record.NewBroadcaster()
-    broadcaster.StartLogging(klog.Infof)
-    broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
     recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "endpoint-slice-mirroring-controller"})
 
     if client != nil && client.CoreV1().RESTClient().GetRateLimiter() != nil {
@@ -207,6 +205,12 @@ type Controller struct {
 // Run will not return until stopCh is closed.
 func (c *Controller) Run(workers int, stopCh <-chan struct{}) {
     defer utilruntime.HandleCrash()
 
+    // Start events processing pipeline.
+    c.eventBroadcaster.StartLogging(klog.Infof)
+    c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.client.CoreV1().Events("")})
+    defer c.eventBroadcaster.Shutdown()
+
     defer c.queue.ShutDown()
 
     klog.Infof("Starting EndpointSliceMirroring controller")
@@ -28,9 +28,11 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/informers"
     "k8s.io/client-go/kubernetes/fake"
+    v1core "k8s.io/client-go/kubernetes/typed/core/v1"
     "k8s.io/client-go/tools/cache"
     "k8s.io/client-go/tools/leaderelection/resourcelock"
 
+    "k8s.io/klog/v2"
     "k8s.io/kubernetes/pkg/controller"
 )
 
@@ -58,6 +60,11 @@ func newController(batchPeriod time.Duration) (*fake.Clientset, *endpointSliceMi
         client,
         batchPeriod)
 
+    // The event processing pipeline is normally started via Run() method.
+    // However, since we don't start it in unit tests, we explicitly start it here.
+    esController.eventBroadcaster.StartLogging(klog.Infof)
+    esController.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
+
     esController.endpointsSynced = alwaysReady
     esController.endpointSlicesSynced = alwaysReady
     esController.servicesSynced = alwaysReady
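Because Run() now owns starting the event pipeline and the unit tests never call Run(), the test constructor above starts the pipeline itself. A hedged sketch of that arrangement with a fake clientset — the helper name is illustrative, only the two broadcaster calls come from the diff:

package controllerexample

import (
	"k8s.io/client-go/kubernetes/fake"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog/v2"
)

// startEventPipelineForTest mirrors what the unit test above does: since Run() is
// never invoked in unit tests, logging and sink recording are started explicitly
// on the controller's broadcaster.
func startEventPipelineForTest(broadcaster record.EventBroadcaster, client *fake.Clientset) {
	broadcaster.StartLogging(klog.Infof)
	broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.CoreV1().Events("")})
}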
@@ -34,7 +34,7 @@ import (
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     "k8s.io/apimachinery/pkg/api/meta"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/runtime/schema"
+    "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/wait"
     yamlutil "k8s.io/apimachinery/pkg/util/yaml"
@@ -44,28 +44,23 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     restclient "k8s.io/client-go/rest"
     featuregatetesting "k8s.io/component-base/featuregate/testing"
-    "k8s.io/kubernetes/pkg/controlplane"
+    kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
     "k8s.io/kubernetes/test/integration/framework"
 )
 
-// TODO(wojtek-t): Migrate to use testing.TestServer instead.
-func setup(t testing.TB, groupVersions ...schema.GroupVersion) (clientset.Interface, framework.CloseFunc) {
-    opts := framework.ControlPlaneConfigOptions{EtcdOptions: framework.DefaultEtcdOptions()}
-    opts.EtcdOptions.DefaultStorageMediaType = "application/vnd.kubernetes.protobuf"
-    controlPlaneConfig := framework.NewIntegrationTestControlPlaneConfigWithOptions(&opts)
-    if len(groupVersions) > 0 {
-        resourceConfig := controlplane.DefaultAPIResourceConfigSource()
-        resourceConfig.EnableVersions(groupVersions...)
-        controlPlaneConfig.ExtraConfig.APIResourceConfigSource = resourceConfig
-    }
-    controlPlaneConfig.GenericConfig.OpenAPIConfig = framework.DefaultOpenAPIConfig()
-    _, s, closeFn := framework.RunAnAPIServer(controlPlaneConfig)
+func setup(t testing.TB) (clientset.Interface, kubeapiservertesting.TearDownFunc) {
+    // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
+    server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd())
 
-    clientSet, err := clientset.NewForConfig(&restclient.Config{Host: s.URL, QPS: -1})
+    config := restclient.CopyConfig(server.ClientConfig)
+    // There are some tests (in scale_test.go) that rely on the response to be returned in JSON.
+    // So we overwrite it here.
+    config.ContentType = runtime.ContentTypeJSON
+    clientSet, err := clientset.NewForConfig(config)
     if err != nil {
         t.Fatalf("Error in create clientset: %v", err)
     }
-    return clientSet, closeFn
+    return clientSet, server.TearDownFn
 }
 
 // TestApplyAlsoCreates makes sure that PATCH requests with the apply content type
@@ -2812,15 +2807,13 @@ spec:
 }
 
 func TestStopTrackingManagedFieldsOnFeatureDisabled(t *testing.T) {
-    sharedEtcd := framework.DefaultEtcdOptions()
-    controlPlaneConfig := framework.NewIntegrationTestControlPlaneConfigWithOptions(&framework.ControlPlaneConfigOptions{
-        EtcdOptions: sharedEtcd,
-    })
-    controlPlaneConfig.GenericConfig.OpenAPIConfig = framework.DefaultOpenAPIConfig()
+    sharedEtcd := framework.SharedEtcd()
 
     defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, true)()
-    _, instanceConfig, closeFn := framework.RunAnAPIServer(controlPlaneConfig)
-    client, err := clientset.NewForConfig(&restclient.Config{Host: instanceConfig.URL, QPS: -1})
+    // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
+    server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, sharedEtcd)
+    client, err := clientset.NewForConfig(server.ClientConfig)
     if err != nil {
         t.Fatalf("Error in create clientset: %v", err)
     }
@@ -2870,14 +2863,15 @@ spec:
     }
 
     // Restart server with server-side apply disabled
-    closeFn()
+    server.TearDownFn()
     defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, genericfeatures.ServerSideApply, false)()
-    _, instanceConfig, closeFn = framework.RunAnAPIServer(controlPlaneConfig)
-    client, err = clientset.NewForConfig(&restclient.Config{Host: instanceConfig.URL, QPS: -1})
+    server = kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, sharedEtcd)
+    defer server.TearDownFn()
+    client, err = clientset.NewForConfig(server.ClientConfig)
     if err != nil {
         t.Fatalf("Error in create clientset: %v", err)
     }
-    defer closeFn()
 
     _, err = client.CoreV1().RESTClient().Patch(types.ApplyPatchType).
         AbsPath("/apis/apps/v1").
@@ -155,12 +155,12 @@ func TestCronJobLaunchesPodAndCleansUp(t *testing.T) {
 
     cjClient := clientSet.BatchV1().CronJobs(ns.Name)
 
-    stopCh := make(chan struct{})
-    defer close(stopCh)
+    ctx, cancel := context.WithCancel(context.Background())
+    defer cancel()
 
-    informerSet.Start(stopCh)
-    go cjc.Run(context.TODO(), 1)
-    go jc.Run(context.TODO(), 1)
+    informerSet.Start(ctx.Done())
+    go cjc.Run(ctx, 1)
+    go jc.Run(ctx, 1)
 
     _, err := cjClient.Create(context.TODO(), newCronJob(cronJobName, ns.Name, "* * * * ?"), metav1.CreateOptions{})
     if err != nil {
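The CronJob test swaps its ad-hoc stop channel for a cancellable context, so the informers and both controllers share one cancellation signal. A small, self-contained illustration of why that avoids leaked goroutines — the names here are placeholders, not the real controllers:

package example

import (
	"context"
	"testing"
)

// runWorker stands in for a controller's Run method: it blocks until ctx is cancelled.
func runWorker(ctx context.Context, done chan<- struct{}) {
	<-ctx.Done()
	close(done)
}

// TestWorkerStopsOnCancel shows the property the test above relies on: everything
// started from one cancellable context can be stopped by a single cancel call,
// so the test leaves no goroutine behind.
func TestWorkerStopsOnCancel(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	done := make(chan struct{})
	go runWorker(ctx, done)

	cancel()
	<-done // the worker goroutine has exited
}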
@@ -51,7 +51,7 @@ import (
 
 var zero = int64(0)
 
-func setup(t *testing.T) (kubeapiservertesting.TearDownFunc, *daemon.DaemonSetsController, informers.SharedInformerFactory, clientset.Interface) {
+func setup(t *testing.T) (context.Context, kubeapiservertesting.TearDownFunc, *daemon.DaemonSetsController, informers.SharedInformerFactory, clientset.Interface) {
     // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
     server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount,TaintNodesByCondition"}, framework.SharedEtcd())
 
@@ -74,22 +74,15 @@ func setup(t *testing.T) (kubeapiservertesting.TearDownFunc, *daemon.DaemonSetsC
         t.Fatalf("error creating DaemonSets controller: %v", err)
     }
 
-    return server.TearDownFn, dc, informers, clientSet
-}
-
-func setupScheduler(
-    ctx context.Context,
-    t *testing.T,
-    cs clientset.Interface,
-    informerFactory informers.SharedInformerFactory,
-) {
+    ctx, cancel := context.WithCancel(context.Background())
+
     eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{
-        Interface: cs.EventsV1(),
+        Interface: clientSet.EventsV1(),
     })
 
     sched, err := scheduler.New(
-        cs,
-        informerFactory,
+        clientSet,
+        informers,
         nil,
         profile.NewRecorderFactory(eventBroadcaster),
         ctx.Done(),
@@ -99,8 +92,15 @@ func setupScheduler(
     }
 
     eventBroadcaster.StartRecordingToSink(ctx.Done())
 
     go sched.Run(ctx)
+
+    tearDownFn := func() {
+        cancel()
+        server.TearDownFn()
+        eventBroadcaster.Shutdown()
+    }
+
+    return ctx, tearDownFn, dc, informers, clientSet
 }
 
 func testLabels() map[string]string {
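The DaemonSet setup now folds the scheduler into setup() and returns a single teardown closure alongside the context, so one deferred call per test cancels the context, tears down the apiserver, and shuts down the event broadcaster. A generic sketch of that shape — startServer and startBroadcaster are stand-ins, not the real helpers:

package example

import "context"

// setupForTest returns the context the controllers run with plus one teardown
// function that releases every resource, so callers cannot forget a cleanup step.
func setupForTest(startServer func() (stop func()), startBroadcaster func() (stop func())) (context.Context, func()) {
	ctx, cancel := context.WithCancel(context.Background())
	stopServer := startServer()
	stopBroadcaster := startBroadcaster()

	tearDownFn := func() {
		cancel()          // stops controllers, informers and the scheduler started with ctx
		stopServer()      // shuts down the test apiserver
		stopBroadcaster() // stops the event broadcaster goroutine
	}
	return ctx, tearDownFn
}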
@@ -421,7 +421,7 @@ func forEachStrategy(t *testing.T, tf func(t *testing.T, strategy *apps.DaemonSe
 
 func TestOneNodeDaemonLaunchesPod(t *testing.T) {
     forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
-        closeFn, dc, informers, clientset := setup(t)
+        ctx, closeFn, dc, informers, clientset := setup(t)
         defer closeFn()
         ns := framework.CreateNamespaceOrDie(clientset, "one-node-daemonset-test", t)
         defer framework.DeleteNamespaceOrDie(clientset, ns, t)
@@ -431,12 +431,6 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) {
         nodeClient := clientset.CoreV1().Nodes()
         podInformer := informers.Core().V1().Pods().Informer()
 
-        ctx, cancel := context.WithCancel(context.Background())
-        defer cancel()
-
-        // Start Scheduler
-        setupScheduler(ctx, t, clientset, informers)
-
         informers.Start(ctx.Done())
         go dc.Run(ctx, 2)
 
@@ -460,7 +454,7 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) {
 
 func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
     forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
-        closeFn, dc, informers, clientset := setup(t)
+        ctx, closeFn, dc, informers, clientset := setup(t)
         defer closeFn()
         ns := framework.CreateNamespaceOrDie(clientset, "simple-daemonset-test", t)
         defer framework.DeleteNamespaceOrDie(clientset, ns, t)
@@ -470,15 +464,9 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
         nodeClient := clientset.CoreV1().Nodes()
         podInformer := informers.Core().V1().Pods().Informer()
 
-        ctx, cancel := context.WithCancel(context.Background())
-        defer cancel()
-
         informers.Start(ctx.Done())
         go dc.Run(ctx, 2)
 
-        // Start Scheduler
-        setupScheduler(ctx, t, clientset, informers)
-
         ds := newDaemonSet("foo", ns.Name)
         ds.Spec.UpdateStrategy = *strategy
         _, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{})
@@ -496,7 +484,7 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
 
 func TestDaemonSetWithNodeSelectorLaunchesPods(t *testing.T) {
     forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
-        closeFn, dc, informers, clientset := setup(t)
+        ctx, closeFn, dc, informers, clientset := setup(t)
         defer closeFn()
         ns := framework.CreateNamespaceOrDie(clientset, "simple-daemonset-test", t)
         defer framework.DeleteNamespaceOrDie(clientset, ns, t)
@@ -506,15 +494,9 @@ func TestDaemonSetWithNodeSelectorLaunchesPods(t *testing.T) {
         nodeClient := clientset.CoreV1().Nodes()
         podInformer := informers.Core().V1().Pods().Informer()
 
-        ctx, cancel := context.WithCancel(context.Background())
-        defer cancel()
-
         informers.Start(ctx.Done())
         go dc.Run(ctx, 2)
 
-        // Start Scheduler
-        setupScheduler(ctx, t, clientset, informers)
-
         ds := newDaemonSet("foo", ns.Name)
         ds.Spec.UpdateStrategy = *strategy
 
@@ -565,7 +547,7 @@ func TestDaemonSetWithNodeSelectorLaunchesPods(t *testing.T) {
 
 func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
     forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
-        closeFn, dc, informers, clientset := setup(t)
+        ctx, closeFn, dc, informers, clientset := setup(t)
         defer closeFn()
         ns := framework.CreateNamespaceOrDie(clientset, "simple-daemonset-test", t)
         defer framework.DeleteNamespaceOrDie(clientset, ns, t)
@@ -575,15 +557,9 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
         nodeClient := clientset.CoreV1().Nodes()
        podInformer := informers.Core().V1().Pods().Informer()
 
-        ctx, cancel := context.WithCancel(context.Background())
-        defer cancel()
-
         informers.Start(ctx.Done())
         go dc.Run(ctx, 2)
 
-        // Start Scheduler
-        setupScheduler(ctx, t, clientset, informers)
-
         ds := newDaemonSet("foo", ns.Name)
         ds.Spec.UpdateStrategy = *strategy
         _, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{})
@@ -612,7 +588,7 @@ func TestNotReadyNodeDaemonDoesLaunchPod(t *testing.T) {
 // not schedule Pods onto the nodes with insufficient resource.
 func TestInsufficientCapacityNode(t *testing.T) {
     forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
-        closeFn, dc, informers, clientset := setup(t)
+        ctx, closeFn, dc, informers, clientset := setup(t)
         defer closeFn()
         ns := framework.CreateNamespaceOrDie(clientset, "insufficient-capacity", t)
         defer framework.DeleteNamespaceOrDie(clientset, ns, t)
@@ -622,15 +598,9 @@ func TestInsufficientCapacityNode(t *testing.T) {
         podInformer := informers.Core().V1().Pods().Informer()
         nodeClient := clientset.CoreV1().Nodes()
 
-        ctx, cancel := context.WithCancel(context.Background())
-        defer cancel()
-
         informers.Start(ctx.Done())
         go dc.Run(ctx, 2)
 
-        // Start Scheduler
-        setupScheduler(ctx, t, clientset, informers)
-
         ds := newDaemonSet("foo", ns.Name)
         ds.Spec.Template.Spec = resourcePodSpec("", "120M", "75m")
         ds.Spec.UpdateStrategy = *strategy
@@ -676,7 +646,7 @@ func TestInsufficientCapacityNode(t *testing.T) {
 // TestLaunchWithHashCollision tests that a DaemonSet can be updated even if there is a
 // hash collision with an existing ControllerRevision
 func TestLaunchWithHashCollision(t *testing.T) {
-    closeFn, dc, informers, clientset := setup(t)
+    ctx, closeFn, dc, informers, clientset := setup(t)
     defer closeFn()
     ns := framework.CreateNamespaceOrDie(clientset, "one-node-daemonset-test", t)
     defer framework.DeleteNamespaceOrDie(clientset, ns, t)
@@ -685,15 +655,9 @@ func TestLaunchWithHashCollision(t *testing.T) {
     podInformer := informers.Core().V1().Pods().Informer()
     nodeClient := clientset.CoreV1().Nodes()
 
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-
     informers.Start(ctx.Done())
     go dc.Run(ctx, 2)
 
-    // Start Scheduler
-    setupScheduler(ctx, t, clientset, informers)
-
     // Create single node
     _, err := nodeClient.Create(context.TODO(), newNode("single-node", nil), metav1.CreateOptions{})
     if err != nil {
@@ -787,7 +751,7 @@ func TestLaunchWithHashCollision(t *testing.T) {
 // 2. Add a node to ensure the controller sync
 // 3. The dsc is expected to "PATCH" the existing pod label with new hash and deletes the old controllerrevision once finishes the update
 func TestDSCUpdatesPodLabelAfterDedupCurHistories(t *testing.T) {
-    closeFn, dc, informers, clientset := setup(t)
+    ctx, closeFn, dc, informers, clientset := setup(t)
     defer closeFn()
     ns := framework.CreateNamespaceOrDie(clientset, "one-node-daemonset-test", t)
     defer framework.DeleteNamespaceOrDie(clientset, ns, t)
@@ -796,15 +760,9 @@ func TestDSCUpdatesPodLabelAfterDedupCurHistories(t *testing.T) {
     podInformer := informers.Core().V1().Pods().Informer()
     nodeClient := clientset.CoreV1().Nodes()
 
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-
     informers.Start(ctx.Done())
     go dc.Run(ctx, 2)
 
-    // Start Scheduler
-    setupScheduler(ctx, t, clientset, informers)
-
     // Create single node
     _, err := nodeClient.Create(context.TODO(), newNode("single-node", nil), metav1.CreateOptions{})
     if err != nil {
@@ -915,7 +873,7 @@ func TestDSCUpdatesPodLabelAfterDedupCurHistories(t *testing.T) {
 // TestTaintedNode tests tainted node isn't expected to have pod scheduled
 func TestTaintedNode(t *testing.T) {
     forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
-        closeFn, dc, informers, clientset := setup(t)
+        ctx, closeFn, dc, informers, clientset := setup(t)
         defer closeFn()
         ns := framework.CreateNamespaceOrDie(clientset, "tainted-node", t)
         defer framework.DeleteNamespaceOrDie(clientset, ns, t)
@@ -925,15 +883,9 @@ func TestTaintedNode(t *testing.T) {
         podInformer := informers.Core().V1().Pods().Informer()
         nodeClient := clientset.CoreV1().Nodes()
 
-        ctx, cancel := context.WithCancel(context.Background())
-        defer cancel()
-
         informers.Start(ctx.Done())
         go dc.Run(ctx, 2)
 
-        // Start Scheduler
-        setupScheduler(ctx, t, clientset, informers)
-
         ds := newDaemonSet("foo", ns.Name)
         ds.Spec.UpdateStrategy = *strategy
         ds, err := dsClient.Create(context.TODO(), ds, metav1.CreateOptions{})
@@ -980,7 +932,7 @@ func TestTaintedNode(t *testing.T) {
 // to the Unschedulable nodes.
 func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
     forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
-        closeFn, dc, informers, clientset := setup(t)
+        ctx, closeFn, dc, informers, clientset := setup(t)
         defer closeFn()
         ns := framework.CreateNamespaceOrDie(clientset, "daemonset-unschedulable-test", t)
         defer framework.DeleteNamespaceOrDie(clientset, ns, t)
@@ -990,15 +942,9 @@ func TestUnschedulableNodeDaemonDoesLaunchPod(t *testing.T) {
         nodeClient := clientset.CoreV1().Nodes()
         podInformer := informers.Core().V1().Pods().Informer()
 
-        ctx, cancel := context.WithCancel(context.Background())
-        defer cancel()
-
         informers.Start(ctx.Done())
         go dc.Run(ctx, 2)
 
-        // Start Scheduler
-        setupScheduler(ctx, t, clientset, informers)
-
         ds := newDaemonSet("foo", ns.Name)
         ds.Spec.UpdateStrategy = *strategy
         ds.Spec.Template.Spec.HostNetwork = true
@@ -22,25 +22,23 @@ import (
 
     "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/runtime/schema"
-    clientset "k8s.io/client-go/kubernetes"
-    restclient "k8s.io/client-go/rest"
     "k8s.io/kubernetes/pkg/apis/core/helper"
+    "k8s.io/kubernetes/pkg/controlplane"
     "k8s.io/kubernetes/plugin/pkg/admission/defaulttolerationseconds"
     "k8s.io/kubernetes/test/integration/framework"
 )
 
 func TestAdmission(t *testing.T) {
-    controlPlaneConfig := framework.NewControlPlaneConfig()
-    controlPlaneConfig.GenericConfig.EnableProfiling = true
-    controlPlaneConfig.GenericConfig.AdmissionControl = defaulttolerationseconds.NewDefaultTolerationSeconds()
-    _, s, closeFn := framework.RunAnAPIServer(controlPlaneConfig)
-    defer closeFn()
+    client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
+        ModifyServerConfig: func(cfg *controlplane.Config) {
+            cfg.GenericConfig.EnableProfiling = true
+            cfg.GenericConfig.AdmissionControl = defaulttolerationseconds.NewDefaultTolerationSeconds()
+        },
+    })
+    defer tearDownFn()
 
-    client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}})
-
-    ns := framework.CreateTestingNamespace("default-toleration-seconds", t)
-    defer framework.DeleteTestingNamespace(ns, t)
+    ns := framework.CreateNamespaceOrDie(client, "default-toleration-seconds", t)
+    defer framework.DeleteNamespaceOrDie(client, ns, t)
 
     pod := v1.Pod{
         ObjectMeta: metav1.ObjectMeta{
@@ -29,42 +29,31 @@ import (
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/informers"
-    clientset "k8s.io/client-go/kubernetes"
-    restclient "k8s.io/client-go/rest"
+    "k8s.io/kubernetes/cmd/kube-apiserver/app/options"
    "k8s.io/kubernetes/pkg/controller/endpoint"
     "k8s.io/kubernetes/pkg/controller/endpointslice"
     "k8s.io/kubernetes/test/integration/framework"
-    netutils "k8s.io/utils/net"
 )
 
 func TestDualStackEndpoints(t *testing.T) {
     // Create an IPv4IPv6 dual stack control-plane
     serviceCIDR := "10.0.0.0/16"
-    secondaryServiceCIDR := "2001:db8:1::/48"
+    secondaryServiceCIDR := "2001:db8:1::/112"
     labelMap := func() map[string]string {
         return map[string]string{"foo": "bar"}
     }
 
-    cfg := framework.NewIntegrationTestControlPlaneConfig()
-    _, cidr, err := netutils.ParseCIDRSloppy(serviceCIDR)
-    if err != nil {
-        t.Fatalf("Bad cidr: %v", err)
-    }
-    cfg.ExtraConfig.ServiceIPRange = *cidr
-
-    _, secCidr, err := netutils.ParseCIDRSloppy(secondaryServiceCIDR)
-    if err != nil {
-        t.Fatalf("Bad cidr: %v", err)
-    }
-    cfg.ExtraConfig.SecondaryServiceIPRange = *secCidr
-
-    _, s, closeFn := framework.RunAnAPIServer(cfg)
-    defer closeFn()
-
-    client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL})
+    client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
+        ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
+            opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR)
+            // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
+            opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
+        },
+    })
+    defer tearDownFn()
 
     // Wait until the default "kubernetes" service is created.
-    if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
+    if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
         _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
         if err != nil && !apierrors.IsNotFound(err) {
             return false, err
@@ -156,8 +145,8 @@ func TestDualStackEndpoints(t *testing.T) {
 
     for i, tc := range testcases {
         t.Run(tc.name, func(t *testing.T) {
-            ns := framework.CreateTestingNamespace(fmt.Sprintf("test-endpointslice-dualstack-%d", i), t)
-            defer framework.DeleteTestingNamespace(ns, t)
+            ns := framework.CreateNamespaceOrDie(client, fmt.Sprintf("test-endpointslice-dualstack-%d", i), t)
+            defer framework.DeleteNamespaceOrDie(client, ns, t)
 
             // Create a pod with labels
             pod := &v1.Pod{
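The IPv6 service CIDRs in these tests also shrink from /48 to /112: the real kube-apiserver started by framework.StartTestServer validates the size of the service cluster IP range, and a /112 (65536 addresses) keeps it within the allowed allocator size; the exact limit is an assumption here, the prefix-length arithmetic is the point. A runnable illustration:

package main

import (
	"fmt"
	"net"
)

// hostBits returns the number of host bits in a CIDR; the range holds 2^hostBits addresses.
func hostBits(cidr string) (int, error) {
	_, ipnet, err := net.ParseCIDR(cidr)
	if err != nil {
		return 0, err
	}
	ones, bits := ipnet.Mask.Size()
	return bits - ones, nil
}

func main() {
	for _, cidr := range []string{"2001:db8:1::/48", "2001:db8:1::/112"} {
		n, err := hostBits(cidr)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s spans 2^%d addresses\n", cidr, n)
	}
}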
@@ -20,7 +20,6 @@ import (
     "context"
     "encoding/json"
     "fmt"
-    "net"
     "reflect"
     "strings"
     "testing"
@@ -36,9 +35,7 @@ import (
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/apimachinery/pkg/util/wait"
 
-    clientset "k8s.io/client-go/kubernetes"
-    restclient "k8s.io/client-go/rest"
-
+    "k8s.io/kubernetes/cmd/kube-apiserver/app/options"
     "k8s.io/kubernetes/test/integration/framework"
     netutils "k8s.io/utils/net"
 )
@@ -48,19 +45,15 @@ func TestCreateServiceSingleStackIPv4(t *testing.T) {
     // Create an IPv4 single stack control-plane
     serviceCIDR := "10.0.0.0/16"
 
-    cfg := framework.NewIntegrationTestControlPlaneConfig()
-    _, cidr, err := netutils.ParseCIDRSloppy(serviceCIDR)
-    if err != nil {
-        t.Fatalf("bad cidr: %v", err)
-    }
-    cfg.ExtraConfig.ServiceIPRange = *cidr
-    _, s, closeFn := framework.RunAnAPIServer(cfg)
-    defer closeFn()
-
-    client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL})
+    client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
+        ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
+            opts.ServiceClusterIPRanges = serviceCIDR
+        },
+    })
+    defer tearDownFn()
 
     // Wait until the default "kubernetes" service is created.
-    if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
+    if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
         _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
         if err != nil && !apierrors.IsNotFound(err) {
             return false, err
@@ -239,7 +232,7 @@ func TestCreateServiceSingleStackIPv4(t *testing.T) {
             }
 
             // create the service
-            _, err = client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
+            _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
             if (err != nil) != tc.expectError {
                 t.Errorf("Test failed expected result: %v received %v ", tc.expectError, err)
             }
@@ -262,23 +255,18 @@ func TestCreateServiceSingleStackIPv4(t *testing.T) {
 // TestCreateServiceDualStackIPv6 test the Service dualstackness in an IPv6 only DualStack cluster
 func TestCreateServiceDualStackIPv6(t *testing.T) {
     // Create an IPv6 only dual stack control-plane
-    serviceCIDR := "2001:db8:1::/48"
+    serviceCIDR := "2001:db8:1::/112"
 
-    cfg := framework.NewIntegrationTestControlPlaneConfig()
-    _, cidr, err := netutils.ParseCIDRSloppy(serviceCIDR)
-    if err != nil {
-        t.Fatalf("bad cidr: %v", err)
-    }
-    cfg.ExtraConfig.ServiceIPRange = *cidr
-    cfg.GenericConfig.PublicAddress = netutils.ParseIPSloppy("2001:db8::10")
-
-    _, s, closeFn := framework.RunAnAPIServer(cfg)
-    defer closeFn()
-
-    client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL})
+    client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
+        ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
+            opts.ServiceClusterIPRanges = serviceCIDR
+            opts.GenericServerRunOptions.AdvertiseAddress = netutils.ParseIPSloppy("2001:db8::10")
+        },
+    })
+    defer tearDownFn()
 
     // Wait until the default "kubernetes" service is created.
-    if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
+    if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
         _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
         if err != nil && !apierrors.IsNotFound(err) {
             return false, err
@@ -459,7 +447,7 @@ func TestCreateServiceDualStackIPv6(t *testing.T) {
             }
 
             // create the service
-            _, err = client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
+            _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
             if (err != nil) != tc.expectError {
                 t.Errorf("Test failed expected result: %v received %v ", tc.expectError, err)
             }
@@ -483,28 +471,17 @@ func TestCreateServiceDualStackIPv6(t *testing.T) {
 func TestCreateServiceDualStackIPv4IPv6(t *testing.T) {
     // Create an IPv4IPv6 dual stack control-plane
     serviceCIDR := "10.0.0.0/16"
-    secondaryServiceCIDR := "2001:db8:1::/48"
+    secondaryServiceCIDR := "2001:db8:1::/112"
 
-    cfg := framework.NewIntegrationTestControlPlaneConfig()
-    _, cidr, err := netutils.ParseCIDRSloppy(serviceCIDR)
-    if err != nil {
-        t.Fatalf("bad cidr: %v", err)
-    }
-    cfg.ExtraConfig.ServiceIPRange = *cidr
-
-    _, secCidr, err := netutils.ParseCIDRSloppy(secondaryServiceCIDR)
-    if err != nil {
-        t.Fatalf("bad cidr: %v", err)
-    }
-    cfg.ExtraConfig.SecondaryServiceIPRange = *secCidr
-
-    _, s, closeFn := framework.RunAnAPIServer(cfg)
-    defer closeFn()
-
-    client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL})
+    client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
+        ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
+            opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR)
+        },
+    })
+    defer tearDownFn()
 
     // Wait until the default "kubernetes" service is created.
-    if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
+    if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
         _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
         if err != nil && !apierrors.IsNotFound(err) {
             return false, err
@@ -684,7 +661,7 @@ func TestCreateServiceDualStackIPv4IPv6(t *testing.T) {
             }
 
             // create a service
-            _, err = client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
+            _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
             if (err != nil) != tc.expectError {
                 t.Errorf("Test failed expected result: %v received %v ", tc.expectError, err)
             }
@@ -708,30 +685,19 @@ func TestCreateServiceDualStackIPv4IPv6(t *testing.T) {
 // TestCreateServiceDualStackIPv6IPv4 test the Service dualstackness in a IPv6IPv4 DualStack cluster
 func TestCreateServiceDualStackIPv6IPv4(t *testing.T) {
     // Create an IPv6IPv4 dual stack control-plane
-    serviceCIDR := "2001:db8:1::/48"
+    serviceCIDR := "2001:db8:1::/112"
     secondaryServiceCIDR := "10.0.0.0/16"
 
-    cfg := framework.NewIntegrationTestControlPlaneConfig()
-    _, cidr, err := netutils.ParseCIDRSloppy(serviceCIDR)
-    if err != nil {
-        t.Fatalf("bad cidr: %v", err)
-    }
-    cfg.ExtraConfig.ServiceIPRange = *cidr
-    cfg.GenericConfig.PublicAddress = netutils.ParseIPSloppy("2001:db8::10")
-
-    _, secCidr, err := netutils.ParseCIDRSloppy(secondaryServiceCIDR)
-    if err != nil {
-        t.Fatalf("bad cidr: %v", err)
-    }
-    cfg.ExtraConfig.SecondaryServiceIPRange = *secCidr
-
-    _, s, closeFn := framework.RunAnAPIServer(cfg)
-    defer closeFn()
-
-    client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL})
+    client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
+        ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
+            opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR)
+            opts.GenericServerRunOptions.AdvertiseAddress = netutils.ParseIPSloppy("2001:db8::10")
+        },
+    })
+    defer tearDownFn()
 
     // Wait until the default "kubernetes" service is created.
-    if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
+    if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
         _, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
         if err != nil && !apierrors.IsNotFound(err) {
             return false, err
@@ -743,7 +709,7 @@ func TestCreateServiceDualStackIPv6IPv4(t *testing.T) {
 
     // verify client is working
     if err := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
-        _, err = client.CoreV1().Endpoints("default").Get(context.TODO(), "kubernetes", metav1.GetOptions{})
+        _, err := client.CoreV1().Endpoints("default").Get(context.TODO(), "kubernetes", metav1.GetOptions{})
         if err != nil {
             t.Logf("error fetching endpoints: %v", err)
             return false, nil
@@ -914,7 +880,7 @@ func TestCreateServiceDualStackIPv6IPv4(t *testing.T) {
             }
 
             // create a service
-            _, err = client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
+            _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
             if (err != nil) != tc.expectError {
                 t.Errorf("Test failed expected result: %v received %v ", tc.expectError, err)
             }
@ -939,28 +905,17 @@ func TestCreateServiceDualStackIPv6IPv4(t *testing.T) {
|
|||||||
func TestUpgradeDowngrade(t *testing.T) {
|
func TestUpgradeDowngrade(t *testing.T) {
|
||||||
// Create an IPv4IPv6 dual stack control-plane
|
// Create an IPv4IPv6 dual stack control-plane
|
||||||
serviceCIDR := "10.0.0.0/16"
|
serviceCIDR := "10.0.0.0/16"
|
||||||
secondaryServiceCIDR := "2001:db8:1::/48"
|
secondaryServiceCIDR := "2001:db8:1::/112"
|
||||||
|
|
||||||
cfg := framework.NewIntegrationTestControlPlaneConfig()
|
client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
|
||||||
_, cidr, err := netutils.ParseCIDRSloppy(serviceCIDR)
|
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
|
||||||
if err != nil {
|
opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR)
|
||||||
t.Fatalf("bad cidr: %v", err)
|
},
|
||||||
}
|
})
|
||||||
cfg.ExtraConfig.ServiceIPRange = *cidr
|
defer tearDownFn()
|
||||||
|
|
||||||
_, secCidr, err := netutils.ParseCIDRSloppy(secondaryServiceCIDR)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("bad cidr: %v", err)
|
|
||||||
}
|
|
||||||
cfg.ExtraConfig.SecondaryServiceIPRange = *secCidr
|
|
||||||
|
|
||||||
_, s, closeFn := framework.RunAnAPIServer(cfg)
|
|
||||||
defer closeFn()
|
|
||||||
|
|
||||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL})
|
|
||||||
|
|
||||||
// Wait until the default "kubernetes" service is created.
|
// Wait until the default "kubernetes" service is created.
|
||||||
if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
|
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
|
||||||
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
|
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
|
||||||
if err != nil && !apierrors.IsNotFound(err) {
|
if err != nil && !apierrors.IsNotFound(err) {
|
||||||
return false, err
|
return false, err
|
||||||
@ -988,7 +943,7 @@ func TestUpgradeDowngrade(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// create a service
|
// create a service
|
||||||
_, err = client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
|
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("unexpected error while creating service:%v", err)
|
t.Fatalf("unexpected error while creating service:%v", err)
|
||||||
}
|
}
|
||||||
@ -1053,28 +1008,17 @@ func TestUpgradeDowngrade(t *testing.T) {
|
|||||||
func TestConvertToFromExternalName(t *testing.T) {
|
func TestConvertToFromExternalName(t *testing.T) {
|
||||||
// Create an IPv4IPv6 dual stack control-plane
|
// Create an IPv4IPv6 dual stack control-plane
|
||||||
serviceCIDR := "10.0.0.0/16"
|
serviceCIDR := "10.0.0.0/16"
|
||||||
secondaryServiceCIDR := "2001:db8:1::/48"
|
secondaryServiceCIDR := "2001:db8:1::/112"
|
||||||
|
|
||||||
cfg := framework.NewIntegrationTestControlPlaneConfig()
|
client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
|
||||||
_, cidr, err := netutils.ParseCIDRSloppy(serviceCIDR)
|
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
|
||||||
if err != nil {
|
opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR)
|
||||||
t.Fatalf("bad cidr: %v", err)
|
},
|
||||||
}
|
})
|
||||||
cfg.ExtraConfig.ServiceIPRange = *cidr
|
defer tearDownFn()
|
||||||
|
|
||||||
_, secCidr, err := netutils.ParseCIDRSloppy(secondaryServiceCIDR)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("bad cidr: %v", err)
|
|
||||||
}
|
|
||||||
cfg.ExtraConfig.SecondaryServiceIPRange = *secCidr
|
|
||||||
|
|
||||||
_, s, closeFn := framework.RunAnAPIServer(cfg)
|
|
||||||
defer closeFn()
|
|
||||||
|
|
||||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL})
|
|
||||||
|
|
||||||
// Wait until the default "kubernetes" service is created.
|
// Wait until the default "kubernetes" service is created.
|
||||||
if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
|
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
|
||||||
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
|
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
|
||||||
if err != nil && !apierrors.IsNotFound(err) {
|
if err != nil && !apierrors.IsNotFound(err) {
|
||||||
return false, err
|
return false, err
|
||||||
@@ -1101,7 +1045,7 @@ func TestConvertToFromExternalName(t *testing.T) {
}

// create a service
-_, err = client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
+_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
if err != nil {
t.Fatalf("unexpected error while creating service:%v", err)
}

@@ -1145,28 +1089,17 @@ func TestConvertToFromExternalName(t *testing.T) {
func TestPreferDualStack(t *testing.T) {
// Create an IPv4IPv6 dual stack control-plane
serviceCIDR := "10.0.0.0/16"
-secondaryServiceCIDR := "2001:db8:1::/48"
+secondaryServiceCIDR := "2001:db8:1::/112"

-cfg := framework.NewIntegrationTestControlPlaneConfig()
-_, cidr, err := netutils.ParseCIDRSloppy(serviceCIDR)
-if err != nil {
-t.Fatalf("bad cidr: %v", err)
-}
-cfg.ExtraConfig.ServiceIPRange = *cidr
-
-_, secCidr, err := netutils.ParseCIDRSloppy(secondaryServiceCIDR)
-if err != nil {
-t.Fatalf("bad cidr: %v", err)
-}
-cfg.ExtraConfig.SecondaryServiceIPRange = *secCidr
-
-_, s, closeFn := framework.RunAnAPIServer(cfg)
-defer closeFn()
-
-client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL})
+client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
+ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
+opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR)
+},
+})
+defer tearDownFn()

// Wait until the default "kubernetes" service is created.
-if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
+if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err

@@ -1197,7 +1130,7 @@ func TestPreferDualStack(t *testing.T) {
}

// create a service
-_, err = client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
+_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
if err != nil {
t.Fatalf("unexpected error while creating service:%v", err)
}

@@ -1231,19 +1164,15 @@ func TestServiceUpdate(t *testing.T) {
// Create an IPv4 single stack control-plane
serviceCIDR := "10.0.0.0/16"

-cfg := framework.NewIntegrationTestControlPlaneConfig()
-_, cidr, err := netutils.ParseCIDRSloppy(serviceCIDR)
-if err != nil {
-t.Fatalf("bad cidr: %v", err)
-}
-cfg.ExtraConfig.ServiceIPRange = *cidr
-_, s, closeFn := framework.RunAnAPIServer(cfg)
-defer closeFn()
-
-client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL})
+client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
+ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
+opts.ServiceClusterIPRanges = serviceCIDR
+},
+})
+defer tearDownFn()

// Wait until the default "kubernetes" service is created.
-if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
+if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err

@@ -1270,7 +1199,7 @@ func TestServiceUpdate(t *testing.T) {
}

// create the service
-_, err = client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
+_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
// if no error was expected validate the service otherwise return
if err != nil {
t.Errorf("unexpected error creating service:%v", err)

@@ -1392,21 +1321,20 @@ func validateServiceAndClusterIPFamily(svc *v1.Service, expectedIPFamilies []v1.
}

func TestUpgradeServicePreferToDualStack(t *testing.T) {
+sharedEtcd := framework.SharedEtcd()
+
// Create an IPv4 only dual stack control-plane
serviceCIDR := "192.168.0.0/24"

-cfg := framework.NewIntegrationTestControlPlaneConfig()
-_, cidr, err := netutils.ParseCIDRSloppy(serviceCIDR)
-if err != nil {
-t.Fatalf("bad cidr: %v", err)
-}
-cfg.ExtraConfig.ServiceIPRange = *cidr
-_, s, closeFn := framework.RunAnAPIServer(cfg)
-
-client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL})
+client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
+ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
+opts.Etcd.StorageConfig = *sharedEtcd
+opts.ServiceClusterIPRanges = serviceCIDR
+},
+})

// Wait until the default "kubernetes" service is created.
-if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
+if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err

@@ -1438,7 +1366,7 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) {
}

// create the service
-_, err = client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
+_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}

@@ -1452,18 +1380,17 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) {
}

// reconfigure the apiserver to be dual-stack
-closeFn()
+tearDownFn()

-secondaryServiceCIDR := "2001:db8:1::/48"
-_, secCidr, err := netutils.ParseCIDRSloppy(secondaryServiceCIDR)
-if err != nil {
-t.Fatalf("bad cidr: %v", err)
-}
-cfg.ExtraConfig.SecondaryServiceIPRange = *secCidr
-_, s, closeFn = framework.RunAnAPIServer(cfg)
-defer closeFn()
-
-client = clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL})
+secondaryServiceCIDR := "2001:db8:1::/112"
+
+client, _, tearDownFn = framework.StartTestServer(t, framework.TestServerSetup{
+ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
+opts.Etcd.StorageConfig = *sharedEtcd
+opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR)
+},
+})
+defer tearDownFn()

// Wait until the default "kubernetes" service is created.
if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
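
Editorial note: the upgrade/downgrade tests above now restart the test API server against the same shared etcd to simulate a reconfiguration, instead of mutating the old control-plane config. A condensed sketch of that flow, assuming the framework helpers used in this diff (object creation between the two phases is elided):

    sharedEtcd := framework.SharedEtcd()

    // Phase 1: single-stack server backed by the shared etcd.
    client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
        ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
            opts.Etcd.StorageConfig = *sharedEtcd
            opts.ServiceClusterIPRanges = "192.168.0.0/24"
        },
    })
    // ... create test objects, then simulate the upgrade:
    tearDownFn() // stop the first server so its goroutines do not leak into phase 2

    // Phase 2: dual-stack server reusing the same etcd, so the stored objects survive.
    client, _, tearDownFn = framework.StartTestServer(t, framework.TestServerSetup{
        ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
            opts.Etcd.StorageConfig = *sharedEtcd
            opts.ServiceClusterIPRanges = "192.168.0.0/24,2001:db8:1::/112"
        },
    })
    defer tearDownFn()
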
@@ -1487,25 +1414,21 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) {
}

func TestDowngradeServicePreferToDualStack(t *testing.T) {
+sharedEtcd := framework.SharedEtcd()
+
// Create a dual stack control-plane
serviceCIDR := "192.168.0.0/24"
-secondaryServiceCIDR := "2001:db8:1::/48"
+secondaryServiceCIDR := "2001:db8:1::/112"

-dualStackCfg := framework.NewIntegrationTestControlPlaneConfig()
-_, cidr, err := netutils.ParseCIDRSloppy(serviceCIDR)
-if err != nil {
-t.Fatalf("bad cidr: %v", err)
-}
-dualStackCfg.ExtraConfig.ServiceIPRange = *cidr
-_, secCidr, err := netutils.ParseCIDRSloppy(secondaryServiceCIDR)
-if err != nil {
-t.Fatalf("bad cidr: %v", err)
-}
-dualStackCfg.ExtraConfig.SecondaryServiceIPRange = *secCidr
-_, s, closeFn := framework.RunAnAPIServer(dualStackCfg)
-client := clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL})
+client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{
+ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
+opts.Etcd.StorageConfig = *sharedEtcd
+opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR)
+},
+})
+
// Wait until the default "kubernetes" service is created.
-if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
+if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err

@@ -1535,7 +1458,7 @@ func TestDowngradeServicePreferToDualStack(t *testing.T) {
},
}
// create the service
-_, err = client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
+_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.TODO(), svc, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}

@@ -1548,14 +1471,17 @@ func TestDowngradeServicePreferToDualStack(t *testing.T) {
t.Fatalf("Unexpected error validating the service %s %v", svc.Name, err)
}
// reconfigure the apiserver to be sinlge stack
-closeFn()
+tearDownFn()

-// reset secondary
-var emptyCidr net.IPNet
-dualStackCfg.ExtraConfig.SecondaryServiceIPRange = emptyCidr
-
-_, s, closeFn = framework.RunAnAPIServer(dualStackCfg)
-defer closeFn()
-client = clientset.NewForConfigOrDie(&restclient.Config{Host: s.URL})
+// reset secondary
+client, _, tearDownFn = framework.StartTestServer(t, framework.TestServerSetup{
+ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
+opts.Etcd.StorageConfig = *sharedEtcd
+opts.ServiceClusterIPRanges = serviceCIDR
+},
+})
+defer tearDownFn()
+
// Wait until the default "kubernetes" service is created.
if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})

@@ -1587,18 +1513,11 @@ type specMergePatch struct {

// tests success when converting ClusterIP:Headless service to ExternalName
func Test_ServiceChangeTypeHeadlessToExternalNameWithPatch(t *testing.T) {
-controlPlaneConfig := framework.NewIntegrationTestControlPlaneConfig()
-_, server, closeFn := framework.RunAnAPIServer(controlPlaneConfig)
-defer closeFn()
-
-config := restclient.Config{Host: server.URL}
-client, err := clientset.NewForConfig(&config)
-if err != nil {
-t.Fatalf("Error creating clientset: %v", err)
-}
-
-ns := framework.CreateTestingNamespace("test-service-allocate-node-ports", t)
-defer framework.DeleteTestingNamespace(ns, t)
+client, _, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{})
+defer tearDownFn()
+
+ns := framework.CreateNamespaceOrDie(client, "test-service-allocate-node-ports", t)
+defer framework.DeleteNamespaceOrDie(client, ns, t)

service := &v1.Service{
ObjectMeta: metav1.ObjectMeta{

@@ -1611,6 +1530,7 @@ func Test_ServiceChangeTypeHeadlessToExternalNameWithPatch(t *testing.T) {
},
}

+var err error
service, err = client.CoreV1().Services(ns.Name).Create(context.TODO(), service, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Error creating test service: %v", err)

@@ -47,6 +47,7 @@ import (
"k8s.io/client-go/util/retry"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/controller-manager/pkg/informerfactory"
+kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/garbagecollector"
jobcontroller "k8s.io/kubernetes/pkg/controller/job"

@@ -586,9 +587,7 @@ func TestFinalizersClearedWhenBackoffLimitExceeded(t *testing.T) {
closeFn, restConfig, clientSet, ns := setup(t, "simple")
defer closeFn()
ctx, cancel := startJobControllerAndWaitForCaches(restConfig)
-defer func() {
-cancel()
-}()
+defer cancel()

// Job tracking with finalizers requires less calls in Indexed mode,
// so it's more likely to process all finalizers before all the pods

@@ -785,9 +784,7 @@ func TestSuspendJobControllerRestart(t *testing.T) {
closeFn, restConfig, clientSet, ns := setup(t, "suspend")
defer closeFn()
ctx, cancel := startJobControllerAndWaitForCaches(restConfig)
-defer func() {
-cancel()
-}()
+defer cancel()

job, err := createJobWithDefaults(ctx, clientSet, ns.Name, &batchv1.Job{
Spec: batchv1.JobSpec{

@@ -1162,24 +1159,23 @@ func createJobWithDefaults(ctx context.Context, clientSet clientset.Interface, n
}

func setup(t *testing.T, nsBaseName string) (framework.CloseFunc, *restclient.Config, clientset.Interface, *v1.Namespace) {
-controlPlaneConfig := framework.NewIntegrationTestControlPlaneConfig()
-_, server, apiServerCloseFn := framework.RunAnAPIServer(controlPlaneConfig)
+// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
+server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd())

-config := restclient.Config{
-Host: server.URL,
-QPS: 200.0,
-Burst: 200,
-}
-clientSet, err := clientset.NewForConfig(&config)
+config := restclient.CopyConfig(server.ClientConfig)
+config.QPS = 200
+config.Burst = 200
+clientSet, err := clientset.NewForConfig(config)
if err != nil {
t.Fatalf("Error creating clientset: %v", err)
}
-ns := framework.CreateTestingNamespace(nsBaseName, t)
+ns := framework.CreateNamespaceOrDie(clientSet, nsBaseName, t)
closeFn := func() {
-framework.DeleteTestingNamespace(ns, t)
-apiServerCloseFn()
+framework.DeleteNamespaceOrDie(clientSet, ns, t)
+server.TearDownFn()
}
-return closeFn, &config, clientSet, ns
+return closeFn, config, clientSet, ns
}

func startJobControllerAndWaitForCaches(restConfig *restclient.Config) (context.Context, context.CancelFunc) {
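
Editorial note: the rewritten setup() helper above is the template the job tests now share: start a real kube-apiserver with the ServiceAccount admission plugin disabled, derive a rest config from server.ClientConfig, and fold namespace deletion plus server teardown into a single closeFn. A condensed sketch assembled from the added lines (names follow the diff):

    server := kubeapiservertesting.StartTestServerOrDie(t, nil,
        []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd())

    config := restclient.CopyConfig(server.ClientConfig)
    config.QPS = 200
    config.Burst = 200
    clientSet, err := clientset.NewForConfig(config)
    if err != nil {
        t.Fatalf("Error creating clientset: %v", err)
    }

    ns := framework.CreateNamespaceOrDie(clientSet, nsBaseName, t)
    closeFn := func() {
        framework.DeleteNamespaceOrDie(clientSet, ns, t) // remove the test namespace first
        server.TearDownFn()                              // then stop the API server and its goroutines
    }
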
@@ -353,7 +353,7 @@ func TestSchedulerExtender(t *testing.T) {
},
}

-testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, scheduler.WithExtenders(extenders...))
+testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, 0, scheduler.WithExtenders(extenders...))
testutils.SyncInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.Ctx)
defer testutils.CleanupTest(t, testCtx)

@@ -1489,7 +1489,7 @@ func TestBindPlugin(t *testing.T) {
})

// Create the scheduler with the test plugin set.
-testCtx := testutils.InitTestSchedulerWithOptions(t, testContext,
+testCtx := testutils.InitTestSchedulerWithOptions(t, testContext, 0,
scheduler.WithProfiles(cfg.Profiles...),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
testutils.SyncInformerFactory(testCtx)

@@ -2421,7 +2421,7 @@ func TestActivatePods(t *testing.T) {
}

func initTestSchedulerForFrameworkTest(t *testing.T, testCtx *testutils.TestContext, nodeCount int, opts ...scheduler.Option) *testutils.TestContext {
-testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, opts...)
+testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, 0, opts...)
testutils.SyncInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.Ctx)

@@ -171,6 +171,7 @@ func TestPreemption(t *testing.T) {

testCtx := testutils.InitTestSchedulerWithOptions(t,
testutils.InitTestAPIServer(t, "preemption", nil),
+0,
scheduler.WithProfiles(cfg.Profiles...),
scheduler.WithFrameworkOutOfTreeRegistry(registry))
testutils.SyncInformerFactory(testCtx)

@@ -1436,7 +1437,7 @@ func TestPDBInPreemption(t *testing.T) {
}

func initTestPreferNominatedNode(t *testing.T, nsPrefix string, opts ...scheduler.Option) *testutils.TestContext {
-testCtx := testutils.InitTestSchedulerWithOptions(t, testutils.InitTestAPIServer(t, nsPrefix, nil), opts...)
+testCtx := testutils.InitTestSchedulerWithOptions(t, testutils.InitTestAPIServer(t, nsPrefix, nil), 0, opts...)
testutils.SyncInformerFactory(testCtx)
// wraps the NextPod() method to make it appear the preemption has been done already and the nominated node has been set.
f := testCtx.Scheduler.NextPod

@@ -56,6 +56,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
testCtx := testutils.InitTestSchedulerWithOptions(
t,
testutils.InitTestAPIServer(t, "core-res-enqueue", nil),
+0,
scheduler.WithPodInitialBackoffSeconds(0),
scheduler.WithPodMaxBackoffSeconds(0),
)

@@ -236,6 +237,7 @@ func TestCustomResourceEnqueue(t *testing.T) {
testCtx = testutils.InitTestSchedulerWithOptions(
t,
testCtx,
+0,
scheduler.WithProfiles(cfg.Profiles...),
scheduler.WithFrameworkOutOfTreeRegistry(registry),
scheduler.WithPodInitialBackoffSeconds(0),

@@ -267,7 +267,7 @@ func TestMultipleSchedulers(t *testing.T) {
}},
},
})
-testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, scheduler.WithProfiles(cfg.Profiles...))
+testCtx = testutils.InitTestSchedulerWithOptions(t, testCtx, 0, scheduler.WithProfiles(cfg.Profiles...))
testutils.SyncInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.Ctx)

@@ -92,6 +92,7 @@ func initTestSchedulerForPriorityTest(t *testing.T, scorePluginName string) *tes
testCtx := testutils.InitTestSchedulerWithOptions(
t,
testutils.InitTestAPIServer(t, strings.ToLower(scorePluginName), nil),
+0,
scheduler.WithProfiles(cfg.Profiles...),
)
testutils.SyncInformerFactory(testCtx)

@@ -127,6 +128,7 @@ func initTestSchedulerForNodeResourcesTest(t *testing.T) *testutils.TestContext
testCtx := testutils.InitTestSchedulerWithOptions(
t,
testutils.InitTestAPIServer(t, strings.ToLower(noderesources.Name), nil),
+0,
scheduler.WithProfiles(cfg.Profiles...),
)
testutils.SyncInformerFactory(testCtx)

@@ -37,6 +37,7 @@ import (
apiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/statefulset"
+"k8s.io/kubernetes/pkg/controlplane"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/integration/framework"
)

@@ -49,10 +50,7 @@ const (
// TestVolumeTemplateNoopUpdate ensures embedded StatefulSet objects with embedded PersistentVolumes can be updated
func TestVolumeTemplateNoopUpdate(t *testing.T) {
// Start the server with default storage setup
-server, err := apiservertesting.StartTestServer(t, apiservertesting.NewDefaultTestServerOptions(), nil, framework.SharedEtcd())
-if err != nil {
-t.Fatal(err)
-}
+server := apiservertesting.StartTestServerOrDie(t, nil, nil, framework.SharedEtcd())
defer server.TearDownFn()

c, err := dynamic.NewForConfig(server.ClientConfig)

@@ -125,8 +123,8 @@ func TestVolumeTemplateNoopUpdate(t *testing.T) {
func TestSpecReplicasChange(t *testing.T) {
closeFn, rm, informers, c := scSetup(t)
defer closeFn()
-ns := framework.CreateTestingNamespace("test-spec-replicas-change", t)
-defer framework.DeleteTestingNamespace(ns, t)
+ns := framework.CreateNamespaceOrDie(c, "test-spec-replicas-change", t)
+defer framework.DeleteNamespaceOrDie(c, ns, t)
cancel := runControllerAndInformers(rm, informers)
defer cancel()

@@ -168,8 +166,8 @@ func TestSpecReplicasChange(t *testing.T) {
func TestDeletingAndFailedPods(t *testing.T) {
closeFn, rm, informers, c := scSetup(t)
defer closeFn()
-ns := framework.CreateTestingNamespace("test-deleting-and-failed-pods", t)
-defer framework.DeleteTestingNamespace(ns, t)
+ns := framework.CreateNamespaceOrDie(c, "test-deleting-and-failed-pods", t)
+defer framework.DeleteNamespaceOrDie(c, ns, t)
cancel := runControllerAndInformers(rm, informers)
defer cancel()

@@ -269,8 +267,8 @@ func TestStatefulSetAvailable(t *testing.T) {
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetMinReadySeconds, test.enabled)()
closeFn, rm, informers, c := scSetup(t)
defer closeFn()
-ns := framework.CreateTestingNamespace("test-available-pods", t)
-defer framework.DeleteTestingNamespace(ns, t)
+ns := framework.CreateNamespaceOrDie(c, "test-available-pods", t)
+defer framework.DeleteNamespaceOrDie(c, ns, t)
cancel := runControllerAndInformers(rm, informers)
defer cancel()

@@ -358,31 +356,28 @@ func setPodsReadyCondition(t *testing.T, clientSet clientset.Interface, pods *v1
// add for issue: https://github.com/kubernetes/kubernetes/issues/108837
func TestStatefulSetStatusWithPodFail(t *testing.T) {
limitedPodNumber := 2
-controlPlaneConfig := framework.NewIntegrationTestControlPlaneConfig()
-controlPlaneConfig.GenericConfig.AdmissionControl = &fakePodFailAdmission{
-limitedPodNumber: limitedPodNumber,
-}
-_, s, closeFn := framework.RunAnAPIServer(controlPlaneConfig)
+c, config, closeFn := framework.StartTestServer(t, framework.TestServerSetup{
+ModifyServerConfig: func(config *controlplane.Config) {
+config.GenericConfig.AdmissionControl = &fakePodFailAdmission{
+limitedPodNumber: limitedPodNumber,
+}
+},
+})
defer closeFn()

-config := restclient.Config{Host: s.URL}
-c, err := clientset.NewForConfig(&config)
-if err != nil {
-t.Fatalf("Could not create clientset: %v", err)
-}
resyncPeriod := 12 * time.Hour
-informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "statefulset-informers")), resyncPeriod)
+informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "statefulset-informers")), resyncPeriod)

ssc := statefulset.NewStatefulSetController(
informers.Core().V1().Pods(),
informers.Apps().V1().StatefulSets(),
informers.Core().V1().PersistentVolumeClaims(),
informers.Apps().V1().ControllerRevisions(),
-clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "statefulset-controller")),
+clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "statefulset-controller")),
)

-ns := framework.CreateTestingNamespace("test-pod-fail", t)
-defer framework.DeleteTestingNamespace(ns, t)
+ns := framework.CreateNamespaceOrDie(c, "test-pod-fail", t)
+defer framework.DeleteNamespaceOrDie(c, ns, t)

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

@@ -390,7 +385,7 @@ func TestStatefulSetStatusWithPodFail(t *testing.T) {
go ssc.Run(ctx, 5)

sts := newSTS("sts", ns.Name, 4)
-_, err = c.AppsV1().StatefulSets(sts.Namespace).Create(context.TODO(), sts, metav1.CreateOptions{})
+_, err := c.AppsV1().StatefulSets(sts.Namespace).Create(context.TODO(), sts, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Could not create statefuleSet %s: %v", sts.Name, err)
}

@@ -35,6 +35,7 @@ import (
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/util/retry"
+kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
api "k8s.io/kubernetes/pkg/apis/core"

//svc "k8s.io/kubernetes/pkg/api/v1/service"

@@ -159,27 +160,27 @@ func newStatefulSetPVC(name string) v1.PersistentVolumeClaim {
}

// scSetup sets up necessities for Statefulset integration test, including control plane, apiserver, informers, and clientset
-func scSetup(t *testing.T) (framework.CloseFunc, *statefulset.StatefulSetController, informers.SharedInformerFactory, clientset.Interface) {
-controlPlaneConfig := framework.NewIntegrationTestControlPlaneConfig()
-_, s, closeFn := framework.RunAnAPIServer(controlPlaneConfig)
+func scSetup(t *testing.T) (kubeapiservertesting.TearDownFunc, *statefulset.StatefulSetController, informers.SharedInformerFactory, clientset.Interface) {
+// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
+server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{"--disable-admission-plugins=ServiceAccount"}, framework.SharedEtcd())

-config := restclient.Config{Host: s.URL}
-clientSet, err := clientset.NewForConfig(&config)
+config := restclient.CopyConfig(server.ClientConfig)
+clientSet, err := clientset.NewForConfig(config)
if err != nil {
t.Fatalf("error in create clientset: %v", err)
}
resyncPeriod := 12 * time.Hour
-informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "statefulset-informers")), resyncPeriod)
+informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "statefulset-informers")), resyncPeriod)

sc := statefulset.NewStatefulSetController(
informers.Core().V1().Pods(),
informers.Apps().V1().StatefulSets(),
informers.Core().V1().PersistentVolumeClaims(),
informers.Apps().V1().ControllerRevisions(),
-clientset.NewForConfigOrDie(restclient.AddUserAgent(&config, "statefulset-controller")),
+clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "statefulset-controller")),
)

-return closeFn, sc, informers, clientSet
+return server.TearDownFn, sc, informers, clientSet
}

// Run STS controller and informers

@@ -342,7 +342,7 @@ func InitTestAPIServer(t *testing.T, nsPrefix string, admission admission.Interf

testCtx.ClientSet, testCtx.KubeConfig, testCtx.CloseFn = framework.StartTestServer(t, framework.TestServerSetup{
ModifyServerRunOptions: func(options *options.ServerRunOptions) {
-options.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition", "Priority"}
+options.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition", "Priority", "StorageObjectInUseProtection"}
},
ModifyServerConfig: func(config *controlplane.Config) {
if admission != nil {

@@ -380,7 +380,7 @@ func InitTestScheduler(
testCtx *TestContext,
) *TestContext {
// Pod preemption is enabled by default scheduler configuration.
-return InitTestSchedulerWithOptions(t, testCtx)
+return InitTestSchedulerWithOptions(t, testCtx, 0)
}

// InitTestSchedulerWithOptions initializes a test environment and creates a scheduler with default

@@ -388,10 +388,11 @@ func InitTestScheduler(
func InitTestSchedulerWithOptions(
t *testing.T,
testCtx *TestContext,
+resyncPeriod time.Duration,
opts ...scheduler.Option,
) *TestContext {
// 1. Create scheduler
-testCtx.InformerFactory = scheduler.NewInformerFactory(testCtx.ClientSet, 0)
+testCtx.InformerFactory = scheduler.NewInformerFactory(testCtx.ClientSet, resyncPeriod)
if testCtx.KubeConfig != nil {
dynClient := dynamic.NewForConfigOrDie(testCtx.KubeConfig)
testCtx.DynInformerFactory = dynamicinformer.NewFilteredDynamicSharedInformerFactory(dynClient, 0, v1.NamespaceAll, nil)
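
Editorial note: every caller of InitTestSchedulerWithOptions in this commit now passes an explicit resync period (usually 0, meaning no periodic resync of the informer factory). A typical call after this change looks like the sketch below; helper names follow the diff, while the "example" namespace prefix and the chosen options are illustrative:

    testCtx := testutils.InitTestSchedulerWithOptions(
        t,
        testutils.InitTestAPIServer(t, "example", nil), // "example" is a placeholder namespace prefix
        0, // resyncPeriod for the informer factory; 0 disables periodic resync
        scheduler.WithPodInitialBackoffSeconds(0),
    )
    testutils.SyncInformerFactory(testCtx)
    go testCtx.Scheduler.Run(testCtx.Ctx)
    defer testutils.CleanupTest(t, testCtx)
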
@@ -489,7 +490,7 @@ func InitDisruptionController(t *testing.T, testCtx *TestContext) *disruption.Di
// InitTestSchedulerWithNS initializes a test environment and creates API server and scheduler with default
// configuration.
func InitTestSchedulerWithNS(t *testing.T, nsPrefix string, opts ...scheduler.Option) *TestContext {
-testCtx := InitTestSchedulerWithOptions(t, InitTestAPIServer(t, nsPrefix, nil), opts...)
+testCtx := InitTestSchedulerWithOptions(t, InitTestAPIServer(t, nsPrefix, nil), 0, opts...)
SyncInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.Ctx)
return testCtx

@@ -512,6 +513,7 @@ func InitTestDisablePreemption(t *testing.T, nsPrefix string) *TestContext {
})
testCtx := InitTestSchedulerWithOptions(
t, InitTestAPIServer(t, nsPrefix, nil),
+0,
scheduler.WithProfiles(cfg.Profiles...))
SyncInformerFactory(testCtx)
go testCtx.Scheduler.Run(testCtx.Ctx)

@@ -18,125 +18,15 @@ package volumescheduling

import (
"context"
-"net/http/httptest"
-"testing"
"time"

v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-"k8s.io/apimachinery/pkg/runtime/schema"
-"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
-"k8s.io/apiserver/pkg/admission"
-"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
-restclient "k8s.io/client-go/rest"
-"k8s.io/client-go/tools/events"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
-"k8s.io/kubernetes/pkg/controlplane"
-"k8s.io/kubernetes/pkg/scheduler"
-"k8s.io/kubernetes/pkg/scheduler/profile"
-"k8s.io/kubernetes/test/integration/framework"
)

-type testContext struct {
-closeFn framework.CloseFunc
-httpServer *httptest.Server
-ns *v1.Namespace
-clientSet *clientset.Clientset
-informerFactory informers.SharedInformerFactory
-scheduler *scheduler.Scheduler
-
-ctx context.Context
-cancelFn context.CancelFunc
-}
-
-// initTestAPIServer initializes a test environment and creates an API server with default
-// configuration. Alpha resources are enabled automatically if the corresponding feature
-// is enabled.
-func initTestAPIServer(t *testing.T, nsPrefix string, admission admission.Interface) *testContext {
-ctx, cancelFunc := context.WithCancel(context.Background())
-testCtx := testContext{
-ctx: ctx,
-cancelFn: cancelFunc,
-}
-
-// 1. Create API server
-controlPlaneConfig := framework.NewIntegrationTestControlPlaneConfig()
-resourceConfig := controlplane.DefaultAPIResourceConfigSource()
-controlPlaneConfig.ExtraConfig.APIResourceConfigSource = resourceConfig
-
-if admission != nil {
-controlPlaneConfig.GenericConfig.AdmissionControl = admission
-}
-
-_, testCtx.httpServer, testCtx.closeFn = framework.RunAnAPIServer(controlPlaneConfig)
-s := testCtx.httpServer
-
-if nsPrefix != "default" {
-testCtx.ns = framework.CreateTestingNamespace(nsPrefix+string(uuid.NewUUID()), t)
-} else {
-testCtx.ns = framework.CreateTestingNamespace("default", t)
-}
-
-// 2. Create kubeclient
-testCtx.clientSet = clientset.NewForConfigOrDie(
-&restclient.Config{
-QPS: -1, Host: s.URL,
-ContentConfig: restclient.ContentConfig{
-GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"},
-},
-},
-)
-return &testCtx
-}
-
-// initTestSchedulerWithOptions initializes a test environment and creates a scheduler with default
-// configuration and other options.
-func initTestSchedulerWithOptions(
-t *testing.T,
-testCtx *testContext,
-resyncPeriod time.Duration,
-) *testContext {
-// 1. Create scheduler
-testCtx.informerFactory = informers.NewSharedInformerFactory(testCtx.clientSet, resyncPeriod)
-
-eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{
-Interface: testCtx.clientSet.EventsV1(),
-})
-
-var err error
-testCtx.scheduler, err = scheduler.New(
-testCtx.clientSet,
-testCtx.informerFactory,
-nil,
-profile.NewRecorderFactory(eventBroadcaster),
-testCtx.ctx.Done())
-
-if err != nil {
-t.Fatalf("Couldn't create scheduler: %v", err)
-}
-
-eventBroadcaster.StartRecordingToSink(testCtx.ctx.Done())
-
-testCtx.informerFactory.Start(testCtx.scheduler.StopEverything)
-testCtx.informerFactory.WaitForCacheSync(testCtx.scheduler.StopEverything)
-
-go testCtx.scheduler.Run(testCtx.ctx)
-return testCtx
-}
-
-// cleanupTest deletes the scheduler and the test namespace. It should be called
-// at the end of a test.
-func cleanupTest(t *testing.T, testCtx *testContext) {
-// Kill the scheduler.
-testCtx.cancelFn()
-// Cleanup nodes.
-testCtx.clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
-framework.DeleteTestingNamespace(testCtx.ns, t)
-testCtx.closeFn()
-}
-
// waitForPodToScheduleWithTimeout waits for a pod to get scheduled and returns
// an error if it does not scheduled within the given timeout.
func waitForPodToScheduleWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {

@@ -43,6 +43,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodevolumelimits"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
+testutil "k8s.io/kubernetes/test/integration/util"
imageutils "k8s.io/kubernetes/test/utils/image"
)

@@ -990,19 +991,16 @@ func TestCapacity(t *testing.T) {
// selectedNode annotation from a claim to reschedule volume provision
// on provision failure.
func TestRescheduleProvisioning(t *testing.T) {
-// Set feature gates
-ctx, cancel := context.WithCancel(context.Background())
-
-testCtx := initTestAPIServer(t, "reschedule-volume-provision", nil)
-
-clientset := testCtx.clientSet
-ns := testCtx.ns.Name
+testCtx := testutil.InitTestAPIServer(t, "reschedule-volume-provision", nil)
+
+clientset := testCtx.ClientSet
+ns := testCtx.NS.Name

defer func() {
-cancel()
+testCtx.CancelFn()
deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
-testCtx.clientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
-testCtx.closeFn()
+testCtx.ClientSet.CoreV1().Nodes().DeleteCollection(context.TODO(), metav1.DeleteOptions{}, metav1.ListOptions{})
+testCtx.CloseFn()
}()

ctrl, informerFactory, err := initPVController(t, testCtx, 0)

@@ -1038,9 +1036,9 @@ func TestRescheduleProvisioning(t *testing.T) {
}

// Start controller.
-go ctrl.Run(ctx)
-informerFactory.Start(ctx.Done())
-informerFactory.WaitForCacheSync(ctx.Done())
+go ctrl.Run(testCtx.Ctx)
+informerFactory.Start(testCtx.Ctx.Done())
+informerFactory.WaitForCacheSync(testCtx.Ctx.Done())

// Validate that the annotation is removed by controller for provision reschedule.
if err := waitForProvisionAnn(clientset, pvc, false); err != nil {

@@ -1049,18 +1047,21 @@ func TestRescheduleProvisioning(t *testing.T) {
}

func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig {
-testCtx := initTestSchedulerWithOptions(t, initTestAPIServer(t, nsName, nil), resyncPeriod)
-clientset := testCtx.clientSet
-ns := testCtx.ns.Name
+testCtx := testutil.InitTestSchedulerWithOptions(t, testutil.InitTestAPIServer(t, nsName, nil), resyncPeriod)
+testutil.SyncInformerFactory(testCtx)
+go testCtx.Scheduler.Run(testCtx.Ctx)
+
+clientset := testCtx.ClientSet
+ns := testCtx.NS.Name

ctrl, informerFactory, err := initPVController(t, testCtx, provisionDelaySeconds)
if err != nil {
t.Fatalf("Failed to create PV controller: %v", err)
}
-go ctrl.Run(testCtx.ctx)
+go ctrl.Run(testCtx.Ctx)
// Start informer factory after all controllers are configured and running.
-informerFactory.Start(testCtx.ctx.Done())
-informerFactory.WaitForCacheSync(testCtx.ctx.Done())
+informerFactory.Start(testCtx.Ctx.Done())
+informerFactory.WaitForCacheSync(testCtx.Ctx.Done())

// Create shared objects
// Create nodes

@@ -1081,17 +1082,17 @@ func setupCluster(t *testing.T, nsName string, numberOfNodes int, resyncPeriod t
return &testConfig{
client: clientset,
ns: ns,
-stop: testCtx.ctx.Done(),
+stop: testCtx.Ctx.Done(),
teardown: func() {
klog.Infof("test cluster %q start to tear down", ns)
deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
-cleanupTest(t, testCtx)
+testutil.CleanupTest(t, testCtx)
},
}
}

-func initPVController(t *testing.T, testCtx *testContext, provisionDelaySeconds int) (*persistentvolume.PersistentVolumeController, informers.SharedInformerFactory, error) {
-clientset := testCtx.clientSet
+func initPVController(t *testing.T, testCtx *testutil.TestContext, provisionDelaySeconds int) (*persistentvolume.PersistentVolumeController, informers.SharedInformerFactory, error) {
+clientset := testCtx.ClientSet
// Informers factory for controllers
informerFactory := informers.NewSharedInformerFactory(clientset, 0)

@@ -31,6 +31,7 @@ import (
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/features"
+testutil "k8s.io/kubernetes/test/integration/util"
)

var (

@@ -46,28 +47,31 @@ func mergeNodeLabels(node *v1.Node, labels map[string]string) *v1.Node {
}

func setupClusterForVolumeCapacityPriority(t *testing.T, nsName string, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig {
-textCtx := initTestSchedulerWithOptions(t, initTestAPIServer(t, nsName, nil), resyncPeriod)
-clientset := textCtx.clientSet
-ns := textCtx.ns.Name
-
-ctrl, informerFactory, err := initPVController(t, textCtx, provisionDelaySeconds)
+testCtx := testutil.InitTestSchedulerWithOptions(t, testutil.InitTestAPIServer(t, nsName, nil), resyncPeriod)
+testutil.SyncInformerFactory(testCtx)
+go testCtx.Scheduler.Run(testCtx.Ctx)
+
+clientset := testCtx.ClientSet
+ns := testCtx.NS.Name
+
+ctrl, informerFactory, err := initPVController(t, testCtx, provisionDelaySeconds)
if err != nil {
t.Fatalf("Failed to create PV controller: %v", err)
}
-go ctrl.Run(textCtx.ctx)
+go ctrl.Run(testCtx.Ctx)

// Start informer factory after all controllers are configured and running.
-informerFactory.Start(textCtx.ctx.Done())
-informerFactory.WaitForCacheSync(textCtx.ctx.Done())
+informerFactory.Start(testCtx.Ctx.Done())
+informerFactory.WaitForCacheSync(testCtx.Ctx.Done())

return &testConfig{
client: clientset,
ns: ns,
-stop: textCtx.ctx.Done(),
+stop: testCtx.Ctx.Done(),
teardown: func() {
klog.Infof("test cluster %q start to tear down", ns)
deleteTestObjects(clientset, ns, metav1.DeleteOptions{})
-cleanupTest(t, textCtx)
+testutil.CleanupTest(t, testCtx)
},
}
}