test: use cancelation from ktesting

The return type of ktesting.NewTestContext is now a TContext. Code
that combined it with context.WithCancel often no longer compiled
(a ktesting.TContext cannot be overwritten with a plain
context.Context). This is a good thing: all of that code can be
simplified by letting ktesting handle the cancelation.
Patrick Ohly 2023-12-25 19:40:56 +01:00
parent 3df07e446b
commit 1d653e6185
34 changed files with 458 additions and 695 deletions
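
A minimal before/after sketch of the pattern this commit applies across the files below. It assumes the in-tree k8s.io/kubernetes/test/utils/ktesting import path as used by these tests; the test name and the doSomething helper are hypothetical stand-ins.

package example

import (
    "context"
    "testing"

    "k8s.io/kubernetes/test/utils/ktesting"
)

// doSomething is a hypothetical stand-in for whatever the test drives
// with the context (controllers, clients, pollers, ...).
func doSomething(ctx context.Context) {}

func TestExample(t *testing.T) {
    // Old pattern: now that NewTestContext returns a ktesting.TContext,
    // the reassignment no longer compiles, because context.WithCancel
    // returns a plain context.Context that cannot overwrite ctx:
    //
    //   _, ctx := ktesting.NewTestContext(t)
    //   ctx, cancel := context.WithCancel(ctx)
    //   defer cancel()

    // New pattern: Init returns a TContext that is canceled
    // automatically when the test ends, so the WithCancel/defer
    // cancel() boilerplate simply goes away.
    tCtx := ktesting.Init(t)

    // A TContext implements context.Context, so it can be passed
    // wherever a context is expected.
    doSomething(tCtx)

    // Where teardown must happen in a specific order (see the apiserver
    // setup helpers below), the context can still be canceled early,
    // with a reason:
    tCtx.Cancel("tearing down apiserver")
}

The diffs below apply exactly this transformation: the WithCancel boilerplate disappears, and tCtx is passed directly to the code under test.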

View File

@@ -1538,12 +1538,10 @@ func TestStalePodDisruption(t *testing.T) {
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
dc, _ := newFakeDisruptionControllerWithTime(ctx, now)
go dc.Run(ctx)
if _, err := dc.coreClient.CoreV1().Pods(tc.pod.Namespace).Create(ctx, tc.pod, metav1.CreateOptions{}); err != nil {
tCtx := ktesting.Init(t)
dc, _ := newFakeDisruptionControllerWithTime(tCtx, now)
go dc.Run(tCtx)
if _, err := dc.coreClient.CoreV1().Pods(tc.pod.Namespace).Create(tCtx, tc.pod, metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to create pod: %v", err)
}
dc.clock.Sleep(tc.timePassed)
@@ -1552,7 +1550,7 @@ func TestStalePodDisruption(t *testing.T) {
}
diff := ""
if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
pod, err := dc.kubeClient.CoreV1().Pods(tc.pod.Namespace).Get(ctx, tc.pod.Name, metav1.GetOptions{})
pod, err := dc.kubeClient.CoreV1().Pods(tc.pod.Namespace).Get(tCtx, tc.pod.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed getting updated pod: %v", err)
}

View File

@@ -137,11 +137,9 @@ func expectNoPodUpdate(t *testing.T, ch <-chan kubetypes.PodUpdate) {
}
func TestNewPodAdded(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
channel, ch, config := createPodConfigTester(ctx, PodConfigNotificationIncremental)
channel, ch, config := createPodConfigTester(tCtx, PodConfigNotificationIncremental)
// see an update
podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"))
@@ -153,11 +151,9 @@ func TestNewPodAdded(t *testing.T) {
}
func TestNewPodAddedInvalidNamespace(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
channel, ch, config := createPodConfigTester(ctx, PodConfigNotificationIncremental)
channel, ch, config := createPodConfigTester(tCtx, PodConfigNotificationIncremental)
// see an update
podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", ""))
@@ -169,11 +165,9 @@ func TestNewPodAddedInvalidNamespace(t *testing.T) {
}
func TestNewPodAddedDefaultNamespace(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
channel, ch, config := createPodConfigTester(ctx, PodConfigNotificationIncremental)
channel, ch, config := createPodConfigTester(tCtx, PodConfigNotificationIncremental)
// see an update
podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "default"))
@@ -185,11 +179,9 @@ func TestNewPodAddedDefaultNamespace(t *testing.T) {
}
func TestNewPodAddedDifferentNamespaces(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
channel, ch, config := createPodConfigTester(ctx, PodConfigNotificationIncremental)
channel, ch, config := createPodConfigTester(tCtx, PodConfigNotificationIncremental)
// see an update
podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "default"))
@@ -206,11 +198,9 @@ func TestNewPodAddedDifferentNamespaces(t *testing.T) {
}
func TestInvalidPodFiltered(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
channel, ch, _ := createPodConfigTester(ctx, PodConfigNotificationIncremental)
channel, ch, _ := createPodConfigTester(tCtx, PodConfigNotificationIncremental)
// see an update
podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"))
@@ -224,11 +214,9 @@ func TestInvalidPodFiltered(t *testing.T) {
}
func TestNewPodAddedSnapshotAndUpdates(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
channel, ch, config := createPodConfigTester(ctx, PodConfigNotificationSnapshotAndUpdates)
channel, ch, config := createPodConfigTester(tCtx, PodConfigNotificationSnapshotAndUpdates)
// see a set
podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"))
@@ -246,11 +234,9 @@ func TestNewPodAddedSnapshotAndUpdates(t *testing.T) {
}
func TestNewPodAddedSnapshot(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
channel, ch, config := createPodConfigTester(ctx, PodConfigNotificationSnapshot)
channel, ch, config := createPodConfigTester(tCtx, PodConfigNotificationSnapshot)
// see a set
podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"))
@@ -268,11 +254,9 @@ func TestNewPodAddedSnapshot(t *testing.T) {
}
func TestNewPodAddedUpdatedRemoved(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
channel, ch, _ := createPodConfigTester(ctx, PodConfigNotificationIncremental)
channel, ch, _ := createPodConfigTester(tCtx, PodConfigNotificationIncremental)
// should register an add
podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"))
@@ -295,11 +279,9 @@ func TestNewPodAddedUpdatedRemoved(t *testing.T) {
}
func TestNewPodAddedDelete(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
channel, ch, _ := createPodConfigTester(ctx, PodConfigNotificationIncremental)
channel, ch, _ := createPodConfigTester(tCtx, PodConfigNotificationIncremental)
// should register an add
addedPod := CreateValidPod("foo", "new")
@@ -318,11 +300,9 @@ func TestNewPodAddedDelete(t *testing.T) {
}
func TestNewPodAddedUpdatedSet(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
channel, ch, _ := createPodConfigTester(ctx, PodConfigNotificationIncremental)
channel, ch, _ := createPodConfigTester(tCtx, PodConfigNotificationIncremental)
// should register an add
podUpdate := CreatePodUpdate(kubetypes.ADD, TestSource, CreateValidPod("foo", "new"), CreateValidPod("foo2", "new"), CreateValidPod("foo3", "new"))
@@ -344,9 +324,7 @@ func TestNewPodAddedUpdatedSet(t *testing.T) {
}
func TestNewPodAddedSetReconciled(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
// Create and touch new test pods, return the new pods and touched pod. We should create new pod list
// before touching to avoid data race.
@@ -370,7 +348,7 @@ func TestNewPodAddedSetReconciled(t *testing.T) {
} {
var podWithStatusChange *v1.Pod
pods, _ := newTestPods(false, false)
channel, ch, _ := createPodConfigTester(ctx, PodConfigNotificationIncremental)
channel, ch, _ := createPodConfigTester(tCtx, PodConfigNotificationIncremental)
// Use SET to initialize the config, especially initialize the source set
channel <- CreatePodUpdate(kubetypes.SET, TestSource, pods...)
@@ -393,9 +371,7 @@ func TestNewPodAddedSetReconciled(t *testing.T) {
}
func TestInitialEmptySet(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
for _, test := range []struct {
mode PodConfigNotificationMode
@@ -405,7 +381,7 @@ func TestInitialEmptySet(t *testing.T) {
{PodConfigNotificationSnapshot, kubetypes.SET},
{PodConfigNotificationSnapshotAndUpdates, kubetypes.SET},
} {
channel, ch, _ := createPodConfigTester(ctx, test.mode)
channel, ch, _ := createPodConfigTester(tCtx, test.mode)
// should register an empty PodUpdate operation
podUpdate := CreatePodUpdate(kubetypes.SET, TestSource)
@@ -422,11 +398,9 @@ func TestInitialEmptySet(t *testing.T) {
}
func TestPodUpdateAnnotations(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
channel, ch, _ := createPodConfigTester(ctx, PodConfigNotificationIncremental)
channel, ch, _ := createPodConfigTester(tCtx, PodConfigNotificationIncremental)
pod := CreateValidPod("foo2", "new")
pod.Annotations = make(map[string]string)
@@ -455,11 +429,9 @@ func TestPodUpdateAnnotations(t *testing.T) {
}
func TestPodUpdateLabels(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
channel, ch, _ := createPodConfigTester(ctx, PodConfigNotificationIncremental)
channel, ch, _ := createPodConfigTester(tCtx, PodConfigNotificationIncremental)
pod := CreateValidPod("foo2", "new")
pod.Labels = make(map[string]string)
@@ -479,11 +451,9 @@ func TestPodUpdateLabels(t *testing.T) {
}
func TestPodConfigRace(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
eventBroadcaster := record.NewBroadcaster(record.WithContext(ctx))
eventBroadcaster := record.NewBroadcaster(record.WithContext(tCtx))
config := NewPodConfig(PodConfigNotificationIncremental, eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "kubelet"}), &mockPodStartupSLIObserver{})
seenSources := sets.NewString(TestSource)
var wg sync.WaitGroup
@@ -491,7 +461,7 @@ func TestPodConfigRace(t *testing.T) {
wg.Add(2)
go func() {
ctx, cancel := context.WithCancel(ctx)
ctx, cancel := context.WithCancel(tCtx)
defer cancel()
defer wg.Done()
for i := 0; i < iterations; i++ {

View File

@@ -75,11 +75,10 @@ func setup(t *testing.T, groupVersions ...schema.GroupVersion) (context.Context,
return setupWithResources(t, groupVersions, nil)
}
func setupWithResources(t *testing.T, groupVersions []schema.GroupVersion, resources []schema.GroupVersionResource) (context.Context, clientset.Interface, *restclient.Config, framework.TearDownFunc) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
func setupWithResources(t *testing.T, groupVersions []schema.GroupVersion, resources []schema.GroupVersionResource) (context.Context, clientset.Interface /* TODO (pohly): return ktesting.TContext */, *restclient.Config, framework.TearDownFunc) {
tCtx := ktesting.Init(t)
client, config, teardown := framework.StartTestServer(ctx, t, framework.TestServerSetup{
client, config, teardown := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerConfig: func(config *controlplane.Config) {
if len(groupVersions) > 0 || len(resources) > 0 {
resourceConfig := controlplane.DefaultAPIResourceConfigSource()
@@ -91,11 +90,11 @@ func setupWithResources(t *testing.T, groupVersions []schema.GroupVersion, resou
})
newTeardown := func() {
cancel()
tCtx.Cancel("tearing down apiserver")
teardown()
}
return ctx, client, config, newTeardown
return tCtx, client, config, newTeardown
}
func verifyStatusCode(t *testing.T, transport http.RoundTripper, verb, URL, body string, expectedStatusCode int) {
@@ -375,12 +374,10 @@ func TestListOptions(t *testing.T) {
for _, watchCacheEnabled := range []bool{true, false} {
t.Run(fmt.Sprintf("watchCacheEnabled=%t", watchCacheEnabled), func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
var storageTransport *storagebackend.TransportConfig
clientSet, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
clientSet, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.Etcd.EnableWatchCache = watchCacheEnabled
storageTransport = &opts.Etcd.StorageConfig.Transport
@@ -397,7 +394,7 @@ func TestListOptions(t *testing.T) {
for i := 0; i < 15; i++ {
rs := newRS(ns.Name)
rs.Name = fmt.Sprintf("test-%d", i)
created, err := rsClient.Create(context.Background(), rs, metav1.CreateOptions{})
created, err := rsClient.Create(tCtx, rs, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
@@ -407,7 +404,7 @@ func TestListOptions(t *testing.T) {
// delete the first 5, and then compact them
if i < 5 {
var zero int64
if err := rsClient.Delete(context.Background(), rs.Name, metav1.DeleteOptions{GracePeriodSeconds: &zero}); err != nil {
if err := rsClient.Delete(tCtx, rs.Name, metav1.DeleteOptions{GracePeriodSeconds: &zero}); err != nil {
t.Fatal(err)
}
oldestUncompactedRv = created.ResourceVersion
@@ -427,12 +424,12 @@ func TestListOptions(t *testing.T) {
if err != nil {
t.Fatal(err)
}
_, err = kvClient.Compact(context.Background(), int64(revision))
_, err = kvClient.Compact(tCtx, int64(revision))
if err != nil {
t.Fatal(err)
}
listObj, err := rsClient.List(context.Background(), metav1.ListOptions{
listObj, err := rsClient.List(tCtx, metav1.ListOptions{
Limit: 6,
})
if err != nil {
@@ -618,11 +615,9 @@ func TestListResourceVersion0(t *testing.T) {
for _, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
clientSet, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
clientSet, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.Etcd.EnableWatchCache = tc.watchCacheEnabled
},
@@ -637,7 +632,7 @@ func TestListResourceVersion0(t *testing.T) {
for i := 0; i < 10; i++ {
rs := newRS(ns.Name)
rs.Name = fmt.Sprintf("test-%d", i)
if _, err := rsClient.Create(ctx, rs, metav1.CreateOptions{}); err != nil {
if _, err := rsClient.Create(tCtx, rs, metav1.CreateOptions{}); err != nil {
t.Fatal(err)
}
}
@@ -645,7 +640,7 @@ func TestListResourceVersion0(t *testing.T) {
if tc.watchCacheEnabled {
// poll until the watch cache has the full list in memory
err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
list, err := clientSet.AppsV1().ReplicaSets(ns.Name).List(ctx, metav1.ListOptions{ResourceVersion: "0"})
list, err := clientSet.AppsV1().ReplicaSets(ns.Name).List(tCtx, metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
return false, err
}
@@ -657,12 +652,12 @@ func TestListResourceVersion0(t *testing.T) {
}
pagerFn := func(opts metav1.ListOptions) (runtime.Object, error) {
return rsClient.List(ctx, opts)
return rsClient.List(tCtx, opts)
}
p := pager.New(pager.SimplePageFunc(pagerFn))
p.PageSize = 3
listObj, _, err := p.List(ctx, metav1.ListOptions{ResourceVersion: "0"})
listObj, _, err := p.List(tCtx, metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
t.Fatalf("Unexpected list error: %v", err)
}

View File

@@ -136,9 +136,7 @@ func TestClientCARecreate(t *testing.T) {
}
func testClientCA(t *testing.T, recreate bool) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
frontProxyCA, err := newTestCAWithClient(
pkix.Name{
@@ -175,7 +173,7 @@ func testClientCA(t *testing.T, recreate bool) {
clientCAFilename := ""
frontProxyCAFilename := ""
kubeClient, kubeconfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
kubeClient, kubeconfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.GenericServerRunOptions.MaxRequestBodyBytes = 1024 * 1024
clientCAFilename = opts.Authentication.ClientCert.ClientCA
@@ -305,7 +303,7 @@ func testClientCA(t *testing.T, recreate bool) {
}
// Call an endpoint to make sure we are authenticated
_, err = testClient.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
_, err = testClient.CoreV1().Nodes().List(tCtx, metav1.ListOptions{})
if err != nil {
t.Error(err)
}
@@ -473,13 +471,11 @@ func TestServingCertRecreate(t *testing.T) {
}
func testServingCert(t *testing.T, recreate bool) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
var servingCertPath string
_, kubeconfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
_, kubeconfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.GenericServerRunOptions.MaxRequestBodyBytes = 1024 * 1024
servingCertPath = opts.SecureServing.ServerCert.CertDirectory
@@ -518,11 +514,9 @@ func testServingCert(t *testing.T, recreate bool) {
func TestSNICert(t *testing.T) {
var servingCertPath string
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
_, kubeconfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
_, kubeconfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.GenericServerRunOptions.MaxRequestBodyBytes = 1024 * 1024
servingCertPath = opts.SecureServing.ServerCert.CertDirectory

View File

@@ -48,10 +48,9 @@ const (
)
func setup(t testing.TB, maxReadonlyRequestsInFlight, maxMutatingRequestsInFlight int) (context.Context, *rest.Config, framework.TearDownFunc) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
tCtx := ktesting.Init(t)
_, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
_, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Ensure all clients are allowed to send requests.
opts.Authorization.Modes = []string{"AlwaysAllow"}
@@ -61,10 +60,10 @@ func setup(t testing.TB, maxReadonlyRequestsInFlight, maxMutatingRequestsInFligh
})
newTeardown := func() {
cancel()
tCtx.Cancel("tearing down apiserver")
tearDownFn()
}
return ctx, kubeConfig, newTeardown
return tCtx, kubeConfig, newTeardown
}
func TestPriorityLevelIsolation(t *testing.T) {

View File

@@ -145,11 +145,8 @@ func (d *noxuDelayingAuthorizer) Authorize(ctx context.Context, a authorizer.Att
// Secondarily, this test also checks the observed seat utilizations against values derived from expecting that
// the throughput observed by the client equals the execution throughput observed by the server.
func TestConcurrencyIsolation(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
_, kubeConfig, closeFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
_, kubeConfig, closeFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Ensure all clients are allowed to send requests.
opts.Authorization.Modes = []string{"AlwaysAllow"}
@@ -190,7 +187,7 @@ func TestConcurrencyIsolation(t *testing.T) {
wg.Add(noxu1NumGoroutines)
streamRequests(noxu1NumGoroutines, func() {
start := time.Now()
_, err := noxu1Client.CoreV1().Namespaces().Get(ctx, "default", metav1.GetOptions{})
_, err := noxu1Client.CoreV1().Namespaces().Get(tCtx, "default", metav1.GetOptions{})
duration := time.Since(start).Seconds()
noxu1LatMeasure.update(duration)
if err != nil {
@@ -203,7 +200,7 @@ func TestConcurrencyIsolation(t *testing.T) {
wg.Add(noxu2NumGoroutines)
streamRequests(noxu2NumGoroutines, func() {
start := time.Now()
_, err := noxu2Client.CoreV1().Namespaces().Get(ctx, "default", metav1.GetOptions{})
_, err := noxu2Client.CoreV1().Namespaces().Get(tCtx, "default", metav1.GetOptions{})
duration := time.Since(start).Seconds()
noxu2LatMeasure.update(duration)
if err != nil {

View File

@@ -17,7 +17,6 @@ limitations under the License.
package apiserver
import (
"context"
"fmt"
"strings"
"testing"
@@ -33,11 +32,9 @@ import (
// Tests that the apiserver limits the number of operations in a json patch.
func TestMaxJSONPatchOperations(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
clientSet, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
clientSet, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.GenericServerRunOptions.MaxRequestBodyBytes = 1024 * 1024
},
@@ -55,13 +52,13 @@ func TestMaxJSONPatchOperations(t *testing.T) {
Name: "test",
},
}
_, err := clientSet.CoreV1().Secrets("default").Create(ctx, secret, metav1.CreateOptions{})
_, err := clientSet.CoreV1().Secrets("default").Create(tCtx, secret, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
err = c.Patch(types.JSONPatchType).AbsPath(fmt.Sprintf("/api/v1/namespaces/default/secrets/test")).
Body(hugePatch).Do(ctx).Error()
Body(hugePatch).Do(tCtx).Error()
if err == nil {
t.Fatalf("unexpected no error")
}

View File

@@ -17,7 +17,6 @@ limitations under the License.
package apiserver
import (
"context"
"strings"
"testing"
@@ -31,11 +30,8 @@ import (
// Tests that the apiserver limits the resource size in write operations.
func TestMaxResourceSize(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
clientSet, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{})
tCtx := ktesting.Init(t)
clientSet, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{})
defer tearDownFn()
hugeData := []byte(strings.Repeat("x", 3*1024*1024+1))
@@ -45,7 +41,7 @@ func TestMaxResourceSize(t *testing.T) {
c := clientSet.CoreV1().RESTClient()
t.Run("Create should limit the request body size", func(t *testing.T) {
err := c.Post().AbsPath("/api/v1/namespaces/default/pods").
Body(hugeData).Do(ctx).Error()
Body(hugeData).Do(tCtx).Error()
if err == nil {
t.Fatalf("unexpected no error")
}
@@ -61,14 +57,14 @@ func TestMaxResourceSize(t *testing.T) {
Name: "test",
},
}
_, err := clientSet.CoreV1().Secrets("default").Create(ctx, secret, metav1.CreateOptions{})
_, err := clientSet.CoreV1().Secrets("default").Create(tCtx, secret, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
t.Run("Update should limit the request body size", func(t *testing.T) {
err = c.Put().AbsPath("/api/v1/namespaces/default/secrets/test").
Body(hugeData).Do(ctx).Error()
Body(hugeData).Do(tCtx).Error()
if err == nil {
t.Fatalf("unexpected no error")
}
@@ -79,7 +75,7 @@ func TestMaxResourceSize(t *testing.T) {
})
t.Run("Patch should limit the request body size", func(t *testing.T) {
err = c.Patch(types.JSONPatchType).AbsPath("/api/v1/namespaces/default/secrets/test").
Body(hugeData).Do(ctx).Error()
Body(hugeData).Do(tCtx).Error()
if err == nil {
t.Fatalf("unexpected no error")
}
@@ -94,7 +90,7 @@ func TestMaxResourceSize(t *testing.T) {
}
patchBody := []byte(`[{"op":"add","path":"/foo","value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}]`)
err = rest.Patch(types.JSONPatchType).AbsPath("/api/v1/namespaces/default/secrets/test").
Body(patchBody).Do(ctx).Error()
Body(patchBody).Do(tCtx).Error()
if err != nil && !apierrors.IsBadRequest(err) {
t.Errorf("expected success or bad request err, got %v", err)
}
@@ -105,7 +101,7 @@ func TestMaxResourceSize(t *testing.T) {
}
patchBody := []byte(`[{"op":"add","path":"/foo","value":0` + strings.Repeat(" ", 3*1024*1024-100) + `}]`)
err = rest.Patch(types.JSONPatchType).AbsPath("/api/v1/namespaces/default/secrets/test").
Body(patchBody).Do(ctx).Error()
Body(patchBody).Do(tCtx).Error()
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -116,7 +112,7 @@ func TestMaxResourceSize(t *testing.T) {
}
patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`)
err = rest.Patch(types.MergePatchType).AbsPath("/api/v1/namespaces/default/secrets/test").
Body(patchBody).Do(ctx).Error()
Body(patchBody).Do(tCtx).Error()
if err != nil && !apierrors.IsBadRequest(err) {
t.Errorf("expected success or bad request err, got %v", err)
}
@@ -127,7 +123,7 @@ func TestMaxResourceSize(t *testing.T) {
}
patchBody := []byte(`{"value":0` + strings.Repeat(" ", 3*1024*1024-100) + `}`)
err = rest.Patch(types.MergePatchType).AbsPath("/api/v1/namespaces/default/secrets/test").
Body(patchBody).Do(ctx).Error()
Body(patchBody).Do(tCtx).Error()
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -138,7 +134,7 @@ func TestMaxResourceSize(t *testing.T) {
}
patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`)
err = rest.Patch(types.StrategicMergePatchType).AbsPath("/api/v1/namespaces/default/secrets/test").
Body(patchBody).Do(ctx).Error()
Body(patchBody).Do(tCtx).Error()
if err != nil && !apierrors.IsBadRequest(err) {
t.Errorf("expected success or bad request err, got %v", err)
}
@@ -149,7 +145,7 @@ func TestMaxResourceSize(t *testing.T) {
}
patchBody := []byte(`{"value":0` + strings.Repeat(" ", 3*1024*1024-100) + `}`)
err = rest.Patch(types.StrategicMergePatchType).AbsPath("/api/v1/namespaces/default/secrets/test").
Body(patchBody).Do(ctx).Error()
Body(patchBody).Do(tCtx).Error()
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -160,7 +156,7 @@ func TestMaxResourceSize(t *testing.T) {
}
patchBody := []byte(`{"value":` + strings.Repeat("[", 3*1024*1024/2-100) + strings.Repeat("]", 3*1024*1024/2-100) + `}`)
err = rest.Patch(types.ApplyPatchType).Param("fieldManager", "test").AbsPath("/api/v1/namespaces/default/secrets/test").
Body(patchBody).Do(ctx).Error()
Body(patchBody).Do(tCtx).Error()
if err != nil && !apierrors.IsBadRequest(err) {
t.Errorf("expected success or bad request err, got %#v", err)
}
@@ -171,14 +167,14 @@ func TestMaxResourceSize(t *testing.T) {
}
patchBody := []byte(`{"apiVersion":"v1","kind":"Secret"` + strings.Repeat(" ", 3*1024*1024-100) + `}`)
err = rest.Patch(types.ApplyPatchType).Param("fieldManager", "test").AbsPath("/api/v1/namespaces/default/secrets/test").
Body(patchBody).Do(ctx).Error()
Body(patchBody).Do(tCtx).Error()
if err != nil {
t.Errorf("unexpected error: %v", err)
}
})
t.Run("Delete should limit the request body size", func(t *testing.T) {
err = c.Delete().AbsPath("/api/v1/namespaces/default/secrets/test").
Body(hugeData).Do(ctx).Error()
Body(hugeData).Do(tCtx).Error()
if err == nil {
t.Fatalf("unexpected no error")
}
@@ -202,7 +198,7 @@ values: ` + strings.Repeat("[", 3*1024*1024))
SetHeader("Content-Type", "application/yaml").
AbsPath("/api/v1/namespaces/default/configmaps").
Body(yamlBody).
DoRaw(ctx)
DoRaw(tCtx)
if !apierrors.IsRequestEntityTooLargeError(err) {
t.Errorf("expected too large error, got %v", err)
}
@@ -225,7 +221,7 @@ values: ` + strings.Repeat("[", 3*1024*1024/2-500) + strings.Repeat("]", 3*1024*
SetHeader("Content-Type", "application/yaml").
AbsPath("/api/v1/namespaces/default/configmaps").
Body(yamlBody).
DoRaw(ctx)
DoRaw(tCtx)
if !apierrors.IsBadRequest(err) {
t.Errorf("expected bad request, got %v", err)
}
@@ -248,7 +244,7 @@ values: ` + strings.Repeat("[", 3*1024*1024-1000))
SetHeader("Content-Type", "application/yaml").
AbsPath("/api/v1/namespaces/default/configmaps").
Body(yamlBody).
DoRaw(ctx)
DoRaw(tCtx)
if !apierrors.IsBadRequest(err) {
t.Errorf("expected bad request, got %v", err)
}
@@ -269,7 +265,7 @@ values: ` + strings.Repeat("[", 3*1024*1024-1000))
SetHeader("Content-Type", "application/json").
AbsPath("/api/v1/namespaces/default/configmaps").
Body(jsonBody).
DoRaw(ctx)
DoRaw(tCtx)
if !apierrors.IsRequestEntityTooLargeError(err) {
t.Errorf("expected too large error, got %v", err)
}
@@ -293,7 +289,7 @@ values: ` + strings.Repeat("[", 3*1024*1024-1000))
SetHeader("Content-Type", "application/json").
AbsPath("/api/v1/namespaces/default/configmaps").
Body(jsonBody).
DoRaw(ctx)
DoRaw(tCtx)
// TODO(liggitt): expect bad request on deep nesting, rather than success on dropped unknown field data
if err != nil && !apierrors.IsBadRequest(err) {
t.Errorf("expected bad request, got %v", err)
@@ -318,7 +314,7 @@ values: ` + strings.Repeat("[", 3*1024*1024-1000))
SetHeader("Content-Type", "application/json").
AbsPath("/api/v1/namespaces/default/configmaps").
Body(jsonBody).
DoRaw(ctx)
DoRaw(tCtx)
if !apierrors.IsBadRequest(err) {
t.Errorf("expected bad request, got %v", err)
}

View File

@@ -17,7 +17,6 @@ limitations under the License.
package openapi
import (
"context"
"encoding/json"
"net/http"
"testing"
@@ -56,10 +55,7 @@ func TestEnablingOpenAPIEnumTypes(t *testing.T) {
},
} {
t.Run(tc.name, func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.OpenAPIEnums, tc.featureEnabled)()
getDefinitionsFn := openapi.GetOpenAPIDefinitionsWithoutDisabledFeatures(func(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
@@ -79,7 +75,7 @@ func TestEnablingOpenAPIEnumTypes(t *testing.T) {
return defs
})
_, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
_, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerConfig: func(config *controlplane.Config) {
config.GenericConfig.OpenAPIConfig = framework.DefaultOpenAPIConfig()
config.GenericConfig.OpenAPIConfig.GetDefinitions = getDefinitionsFn

View File

@@ -43,11 +43,8 @@ import (
)
func TestOpenAPIV3SpecRoundTrip(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
_, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{})
tCtx := ktesting.Init(t)
_, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{})
defer tearDownFn()
paths := []string{
@@ -189,11 +186,8 @@ func TestOpenAPIV3ProtoRoundtrip(t *testing.T) {
// See https://github.com/kubernetes/kubernetes/issues/106387 for more details
t.Skip("Skipping OpenAPI V3 Proto roundtrip test")
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
_, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{})
tCtx := ktesting.Init(t)
_, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{})
defer tearDownFn()
rt, err := restclient.TransportFor(kubeConfig)

View File

@@ -78,11 +78,8 @@ Bgqc+dJN9xS9Ah5gLiGQJ6C4niUA11piCpvMsy+j/LQ1Erx47KMar5fuMXYk7iPq
-----END CERTIFICATE-----
`))
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
clientSet, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
clientSet, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.GenericServerRunOptions.MaxRequestBodyBytes = 1024 * 1024
// I have no idea what this cert is, but it doesn't matter, we just want something that always fails validation
@@ -97,7 +94,7 @@ Bgqc+dJN9xS9Ah5gLiGQJ6C4niUA11piCpvMsy+j/LQ1Erx47KMar5fuMXYk7iPq
}))
defer fakeKubeletServer.Close()
pod := prepareFakeNodeAndPod(ctx, t, clientSet, fakeKubeletServer)
pod := prepareFakeNodeAndPod(tCtx, t, clientSet, fakeKubeletServer)
insecureResult := clientSet.CoreV1().Pods("ns").GetLogs(pod.Name, &corev1.PodLogOptions{InsecureSkipTLSVerifyBackend: true}).Do(context.TODO())
if err := insecureResult.Error(); err != nil {
@@ -109,7 +106,7 @@ Bgqc+dJN9xS9Ah5gLiGQJ6C4niUA11piCpvMsy+j/LQ1Erx47KMar5fuMXYk7iPq
t.Fatal(insecureStatusCode)
}
secureResult := clientSet.CoreV1().Pods("ns").GetLogs(pod.Name, &corev1.PodLogOptions{}).Do(ctx)
secureResult := clientSet.CoreV1().Pods("ns").GetLogs(pod.Name, &corev1.PodLogOptions{}).Do(tCtx)
if err := secureResult.Error(); err == nil || !strings.Contains(err.Error(), "x509: certificate signed by unknown authority") {
t.Fatal(err)
}

View File

@@ -85,11 +85,8 @@ func multiEtcdSetup(ctx context.Context, t *testing.T) (clientset.Interface, fra
}
func TestWatchCacheUpdatedByEtcd(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
c, closeFn := multiEtcdSetup(ctx, t)
tCtx := ktesting.Init(t)
c, closeFn := multiEtcdSetup(tCtx, t)
defer closeFn()
makeConfigMap := func(name string) *v1.ConfigMap {
@@ -102,11 +99,11 @@ func TestWatchCacheUpdatedByEtcd(t *testing.T) {
return &v1.Event{ObjectMeta: metav1.ObjectMeta{Name: name}}
}
cm, err := c.CoreV1().ConfigMaps("default").Create(ctx, makeConfigMap("name"), metav1.CreateOptions{})
cm, err := c.CoreV1().ConfigMaps("default").Create(tCtx, makeConfigMap("name"), metav1.CreateOptions{})
if err != nil {
t.Errorf("Couldn't create configmap: %v", err)
}
ev, err := c.CoreV1().Events("default").Create(ctx, makeEvent("name"), metav1.CreateOptions{})
ev, err := c.CoreV1().Events("default").Create(tCtx, makeEvent("name"), metav1.CreateOptions{})
if err != nil {
t.Errorf("Couldn't create event: %v", err)
}
@@ -120,7 +117,7 @@ func TestWatchCacheUpdatedByEtcd(t *testing.T) {
// resources (being the last updates).
t.Logf("Waiting for configmaps watchcache synced to %s", cm.ResourceVersion)
if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
res, err := c.CoreV1().ConfigMaps("default").List(ctx, listOptions)
res, err := c.CoreV1().ConfigMaps("default").List(tCtx, listOptions)
if err != nil {
return false, nil
}
@@ -130,7 +127,7 @@ func TestWatchCacheUpdatedByEtcd(t *testing.T) {
}
t.Logf("Waiting for events watchcache synced to %s", ev.ResourceVersion)
if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
res, err := c.CoreV1().Events("default").List(ctx, listOptions)
res, err := c.CoreV1().Events("default").List(tCtx, listOptions)
if err != nil {
return false, nil
}
@@ -141,14 +138,14 @@ func TestWatchCacheUpdatedByEtcd(t *testing.T) {
// Create a secret, that is stored in the same etcd as configmap, but
// different than events.
se, err := c.CoreV1().Secrets("default").Create(ctx, makeSecret("name"), metav1.CreateOptions{})
se, err := c.CoreV1().Secrets("default").Create(tCtx, makeSecret("name"), metav1.CreateOptions{})
if err != nil {
t.Errorf("Couldn't create secret: %v", err)
}
t.Logf("Waiting for configmaps watchcache synced to %s", se.ResourceVersion)
if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
res, err := c.CoreV1().ConfigMaps("default").List(ctx, listOptions)
res, err := c.CoreV1().ConfigMaps("default").List(tCtx, listOptions)
if err != nil {
return false, nil
}
@@ -158,7 +155,7 @@ func TestWatchCacheUpdatedByEtcd(t *testing.T) {
}
t.Logf("Waiting for events watchcache NOT synced to %s", se.ResourceVersion)
if err := wait.Poll(100*time.Millisecond, 5*time.Second, func() (bool, error) {
res, err := c.CoreV1().Events("default").List(ctx, listOptions)
res, err := c.CoreV1().Events("default").List(tCtx, listOptions)
if err != nil {
return false, nil
}
@@ -169,11 +166,8 @@ func TestWatchCacheUpdatedByEtcd(t *testing.T) {
}
func BenchmarkListFromWatchCache(b *testing.B) {
_, ctx := ktesting.NewTestContext(b)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
c, _, tearDownFn := framework.StartTestServer(ctx, b, framework.TestServerSetup{
tCtx := ktesting.Init(b)
c, _, tearDownFn := framework.StartTestServer(tCtx, b, framework.TestServerSetup{
ModifyServerConfig: func(config *controlplane.Config) {
// Switch off endpoints reconciler to avoid unnecessary operations.
config.ExtraConfig.EndpointReconcilerType = reconcilers.NoneEndpointReconcilerType
@@ -194,7 +188,7 @@ func BenchmarkListFromWatchCache(b *testing.B) {
ns := &v1.Namespace{
ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("namespace-%d", index)},
}
ns, err := c.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
ns, err := c.CoreV1().Namespaces().Create(tCtx, ns, metav1.CreateOptions{})
if err != nil {
errCh <- err
return
@@ -206,7 +200,7 @@ func BenchmarkListFromWatchCache(b *testing.B) {
Name: fmt.Sprintf("secret-%d", j),
},
}
_, err := c.CoreV1().Secrets(ns.Name).Create(ctx, secret, metav1.CreateOptions{})
_, err := c.CoreV1().Secrets(ns.Name).Create(tCtx, secret, metav1.CreateOptions{})
if err != nil {
errCh <- err
return
@@ -227,7 +221,7 @@ func BenchmarkListFromWatchCache(b *testing.B) {
ResourceVersion: "0",
}
for i := 0; i < b.N; i++ {
secrets, err := c.CoreV1().Secrets("").List(ctx, opts)
secrets, err := c.CoreV1().Secrets("").List(tCtx, opts)
if err != nil {
b.Errorf("failed to list secrets: %v", err)
}

View File

@@ -58,11 +58,8 @@ func alwaysAlice(req *http.Request) (*authenticator.Response, bool, error) {
}
func TestSubjectAccessReview(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
clientset, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
clientset, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerConfig: func(config *controlplane.Config) {
// Unset BearerToken to disable BearerToken authenticator.
config.GenericConfig.LoopbackClientConfig.BearerToken = ""
@@ -132,7 +129,7 @@ func TestSubjectAccessReview(t *testing.T) {
}
for _, test := range tests {
response, err := clientset.AuthorizationV1().SubjectAccessReviews().Create(ctx, test.sar, metav1.CreateOptions{})
response, err := clientset.AuthorizationV1().SubjectAccessReviews().Create(tCtx, test.sar, metav1.CreateOptions{})
switch {
case err == nil && len(test.expectedError) == 0:
@@ -154,9 +151,7 @@ func TestSubjectAccessReview(t *testing.T) {
}
func TestSelfSubjectAccessReview(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
var mutex sync.Mutex
@@ -174,7 +169,7 @@ func TestSelfSubjectAccessReview(t *testing.T) {
}, true, nil
}
clientset, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
clientset, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerConfig: func(config *controlplane.Config) {
// Unset BearerToken to disable BearerToken authenticator.
config.GenericConfig.LoopbackClientConfig.BearerToken = ""
@@ -235,7 +230,7 @@ func TestSelfSubjectAccessReview(t *testing.T) {
username = test.username
mutex.Unlock()
response, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, test.sar, metav1.CreateOptions{})
response, err := clientset.AuthorizationV1().SelfSubjectAccessReviews().Create(tCtx, test.sar, metav1.CreateOptions{})
switch {
case err == nil && len(test.expectedError) == 0:
@@ -257,11 +252,8 @@ func TestSelfSubjectAccessReview(t *testing.T) {
}
func TestLocalSubjectAccessReview(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
clientset, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
clientset, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerConfig: func(config *controlplane.Config) {
// Unset BearerToken to disable BearerToken authenticator.
config.GenericConfig.LoopbackClientConfig.BearerToken = ""
@@ -359,7 +351,7 @@ func TestLocalSubjectAccessReview(t *testing.T) {
}
for _, test := range tests {
response, err := clientset.AuthorizationV1().LocalSubjectAccessReviews(test.namespace).Create(ctx, test.sar, metav1.CreateOptions{})
response, err := clientset.AuthorizationV1().LocalSubjectAccessReviews(test.namespace).Create(tCtx, test.sar, metav1.CreateOptions{})
switch {
case err == nil && len(test.expectedError) == 0:

View File

@@ -454,11 +454,8 @@ func getTestRequests(namespace string) []testRequest {
//
// TODO(etune): write a fuzz test of the REST API.
func TestAuthModeAlwaysAllow(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
@@ -562,11 +559,8 @@ func getPreviousResourceVersionKey(url, id string) string {
}
func TestAuthModeAlwaysDeny(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
@@ -610,11 +604,8 @@ func TestAuthModeAlwaysDeny(t *testing.T) {
// TestAliceNotForbiddenOrUnauthorized tests a user who is known to
// the authentication system and authorized to do any actions.
func TestAliceNotForbiddenOrUnauthorized(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
@@ -690,11 +681,8 @@ func TestAliceNotForbiddenOrUnauthorized(t *testing.T) {
// the authentication system but not authorized to do any actions
// should receive "Forbidden".
func TestBobIsForbidden(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
@@ -743,11 +731,8 @@ func TestBobIsForbidden(t *testing.T) {
// An authorization module is installed in this scenario for integration
// test purposes, but requests aren't expected to reach it.
func TestUnknownUserIsUnauthorized(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
@@ -819,11 +804,8 @@ func (impersonateAuthorizer) Authorize(ctx context.Context, a authorizer.Attribu
}
func TestImpersonateIsForbidden(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
@@ -1125,13 +1107,11 @@ func (a *trackingAuthorizer) Authorize(ctx context.Context, attributes authorize
// TestAuthorizationAttributeDetermination tests that authorization attributes are built correctly
func TestAuthorizationAttributeDetermination(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
trackingAuthorizer := &trackingAuthorizer{}
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
@@ -1201,11 +1181,9 @@ func TestAuthorizationAttributeDetermination(t *testing.T) {
// TestNamespaceAuthorization tests that authorization can be controlled
// by namespace.
func TestNamespaceAuthorization(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
@@ -1306,11 +1284,9 @@ func TestNamespaceAuthorization(t *testing.T) {
// TestKindAuthorization tests that authorization can be controlled
// by namespace.
func TestKindAuthorization(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
@@ -1393,11 +1369,8 @@ func TestKindAuthorization(t *testing.T) {
// TestReadOnlyAuthorization tests that authorization can be controlled
// by namespace.
func TestReadOnlyAuthorization(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
@@ -1461,10 +1434,7 @@ func TestWebhookTokenAuthenticatorCustomDial(t *testing.T) {
}
func testWebhookTokenAuthenticator(customDialer bool, t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
authServer := newTestWebhookTokenAuthServer()
defer authServer.Close()
var authenticator authenticator.Request
@@ -1480,7 +1450,7 @@ func testWebhookTokenAuthenticator(customDialer bool, t *testing.T) {
t.Fatalf("error starting webhook token authenticator server: %v", err)
}
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}

View File

@@ -18,7 +18,6 @@ package auth
import (
"bytes"
"context"
"fmt"
"io"
"net/http"
@@ -121,13 +120,10 @@ func TestBootstrapTokenAuth(t *testing.T) {
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
authenticator := group.NewAuthenticatedGroupAdder(bearertoken.New(bootstrap.NewTokenAuthenticator(bootstrapSecrets{test.secret})))
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.Authorization.Modes = []string{"AlwaysAllow"}
},

View File

@@ -54,11 +54,8 @@ func TestDynamicClientBuilder(t *testing.T) {
t.Fatalf("parse duration failed: %v", err)
}
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
baseClient, baseConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
baseClient, baseConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.ServiceAccountSigningKeyFile = tmpfile.Name()
opts.ServiceAccountTokenMaxExpiration = maxExpirationDuration
@@ -102,7 +99,7 @@ func TestDynamicClientBuilder(t *testing.T) {
// We want to trigger token rotation here by deleting service account
// the dynamic client was using.
if err = dymClient.CoreV1().ServiceAccounts(ns).Delete(ctx, saName, metav1.DeleteOptions{}); err != nil {
if err = dymClient.CoreV1().ServiceAccounts(ns).Delete(tCtx, saName, metav1.DeleteOptions{}); err != nil {
t.Fatalf("delete service account %s failed: %v", saName, err)
}
time.Sleep(time.Second * 10)

View File

@@ -26,7 +26,6 @@ import (
"reflect"
"strings"
"testing"
"time"
rbacapi "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -538,10 +537,7 @@ func TestRBAC(t *testing.T) {
"user-with-no-permissions": {Name: "user-with-no-permissions"},
})))
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
var tearDownAuthorizerFn func()
defer func() {
if tearDownAuthorizerFn != nil {
@@ -549,7 +545,7 @@ func TestRBAC(t *testing.T) {
}
}()
_, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
_, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
// Also disable namespace lifecycle to work around the test limitation that first creates
@@ -675,23 +671,20 @@ func TestRBAC(t *testing.T) {
}
func TestBootstrapping(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
clientset, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
clientset, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.Authorization.Modes = []string{"RBAC"}
},
})
defer tearDownFn()
watcher, err := clientset.RbacV1().ClusterRoles().Watch(ctx, metav1.ListOptions{ResourceVersion: "0"})
watcher, err := clientset.RbacV1().ClusterRoles().Watch(tCtx, metav1.ListOptions{ResourceVersion: "0"})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
_, err = watchtools.UntilWithoutRetry(ctx, watcher, func(event watch.Event) (bool, error) {
_, err = watchtools.UntilWithoutRetry(tCtx, watcher, func(event watch.Event) (bool, error) {
if event.Type != watch.Added {
return false, nil
}
@@ -701,7 +694,7 @@ func TestBootstrapping(t *testing.T) {
t.Fatalf("unexpected error: %v", err)
}
clusterRoles, err := clientset.RbacV1().ClusterRoles().List(ctx, metav1.ListOptions{})
clusterRoles, err := clientset.RbacV1().ClusterRoles().List(tCtx, metav1.ListOptions{})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@@ -717,7 +710,7 @@ func TestBootstrapping(t *testing.T) {
t.Errorf("missing cluster-admin: %v", clusterRoles)
healthBytes, err := clientset.Discovery().RESTClient().Get().AbsPath("/healthz/poststarthook/rbac/bootstrap-roles").DoRaw(ctx)
healthBytes, err := clientset.Discovery().RESTClient().Get().AbsPath("/healthz/poststarthook/rbac/bootstrap-roles").DoRaw(tCtx)
if err != nil {
t.Error(err)
}
@@ -736,11 +729,8 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) {
etcdConfig := framework.SharedEtcd()
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Ensure we're using the same etcd across apiserver restarts.
opts.Etcd.StorageConfig = *etcdConfig
@@ -751,7 +741,7 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) {
// Modify the default RBAC discovery ClusterRoleBindings to look more like the defaults that
// existed prior to v1.14, but with user modifications.
t.Logf("Modifying default `system:discovery` ClusterRoleBinding")
discRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(ctx, "system:discovery", metav1.GetOptions{})
discRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(tCtx, "system:discovery", metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to get `system:discovery` ClusterRoleBinding: %v", err)
}
@@ -764,21 +754,21 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) {
APIGroup: "rbac.authorization.k8s.io",
},
}
if discRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(ctx, discRoleBinding, metav1.UpdateOptions{}); err != nil {
if discRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(tCtx, discRoleBinding, metav1.UpdateOptions{}); err != nil {
t.Fatalf("Failed to update `system:discovery` ClusterRoleBinding: %v", err)
}
t.Logf("Modifying default `system:basic-user` ClusterRoleBinding")
basicUserRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(ctx, "system:basic-user", metav1.GetOptions{})
basicUserRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(tCtx, "system:basic-user", metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to get `system:basic-user` ClusterRoleBinding: %v", err)
}
basicUserRoleBinding.Annotations["rbac.authorization.kubernetes.io/autoupdate"] = "false"
basicUserRoleBinding.Annotations["rbac-discovery-upgrade-test"] = "pass"
if basicUserRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(ctx, basicUserRoleBinding, metav1.UpdateOptions{}); err != nil {
if basicUserRoleBinding, err = client.RbacV1().ClusterRoleBindings().Update(tCtx, basicUserRoleBinding, metav1.UpdateOptions{}); err != nil {
t.Fatalf("Failed to update `system:basic-user` ClusterRoleBinding: %v", err)
}
t.Logf("Deleting default `system:public-info-viewer` ClusterRoleBinding")
if err = client.RbacV1().ClusterRoleBindings().Delete(ctx, "system:public-info-viewer", metav1.DeleteOptions{}); err != nil {
if err = client.RbacV1().ClusterRoleBindings().Delete(tCtx, "system:public-info-viewer", metav1.DeleteOptions{}); err != nil {
t.Fatalf("Failed to delete `system:public-info-viewer` ClusterRoleBinding: %v", err)
}
@@ -788,7 +778,7 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) {
// Check that upgraded API servers inherit `system:public-info-viewer` settings from
// `system:discovery`, and respect auto-reconciliation annotations.
client, _, tearDownFn = framework.StartTestServer(ctx, t, framework.TestServerSetup{
client, _, tearDownFn = framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Ensure we're using the same etcd across apiserver restarts.
opts.Etcd.StorageConfig = *etcdConfig
@@ -796,21 +786,21 @@ func TestDiscoveryUpgradeBootstrapping(t *testing.T) {
},
})
newDiscRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(ctx, "system:discovery", metav1.GetOptions{})
newDiscRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(tCtx, "system:discovery", metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to get `system:discovery` ClusterRoleBinding: %v", err)
}
if !reflect.DeepEqual(newDiscRoleBinding, discRoleBinding) {
t.Errorf("`system:discovery` should have been unmodified. Wanted: %v, got %v", discRoleBinding, newDiscRoleBinding)
}
newBasicUserRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(ctx, "system:basic-user", metav1.GetOptions{})
newBasicUserRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(tCtx, "system:basic-user", metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to get `system:basic-user` ClusterRoleBinding: %v", err)
}
if !reflect.DeepEqual(newBasicUserRoleBinding, basicUserRoleBinding) {
t.Errorf("`system:basic-user` should have been unmodified. Wanted: %v, got %v", basicUserRoleBinding, newBasicUserRoleBinding)
}
publicInfoViewerRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(ctx, "system:public-info-viewer", metav1.GetOptions{})
publicInfoViewerRoleBinding, err := client.RbacV1().ClusterRoleBindings().Get(tCtx, "system:public-info-viewer", metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to get `system:public-info-viewer` ClusterRoleBinding: %v", err)
}

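Taken together, the hunks in this file apply one mechanical substitution. A minimal sketch of the resulting shape, hedged in that the ktesting package is assumed to live at k8s.io/kubernetes/test/utils/ktesting (the import block sits outside the displayed hunks):

package example

import (
	"testing"

	"k8s.io/kubernetes/test/utils/ktesting"
)

// TestMigrationShape mirrors the edit above: a single Init call replaces
// the NewTestContext/WithCancel/defer-cancel triple, and the returned
// TContext is canceled when the test terminates, which is why the
// defer cancel() lines disappear from the hunks.
func TestMigrationShape(t *testing.T) {
	tCtx := ktesting.Init(t)
	_ = tCtx // passed wherever a context.Context was accepted before
}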
View File

@ -86,16 +86,13 @@ func TestGetsSelfAttributes(t *testing.T) {
},
}
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
var respMu sync.RWMutex
response := &user.DefaultInfo{
Name: "stub",
}
kubeClient, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
kubeClient, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.APIEnablement.RuntimeConfig.Set("authentication.k8s.io/v1alpha1=true")
opts.APIEnablement.RuntimeConfig.Set("authentication.k8s.io/v1beta1=true")
@ -122,7 +119,7 @@ func TestGetsSelfAttributes(t *testing.T) {
res, err := kubeClient.AuthenticationV1alpha1().
SelfSubjectReviews().
Create(ctx, &authenticationv1alpha1.SelfSubjectReview{}, metav1.CreateOptions{})
Create(tCtx, &authenticationv1alpha1.SelfSubjectReview{}, metav1.CreateOptions{})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@ -149,7 +146,7 @@ func TestGetsSelfAttributes(t *testing.T) {
res2, err := kubeClient.AuthenticationV1beta1().
SelfSubjectReviews().
Create(ctx, &authenticationv1beta1.SelfSubjectReview{}, metav1.CreateOptions{})
Create(tCtx, &authenticationv1beta1.SelfSubjectReview{}, metav1.CreateOptions{})
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
@ -208,11 +205,8 @@ func TestGetsSelfAttributesError(t *testing.T) {
toggle := &atomic.Value{}
toggle.Store(true)
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
kubeClient, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
kubeClient, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.APIEnablement.RuntimeConfig.Set("authentication.k8s.io/v1alpha1=true")
opts.APIEnablement.RuntimeConfig.Set("authentication.k8s.io/v1beta1=true")
@ -244,7 +238,7 @@ func TestGetsSelfAttributesError(t *testing.T) {
_, err := kubeClient.AuthenticationV1alpha1().
SelfSubjectReviews().
Create(ctx, &authenticationv1alpha1.SelfSubjectReview{}, metav1.CreateOptions{})
Create(tCtx, &authenticationv1alpha1.SelfSubjectReview{}, metav1.CreateOptions{})
if err == nil {
t.Fatalf("expected error: %v, got nil", err)
}
@ -260,7 +254,7 @@ func TestGetsSelfAttributesError(t *testing.T) {
_, err := kubeClient.AuthenticationV1beta1().
SelfSubjectReviews().
Create(ctx, &authenticationv1beta1.SelfSubjectReview{}, metav1.CreateOptions{})
Create(tCtx, &authenticationv1beta1.SelfSubjectReview{}, metav1.CreateOptions{})
if err == nil {
t.Fatalf("expected error: %v, got nil", err)
}

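The same TContext is what feeds framework.StartTestServer in the hunks above, since it satisfies context.Context. A hedged sketch of the wiring, assuming the framework package path k8s.io/kubernetes/test/integration/framework and the three-value return shown in the diff:

package example

import (
	"testing"

	"k8s.io/kubernetes/test/integration/framework"
	"k8s.io/kubernetes/test/utils/ktesting"
)

func TestServerWiring(t *testing.T) {
	tCtx := ktesting.Init(t)
	// The TContext goes in where a plain ctx went before; the tear-down
	// function returned by the framework is still deferred as usual.
	kubeClient, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{})
	defer tearDownFn()
	_ = kubeClient
}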
View File

@ -79,9 +79,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
var tokenGenerator serviceaccount.TokenGenerator
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
// Enable the node token improvements feature gates prior to starting the apiserver, as the node getter is
// conditionally passed to the service account token generator based on feature enablement.
@ -91,7 +89,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
// Start the server
var serverAddress string
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
kubeClient, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount"}
@ -193,7 +191,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
}
warningHandler.clear()
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
t.Fatalf("expected err creating token for nonexistant svcacct but got: %#v", resp)
}
warningHandler.assertEqual(t, nil)
@ -202,18 +200,18 @@ func TestServiceAccountTokenCreate(t *testing.T) {
treqWithBadName := treq.DeepCopy()
treqWithBadName.Name = "invalid-name"
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treqWithBadName, metav1.CreateOptions{}); err == nil || !strings.Contains(err.Error(), "must match the service account name") {
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treqWithBadName, metav1.CreateOptions{}); err == nil || !strings.Contains(err.Error(), "must match the service account name") {
t.Fatalf("expected err creating token with mismatched name but got: %#v", resp)
}
treqWithBadNamespace := treq.DeepCopy()
treqWithBadNamespace.Namespace = "invalid-namespace"
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treqWithBadNamespace, metav1.CreateOptions{}); err == nil || !strings.Contains(err.Error(), "does not match the namespace") {
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treqWithBadNamespace, metav1.CreateOptions{}); err == nil || !strings.Contains(err.Error(), "does not match the namespace") {
t.Fatalf("expected err creating token with mismatched namespace but got: %#v, %v", resp, err)
}
warningHandler.clear()
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{})
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{})
if err != nil {
t.Fatalf("err: %v", err)
}
@ -259,7 +257,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
}
warningHandler.clear()
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
t.Fatalf("expected err creating token for nonexistant svcacct but got: %#v", resp)
}
warningHandler.assertEqual(t, nil)
@ -267,7 +265,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
defer del()
warningHandler.clear()
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
t.Fatalf("expected err creating token bound to nonexistant pod but got: %#v", resp)
}
warningHandler.assertEqual(t, nil)
@ -277,21 +275,21 @@ func TestServiceAccountTokenCreate(t *testing.T) {
// right uid
treq.Spec.BoundObjectRef.UID = pod.UID
warningHandler.clear()
if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil {
if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err != nil {
t.Fatalf("err: %v", err)
}
warningHandler.assertEqual(t, nil)
// wrong uid
treq.Spec.BoundObjectRef.UID = wrongUID
warningHandler.clear()
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
t.Fatalf("expected err creating token bound to pod with wrong uid but got: %#v", resp)
}
warningHandler.assertEqual(t, nil)
// no uid
treq.Spec.BoundObjectRef.UID = noUID
warningHandler.clear()
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{})
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{})
if err != nil {
t.Fatalf("err: %v", err)
}
@ -333,7 +331,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
}
warningHandler.clear()
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
t.Fatalf("expected err creating token for nonexistant svcacct but got: %#v", resp)
}
warningHandler.assertEqual(t, nil)
@ -341,7 +339,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
defer del()
warningHandler.clear()
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
t.Fatalf("expected err creating token bound to nonexistant pod but got: %#v", resp)
}
warningHandler.assertEqual(t, nil)
@ -357,21 +355,21 @@ func TestServiceAccountTokenCreate(t *testing.T) {
// right uid
treq.Spec.BoundObjectRef.UID = pod.UID
warningHandler.clear()
if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil {
if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err != nil {
t.Fatalf("err: %v", err)
}
warningHandler.assertEqual(t, nil)
// wrong uid
treq.Spec.BoundObjectRef.UID = wrongUID
warningHandler.clear()
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
t.Fatalf("expected err creating token bound to pod with wrong uid but got: %#v", resp)
}
warningHandler.assertEqual(t, nil)
// no uid
treq.Spec.BoundObjectRef.UID = noUID
warningHandler.clear()
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{})
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{})
if err != nil {
t.Fatalf("err: %v", err)
}
@ -437,7 +435,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
},
}
warningHandler.clear()
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
t.Fatalf("expected err creating token with featuregate disabled but got: %#v", resp)
} else if err.Error() != "cannot bind token to a Node object as the \"ServiceAccountTokenNodeBinding\" feature-gate is disabled" {
t.Fatalf("expected error due to feature gate being disabled, but got: %s", err.Error())
@ -459,7 +457,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
}
warningHandler.clear()
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
t.Fatalf("expected err creating token for nonexistant svcacct but got: %#v", resp)
}
warningHandler.assertEqual(t, nil)
@ -467,7 +465,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
defer del()
warningHandler.clear()
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
t.Fatalf("expected err creating token bound to nonexistant node but got: %#v", resp)
}
warningHandler.assertEqual(t, nil)
@ -477,21 +475,21 @@ func TestServiceAccountTokenCreate(t *testing.T) {
// right uid
treq.Spec.BoundObjectRef.UID = node.UID
warningHandler.clear()
if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil {
if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err != nil {
t.Fatalf("err: %v", err)
}
warningHandler.assertEqual(t, nil)
// wrong uid
treq.Spec.BoundObjectRef.UID = wrongUID
warningHandler.clear()
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
t.Fatalf("expected err creating token bound to node with wrong uid but got: %#v", resp)
}
warningHandler.assertEqual(t, nil)
// no uid
treq.Spec.BoundObjectRef.UID = noUID
warningHandler.clear()
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{})
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{})
if err != nil {
t.Fatalf("err: %v", err)
}
@ -523,7 +521,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
}
warningHandler.clear()
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
t.Fatalf("expected err creating token for nonexistant svcacct but got: %#v", resp)
}
warningHandler.assertEqual(t, nil)
@ -531,7 +529,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
defer del()
warningHandler.clear()
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
t.Fatalf("expected err creating token bound to nonexistant secret but got: %#v", resp)
}
warningHandler.assertEqual(t, nil)
@ -541,21 +539,21 @@ func TestServiceAccountTokenCreate(t *testing.T) {
// right uid
treq.Spec.BoundObjectRef.UID = secret.UID
warningHandler.clear()
if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil {
if _, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err != nil {
t.Fatalf("err: %v", err)
}
warningHandler.assertEqual(t, nil)
// wrong uid
treq.Spec.BoundObjectRef.UID = wrongUID
warningHandler.clear()
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
t.Fatalf("expected err creating token bound to secret with wrong uid but got: %#v", resp)
}
warningHandler.assertEqual(t, nil)
// no uid
treq.Spec.BoundObjectRef.UID = noUID
warningHandler.clear()
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{})
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{})
if err != nil {
t.Fatalf("err: %v", err)
}
@ -591,7 +589,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
defer del()
warningHandler.clear()
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
if resp, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err == nil {
t.Fatalf("expected err but got: %#v", resp)
}
warningHandler.assertEqual(t, nil)
@ -608,7 +606,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
defer del()
warningHandler.clear()
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{})
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{})
if err != nil {
t.Fatalf("err: %v", err)
}
@ -662,7 +660,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
treq.Spec.BoundObjectRef.UID = pod.UID
warningHandler.clear()
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{})
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{})
if err != nil {
t.Fatalf("err: %v", err)
}
@ -718,7 +716,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
treq.Spec.BoundObjectRef.UID = pod.UID
warningHandler.clear()
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{})
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{})
if err != nil {
t.Fatalf("err: %v", err)
}
@ -760,7 +758,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
defer del()
warningHandler.clear()
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{})
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{})
if err != nil {
t.Fatalf("err: %v", err)
}
@ -778,7 +776,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
defer del()
warningHandler.clear()
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{})
treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{})
if err != nil {
t.Fatalf("err: %v", err)
}
@ -808,7 +806,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
treq.Spec.BoundObjectRef.UID = originalPod.UID
warningHandler.clear()
if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil {
if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err != nil {
t.Fatalf("err: %v", err)
}
warningHandler.assertEqual(t, nil)
@ -851,7 +849,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
treq.Spec.BoundObjectRef.UID = originalSecret.UID
warningHandler.clear()
if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil {
if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err != nil {
t.Fatalf("err: %v", err)
}
warningHandler.assertEqual(t, nil)
@ -896,7 +894,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
treq.Spec.BoundObjectRef.UID = originalSecret.UID
warningHandler.clear()
if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil {
if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err != nil {
t.Fatalf("err: %v", err)
}
warningHandler.assertEqual(t, nil)
@ -942,7 +940,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
treq.Spec.BoundObjectRef.UID = originalSecret.UID
warningHandler.clear()
if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name, treq, metav1.CreateOptions{}); err != nil {
if treq, err = cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{}); err != nil {
t.Fatalf("err: %v", err)
}
warningHandler.assertEqual(t, []string{fmt.Sprintf("requested expiration of %d seconds shortened to %d seconds", tooLongExpirationTime, maxExpirationSeconds)})
@ -971,7 +969,7 @@ func TestServiceAccountTokenCreate(t *testing.T) {
t.Log("get token")
warningHandler.clear()
tokenRequest, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(ctx, sa.Name,
tokenRequest, err := cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name,
&authenticationv1.TokenRequest{
Spec: authenticationv1.TokenRequestSpec{
Audiences: []string{"api"},

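Every clientset method takes a context.Context as its first parameter, so tCtx threads through the many CreateToken calls above unchanged. A small helper sketching that call shape, with standard k8s.io import paths assumed (the import block is outside the hunks):

package example

import (
	authenticationv1 "k8s.io/api/authentication/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/utils/ktesting"
)

// createToken passes the TContext straight through as the request context;
// the caller no longer owns a separate cancel function.
func createToken(tCtx ktesting.TContext, cs clientset.Interface, sa *corev1.ServiceAccount, treq *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) {
	return cs.CoreV1().ServiceAccounts(sa.Namespace).CreateToken(tCtx, sa.Name, treq, metav1.CreateOptions{})
}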
View File

@ -190,11 +190,8 @@ func TestStatus(t *testing.T) {
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
_, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
_, kubeConfig, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(options *options.ServerRunOptions) {
if tc.modifyOptions != nil {
tc.modifyOptions(options)

View File

@ -62,9 +62,7 @@ func setup(t *testing.T) (context.Context, kubeapiservertesting.TearDownFunc, *d
}
func setupWithServerSetup(t *testing.T, serverSetup framework.TestServerSetup) (context.Context, kubeapiservertesting.TearDownFunc, *daemon.DaemonSetsController, informers.SharedInformerFactory, clientset.Interface) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
tCtx := ktesting.Init(t)
modifyServerRunOptions := serverSetup.ModifyServerRunOptions
serverSetup.ModifyServerRunOptions = func(opts *options.ServerRunOptions) {
if modifyServerRunOptions != nil {
@ -79,12 +77,12 @@ func setupWithServerSetup(t *testing.T, serverSetup framework.TestServerSetup) (
)
}
clientSet, config, closeFn := framework.StartTestServer(ctx, t, serverSetup)
clientSet, config, closeFn := framework.StartTestServer(tCtx, t, serverSetup)
resyncPeriod := 12 * time.Hour
informers := informers.NewSharedInformerFactory(clientset.NewForConfigOrDie(restclient.AddUserAgent(config, "daemonset-informers")), resyncPeriod)
dc, err := daemon.NewDaemonSetsController(
ctx,
tCtx,
informers.Apps().V1().DaemonSets(),
informers.Apps().V1().ControllerRevisions(),
informers.Core().V1().Pods(),
@ -101,7 +99,7 @@ func setupWithServerSetup(t *testing.T, serverSetup framework.TestServerSetup) (
})
sched, err := scheduler.New(
ctx,
tCtx,
clientSet,
informers,
nil,
@ -111,16 +109,16 @@ func setupWithServerSetup(t *testing.T, serverSetup framework.TestServerSetup) (
t.Fatalf("Couldn't create scheduler: %v", err)
}
eventBroadcaster.StartRecordingToSink(ctx.Done())
go sched.Run(ctx)
eventBroadcaster.StartRecordingToSink(tCtx.Done())
go sched.Run(tCtx)
tearDownFn := func() {
cancel()
tCtx.Cancel("tearing down apiserver")
closeFn()
eventBroadcaster.Shutdown()
}
return ctx, tearDownFn, dc, informers, clientSet
return tCtx, tearDownFn, dc, informers, clientSet
}
func testLabels() map[string]string {

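Where a test still wants an explicit shutdown, the hunk above shows the replacement for the captured cancel variable: tCtx.Cancel with a human-readable cause. A minimal sketch of that teardown shape, with closeFn as a stand-in for whatever the server setup returns:

package example

import (
	"testing"

	"k8s.io/kubernetes/test/utils/ktesting"
)

func TestTeardownShape(t *testing.T) {
	tCtx := ktesting.Init(t)

	closeFn := func() { /* stand-in for the closeFn returned by the server setup */ }

	tearDownFn := func() {
		// Cancel carries a cause string instead of being an anonymous
		// context.CancelFunc; goroutines selecting on tCtx.Done() unblock here.
		tCtx.Cancel("tearing down apiserver")
		closeFn()
	}
	defer tearDownFn()
}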
View File

@ -17,7 +17,6 @@ limitations under the License.
package defaulttolerationseconds
import (
"context"
"testing"
v1 "k8s.io/api/core/v1"
@ -30,11 +29,8 @@ import (
)
func TestAdmission(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerConfig: func(cfg *controlplane.Config) {
cfg.GenericConfig.EnableProfiling = true
cfg.GenericConfig.AdmissionControl = defaulttolerationseconds.NewDefaultTolerationSeconds()
@ -60,7 +56,7 @@ func TestAdmission(t *testing.T) {
},
}
updatedPod, err := client.CoreV1().Pods(pod.Namespace).Create(ctx, &pod, metav1.CreateOptions{})
updatedPod, err := client.CoreV1().Pods(pod.Namespace).Create(tCtx, &pod, metav1.CreateOptions{})
if err != nil {
t.Fatalf("error creating pod: %v", err)
}

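A side effect visible in this file's import hunk: once the WithCancel pair is gone, nothing references the context package, so its import is deleted (Go rejects unused imports). A sketch of the resulting minimal import set:

package example

import (
	"testing"

	"k8s.io/kubernetes/test/utils/ktesting"
)

// No "context" import remains; the TContext from ktesting is the only
// context value this kind of test needs.
func TestImports(t *testing.T) {
	_ = ktesting.Init(t)
}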
View File

@ -114,18 +114,16 @@ func setup(ctx context.Context, t *testing.T) (*kubeapiservertesting.TestServer,
}
func TestPDBWithScaleSubresource(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
s, pdbc, informers, clientSet, apiExtensionClient, dynamicClient := setup(ctx, t)
tCtx := ktesting.Init(t)
s, pdbc, informers, clientSet, apiExtensionClient, dynamicClient := setup(tCtx, t)
defer s.TearDownFn()
defer cancel()
defer tCtx.Cancel("test has completed")
nsName := "pdb-scale-subresource"
createNs(ctx, t, nsName, clientSet)
createNs(tCtx, t, nsName, clientSet)
informers.Start(ctx.Done())
go pdbc.Run(ctx)
informers.Start(tCtx.Done())
go pdbc.Run(tCtx)
crdDefinition := newCustomResourceDefinition()
etcd.CreateTestCRDs(t, apiExtensionClient, true, crdDefinition)
@ -148,7 +146,7 @@ func TestPDBWithScaleSubresource(t *testing.T) {
},
},
}
createdResource, err := resourceClient.Create(ctx, resource, metav1.CreateOptions{})
createdResource, err := resourceClient.Create(tCtx, resource, metav1.CreateOptions{})
if err != nil {
t.Error(err)
}
@ -164,7 +162,7 @@ func TestPDBWithScaleSubresource(t *testing.T) {
},
}
for i := 0; i < replicas; i++ {
createPod(ctx, t, fmt.Sprintf("pod-%d", i), nsName, map[string]string{"app": "test-crd"}, clientSet, ownerRefs)
createPod(tCtx, t, fmt.Sprintf("pod-%d", i), nsName, map[string]string{"app": "test-crd"}, clientSet, ownerRefs)
}
waitToObservePods(t, informers.Core().V1().Pods().Informer(), 4, v1.PodRunning)
@ -183,13 +181,13 @@ func TestPDBWithScaleSubresource(t *testing.T) {
},
},
}
if _, err := clientSet.PolicyV1().PodDisruptionBudgets(nsName).Create(ctx, pdb, metav1.CreateOptions{}); err != nil {
if _, err := clientSet.PolicyV1().PodDisruptionBudgets(nsName).Create(tCtx, pdb, metav1.CreateOptions{}); err != nil {
t.Errorf("Error creating PodDisruptionBudget: %v", err)
}
waitPDBStable(ctx, t, clientSet, 4, nsName, pdb.Name)
waitPDBStable(tCtx, t, clientSet, 4, nsName, pdb.Name)
newPdb, err := clientSet.PolicyV1().PodDisruptionBudgets(nsName).Get(ctx, pdb.Name, metav1.GetOptions{})
newPdb, err := clientSet.PolicyV1().PodDisruptionBudgets(nsName).Get(tCtx, pdb.Name, metav1.GetOptions{})
if err != nil {
t.Errorf("Error getting PodDisruptionBudget: %v", err)
}
@ -248,37 +246,35 @@ func TestEmptySelector(t *testing.T) {
for i, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
s, pdbc, informers, clientSet, _, _ := setup(ctx, t)
tCtx := ktesting.Init(t)
s, pdbc, informers, clientSet, _, _ := setup(tCtx, t)
defer s.TearDownFn()
defer cancel()
defer tCtx.Cancel("test has completed")
nsName := fmt.Sprintf("pdb-empty-selector-%d", i)
createNs(ctx, t, nsName, clientSet)
createNs(tCtx, t, nsName, clientSet)
informers.Start(ctx.Done())
go pdbc.Run(ctx)
informers.Start(tCtx.Done())
go pdbc.Run(tCtx)
replicas := 4
minAvailable := intstr.FromInt32(2)
for j := 0; j < replicas; j++ {
createPod(ctx, t, fmt.Sprintf("pod-%d", j), nsName, map[string]string{"app": "test-crd"},
createPod(tCtx, t, fmt.Sprintf("pod-%d", j), nsName, map[string]string{"app": "test-crd"},
clientSet, []metav1.OwnerReference{})
}
waitToObservePods(t, informers.Core().V1().Pods().Informer(), 4, v1.PodRunning)
pdbName := "test-pdb"
if err := tc.createPDBFunc(ctx, clientSet, s.EtcdClient, s.EtcdStoragePrefix, pdbName, nsName, minAvailable); err != nil {
if err := tc.createPDBFunc(tCtx, clientSet, s.EtcdClient, s.EtcdStoragePrefix, pdbName, nsName, minAvailable); err != nil {
t.Errorf("Error creating PodDisruptionBudget: %v", err)
}
waitPDBStable(ctx, t, clientSet, tc.expectedCurrentHealthy, nsName, pdbName)
waitPDBStable(tCtx, t, clientSet, tc.expectedCurrentHealthy, nsName, pdbName)
newPdb, err := clientSet.PolicyV1().PodDisruptionBudgets(nsName).Get(ctx, pdbName, metav1.GetOptions{})
newPdb, err := clientSet.PolicyV1().PodDisruptionBudgets(nsName).Get(tCtx, pdbName, metav1.GetOptions{})
if err != nil {
t.Errorf("Error getting PodDisruptionBudget: %v", err)
}
@ -364,36 +360,34 @@ func TestSelectorsForPodsWithoutLabels(t *testing.T) {
for i, tc := range testcases {
t.Run(tc.name, func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
s, pdbc, informers, clientSet, _, _ := setup(ctx, t)
tCtx := ktesting.Init(t)
s, pdbc, informers, clientSet, _, _ := setup(tCtx, t)
defer s.TearDownFn()
defer cancel()
defer tCtx.Cancel("test has completed")
nsName := fmt.Sprintf("pdb-selectors-%d", i)
createNs(ctx, t, nsName, clientSet)
createNs(tCtx, t, nsName, clientSet)
informers.Start(ctx.Done())
go pdbc.Run(ctx)
informers.Start(tCtx.Done())
go pdbc.Run(tCtx)
minAvailable := intstr.FromInt32(1)
// Create the PDB first and wait for it to settle.
pdbName := "test-pdb"
if err := tc.createPDBFunc(ctx, clientSet, s.EtcdClient, s.EtcdStoragePrefix, pdbName, nsName, minAvailable); err != nil {
if err := tc.createPDBFunc(tCtx, clientSet, s.EtcdClient, s.EtcdStoragePrefix, pdbName, nsName, minAvailable); err != nil {
t.Errorf("Error creating PodDisruptionBudget: %v", err)
}
waitPDBStable(ctx, t, clientSet, 0, nsName, pdbName)
waitPDBStable(tCtx, t, clientSet, 0, nsName, pdbName)
// Create a pod and wait for it to reach the running phase.
createPod(ctx, t, "pod", nsName, map[string]string{}, clientSet, []metav1.OwnerReference{})
createPod(tCtx, t, "pod", nsName, map[string]string{}, clientSet, []metav1.OwnerReference{})
waitToObservePods(t, informers.Core().V1().Pods().Informer(), 1, v1.PodRunning)
// Then verify that the added pod is picked up by the disruption controller.
waitPDBStable(ctx, t, clientSet, 1, nsName, pdbName)
waitPDBStable(tCtx, t, clientSet, 1, nsName, pdbName)
newPdb, err := clientSet.PolicyV1().PodDisruptionBudgets(nsName).Get(ctx, pdbName, metav1.GetOptions{})
newPdb, err := clientSet.PolicyV1().PodDisruptionBudgets(nsName).Get(tCtx, pdbName, metav1.GetOptions{})
if err != nil {
t.Errorf("Error getting PodDisruptionBudget: %v", err)
}
@ -642,19 +636,17 @@ func TestPatchCompatibility(t *testing.T) {
}
func TestStalePodDisruption(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
s, pdbc, informers, clientSet, _, _ := setup(ctx, t)
tCtx := ktesting.Init(t)
s, pdbc, informers, clientSet, _, _ := setup(tCtx, t)
defer s.TearDownFn()
defer cancel()
defer tCtx.Cancel("test has completed")
nsName := "pdb-stale-pod-disruption"
createNs(ctx, t, nsName, clientSet)
createNs(tCtx, t, nsName, clientSet)
informers.Start(ctx.Done())
informers.WaitForCacheSync(ctx.Done())
go pdbc.Run(ctx)
informers.Start(tCtx.Done())
informers.WaitForCacheSync(tCtx.Done())
go pdbc.Run(tCtx)
cases := map[string]struct {
deletePod bool
@ -723,20 +715,20 @@ func TestStalePodDisruption(t *testing.T) {
Reason: tc.reason,
LastTransitionTime: metav1.Now(),
})
pod, err = clientSet.CoreV1().Pods(nsName).UpdateStatus(ctx, pod, metav1.UpdateOptions{})
pod, err = clientSet.CoreV1().Pods(nsName).UpdateStatus(tCtx, pod, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("Failed updating pod: %v", err)
}
if tc.deletePod {
if err := clientSet.CoreV1().Pods(nsName).Delete(ctx, name, metav1.DeleteOptions{}); err != nil {
if err := clientSet.CoreV1().Pods(nsName).Delete(tCtx, name, metav1.DeleteOptions{}); err != nil {
t.Fatalf("Failed to delete pod: %v", err)
}
}
time.Sleep(stalePodDisruptionTimeout)
diff := ""
if err := wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (done bool, err error) {
pod, err = clientSet.CoreV1().Pods(nsName).Get(ctx, name, metav1.GetOptions{})
pod, err = clientSet.CoreV1().Pods(nsName).Get(tCtx, name, metav1.GetOptions{})
if err != nil {
return false, err
}

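One subtlety in the hunks above: defer s.TearDownFn() is registered before defer tCtx.Cancel("test has completed"), and deferred calls run last-in first-out, so the cancelation fires first and controller goroutines such as pdbc.Run can exit before the apiserver is torn down. A sketch of that ordering, with startFakeController as a hypothetical stand-in for the controller:

package example

import (
	"testing"

	"k8s.io/kubernetes/test/utils/ktesting"
)

func TestDeferOrdering(t *testing.T) {
	tCtx := ktesting.Init(t)

	stop := startFakeController(tCtx)
	defer stop()                            // runs second, like s.TearDownFn()
	defer tCtx.Cancel("test has completed") // runs first (LIFO)
}

// startFakeController blocks a goroutine on the context, mimicking a
// controller Run loop, and returns a function that waits for it to exit.
func startFakeController(tCtx ktesting.TContext) func() {
	done := make(chan struct{})
	go func() {
		defer close(done)
		<-tCtx.Done()
	}()
	return func() { <-done }
}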
View File

@ -17,7 +17,6 @@ limitations under the License.
package dualstack
import (
"context"
"encoding/json"
"fmt"
"reflect"
@ -46,11 +45,8 @@ func TestCreateServiceSingleStackIPv4(t *testing.T) {
// Create an IPv4 single stack control-plane
serviceCIDR := "10.0.0.0/16"
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.ServiceClusterIPRanges = serviceCIDR
},
@ -59,7 +55,7 @@ func TestCreateServiceSingleStackIPv4(t *testing.T) {
// Wait until the default "kubernetes" service is created.
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, "kubernetes", metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
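These polling loops keep the deadline-based wait.Poll; only the Get call inside the closure switches to tCtx. As an aside (not part of this commit), the context-aware poll variant in apimachinery could consume the TContext directly, so canceling the test context would also abort the wait. A hedged sketch:

package example

import (
	"context"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForKubernetesService polls until the default "kubernetes" Service
// exists, returning early if ctx (for example a ktesting TContext) is canceled.
func waitForKubernetesService(ctx context.Context, client clientset.Interface) error {
	return wait.PollUntilContextTimeout(ctx, 250*time.Millisecond, time.Minute, true,
		func(ctx context.Context) (bool, error) {
			_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
			if err != nil && !apierrors.IsNotFound(err) {
				return false, err
			}
			return err == nil, nil
		})
}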
@ -252,7 +248,7 @@ func TestCreateServiceSingleStackIPv4(t *testing.T) {
}
// create the service
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(tCtx, svc, metav1.CreateOptions{})
if (err != nil) != tc.expectError {
t.Errorf("Test failed expected result: %v received %v ", tc.expectError, err)
}
@ -261,7 +257,7 @@ func TestCreateServiceSingleStackIPv4(t *testing.T) {
return
}
// validate the service was created correctly if it was not expected to fail
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{})
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svc.Name, metav1.GetOptions{})
if err != nil {
t.Errorf("Unexpected error to get the service %s %v", svc.Name, err)
}
@ -277,11 +273,8 @@ func TestCreateServiceDualStackIPv6(t *testing.T) {
// Create an IPv6 only dual stack control-plane
serviceCIDR := "2001:db8:1::/112"
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.ServiceClusterIPRanges = serviceCIDR
opts.GenericServerRunOptions.AdvertiseAddress = netutils.ParseIPSloppy("2001:db8::10")
@ -291,7 +284,7 @@ func TestCreateServiceDualStackIPv6(t *testing.T) {
// Wait until the default "kubernetes" service is created.
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, "kubernetes", metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
@ -471,7 +464,7 @@ func TestCreateServiceDualStackIPv6(t *testing.T) {
}
// create the service
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(tCtx, svc, metav1.CreateOptions{})
if (err != nil) != tc.expectError {
t.Errorf("Test failed expected result: %v received %v ", tc.expectError, err)
}
@ -480,7 +473,7 @@ func TestCreateServiceDualStackIPv6(t *testing.T) {
return
}
// validate the service was created correctly if it was not expected to fail
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{})
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svc.Name, metav1.GetOptions{})
if err != nil {
t.Errorf("Unexpected error to get the service %s %v", svc.Name, err)
}
@ -497,11 +490,8 @@ func TestCreateServiceDualStackIPv4IPv6(t *testing.T) {
serviceCIDR := "10.0.0.0/16"
secondaryServiceCIDR := "2001:db8:1::/112"
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR)
},
@ -510,7 +500,7 @@ func TestCreateServiceDualStackIPv4IPv6(t *testing.T) {
// Wait until the default "kubernetes" service is created.
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, "kubernetes", metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
@ -737,7 +727,7 @@ func TestCreateServiceDualStackIPv4IPv6(t *testing.T) {
}
// create a service
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(tCtx, svc, metav1.CreateOptions{})
if (err != nil) != tc.expectError {
t.Errorf("Test failed expected result: %v received %v ", tc.expectError, err)
}
@ -746,7 +736,7 @@ func TestCreateServiceDualStackIPv4IPv6(t *testing.T) {
return
}
// validate the service was created correctly if it was not expected to fail
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{})
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svc.Name, metav1.GetOptions{})
if err != nil {
t.Errorf("Unexpected error to get the service %s %v", svc.Name, err)
}
@ -764,11 +754,8 @@ func TestCreateServiceDualStackIPv6IPv4(t *testing.T) {
serviceCIDR := "2001:db8:1::/112"
secondaryServiceCIDR := "10.0.0.0/16"
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR)
opts.GenericServerRunOptions.AdvertiseAddress = netutils.ParseIPSloppy("2001:db8::10")
@ -778,7 +765,7 @@ func TestCreateServiceDualStackIPv6IPv4(t *testing.T) {
// Wait until the default "kubernetes" service is created.
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, "kubernetes", metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
@ -789,7 +776,7 @@ func TestCreateServiceDualStackIPv6IPv4(t *testing.T) {
// verify client is working
if err := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
_, err := client.CoreV1().Endpoints("default").Get(ctx, "kubernetes", metav1.GetOptions{})
_, err := client.CoreV1().Endpoints("default").Get(tCtx, "kubernetes", metav1.GetOptions{})
if err != nil {
t.Logf("error fetching endpoints: %v", err)
return false, nil
@ -960,7 +947,7 @@ func TestCreateServiceDualStackIPv6IPv4(t *testing.T) {
}
// create a service
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(tCtx, svc, metav1.CreateOptions{})
if (err != nil) != tc.expectError {
t.Errorf("Test failed expected result: %v received %v ", tc.expectError, err)
}
@ -969,7 +956,7 @@ func TestCreateServiceDualStackIPv6IPv4(t *testing.T) {
return
}
// validate the service was created correctly if it was not expected to fail
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{})
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svc.Name, metav1.GetOptions{})
if err != nil {
t.Errorf("Unexpected error to get the service %s %v", svc.Name, err)
}
@ -987,11 +974,8 @@ func TestUpgradeDowngrade(t *testing.T) {
serviceCIDR := "10.0.0.0/16"
secondaryServiceCIDR := "2001:db8:1::/112"
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR)
},
@ -1000,7 +984,7 @@ func TestUpgradeDowngrade(t *testing.T) {
// Wait until the default "kubernetes" service is created.
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, "kubernetes", metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
@ -1027,12 +1011,12 @@ func TestUpgradeDowngrade(t *testing.T) {
}
// create a service
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(tCtx, svc, metav1.CreateOptions{})
if err != nil {
t.Fatalf("unexpected error while creating service:%v", err)
}
// validate the service was created correctly if it was not expected to fail
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{})
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svc.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err)
}
@ -1044,7 +1028,7 @@ func TestUpgradeDowngrade(t *testing.T) {
// upgrade it
requireDualStack := v1.IPFamilyPolicyRequireDualStack
svc.Spec.IPFamilyPolicy = &requireDualStack
upgraded, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, svc, metav1.UpdateOptions{})
upgraded, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(tCtx, svc, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("unexpected error upgrading service to dual stack. %v", err)
}
@ -1057,7 +1041,7 @@ func TestUpgradeDowngrade(t *testing.T) {
upgraded.Spec.IPFamilyPolicy = &singleStack
upgraded.Spec.ClusterIPs = upgraded.Spec.ClusterIPs[0:1]
upgraded.Spec.IPFamilies = upgraded.Spec.IPFamilies[0:1]
downgraded, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, upgraded, metav1.UpdateOptions{})
downgraded, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(tCtx, upgraded, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("unexpected error downgrading service to single stack. %v", err)
}
@ -1067,7 +1051,7 @@ func TestUpgradeDowngrade(t *testing.T) {
// run test again this time without removing secondary IPFamily or ClusterIP
downgraded.Spec.IPFamilyPolicy = &requireDualStack
upgradedAgain, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, downgraded, metav1.UpdateOptions{})
upgradedAgain, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(tCtx, downgraded, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("unexpected error upgrading service to dual stack. %v", err)
}
@ -1078,7 +1062,7 @@ func TestUpgradeDowngrade(t *testing.T) {
upgradedAgain.Spec.IPFamilyPolicy = &singleStack
// api-server automatically removes the secondary ClusterIP and IPFamily
// when a service is downgraded.
downgradedAgain, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, upgradedAgain, metav1.UpdateOptions{})
downgradedAgain, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(tCtx, upgradedAgain, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("unexpected error downgrading service to single stack. %v", err)
}
@ -1094,11 +1078,8 @@ func TestConvertToFromExternalName(t *testing.T) {
serviceCIDR := "10.0.0.0/16"
secondaryServiceCIDR := "2001:db8:1::/112"
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR)
},
@ -1107,7 +1088,7 @@ func TestConvertToFromExternalName(t *testing.T) {
// Wait until the default "kubernetes" service is created.
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, "kubernetes", metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
@ -1133,12 +1114,12 @@ func TestConvertToFromExternalName(t *testing.T) {
}
// create a service
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(tCtx, svc, metav1.CreateOptions{})
if err != nil {
t.Fatalf("unexpected error while creating service:%v", err)
}
// validate the service was created correctly if it was not expected to fail
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{})
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svc.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err)
}
@ -1152,7 +1133,7 @@ func TestConvertToFromExternalName(t *testing.T) {
svc.Spec.ClusterIP = "" // not clearing ClusterIPs
svc.Spec.ExternalName = "something.somewhere"
externalNameSvc, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, svc, metav1.UpdateOptions{})
externalNameSvc, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(tCtx, svc, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("unexpected error converting service to external name. %v", err)
}
@ -1164,7 +1145,7 @@ func TestConvertToFromExternalName(t *testing.T) {
// convert to a ClusterIP service
externalNameSvc.Spec.Type = v1.ServiceTypeClusterIP
externalNameSvc.Spec.ExternalName = ""
clusterIPSvc, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, externalNameSvc, metav1.UpdateOptions{})
clusterIPSvc, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(tCtx, externalNameSvc, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("unexpected error converting service to ClusterIP. %v", err)
}
@ -1179,11 +1160,8 @@ func TestPreferDualStack(t *testing.T) {
serviceCIDR := "10.0.0.0/16"
secondaryServiceCIDR := "2001:db8:1::/112"
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR)
},
@ -1192,7 +1170,7 @@ func TestPreferDualStack(t *testing.T) {
// Wait until the default "kubernetes" service is created.
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, "kubernetes", metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
@ -1222,12 +1200,12 @@ func TestPreferDualStack(t *testing.T) {
}
// create a service
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(tCtx, svc, metav1.CreateOptions{})
if err != nil {
t.Fatalf("unexpected error while creating service:%v", err)
}
// validate the service was created correctly if it was not expected to fail
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{})
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svc.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err)
}
@ -1238,7 +1216,7 @@ func TestPreferDualStack(t *testing.T) {
// update it
svc.Spec.Selector = map[string]string{"foo": "bar"}
upgraded, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, svc, metav1.UpdateOptions{})
upgraded, err := client.CoreV1().Services(metav1.NamespaceDefault).Update(tCtx, svc, metav1.UpdateOptions{})
if err != nil {
t.Fatalf("unexpected error upgrading service to dual stack. %v", err)
}
@ -1256,11 +1234,8 @@ func TestServiceUpdate(t *testing.T) {
// Create an IPv4 single stack control-plane
serviceCIDR := "10.0.0.0/16"
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.ServiceClusterIPRanges = serviceCIDR
},
@ -1269,7 +1244,7 @@ func TestServiceUpdate(t *testing.T) {
// Wait until the default "kubernetes" service is created.
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, "kubernetes", metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
@ -1295,26 +1270,26 @@ func TestServiceUpdate(t *testing.T) {
}
// create the service
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(tCtx, svc, metav1.CreateOptions{})
// if no error was expected, validate the service; otherwise return
if err != nil {
t.Errorf("unexpected error creating service:%v", err)
return
}
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{})
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svc.Name, metav1.GetOptions{})
if err != nil {
t.Errorf("Unexpected error to get the service %s %v", svc.Name, err)
}
// update using put
svc.Labels = map[string]string{"x": "y"}
_, err = client.CoreV1().Services(metav1.NamespaceDefault).Update(ctx, svc, metav1.UpdateOptions{})
_, err = client.CoreV1().Services(metav1.NamespaceDefault).Update(tCtx, svc, metav1.UpdateOptions{})
if err != nil {
t.Errorf("Unexpected error updating the service %s %v", svc.Name, err)
}
_, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{})
_, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svc.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err)
}
@ -1329,12 +1304,12 @@ func TestServiceUpdate(t *testing.T) {
t.Fatalf("failed to json.Marshal labels: %v", err)
}
_, err = client.CoreV1().Services(metav1.NamespaceDefault).Patch(ctx, svc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
_, err = client.CoreV1().Services(metav1.NamespaceDefault).Patch(tCtx, svc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
t.Fatalf("unexpected error patching service using strategic merge patch. %v", err)
}
current, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{})
current, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svc.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err)
}
@ -1356,13 +1331,13 @@ func TestServiceUpdate(t *testing.T) {
t.Fatalf("unexpected error creating json patch. %v", err)
}
_, err = client.CoreV1().Services(metav1.NamespaceDefault).Patch(ctx, svc.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{})
_, err = client.CoreV1().Services(metav1.NamespaceDefault).Patch(tCtx, svc.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
t.Fatalf("unexpected error patching service using merge patch. %v", err)
}
// validate the service was created correctly if it was not expected to fail
_, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{})
_, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svc.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err)
}
@ -1418,14 +1393,12 @@ func validateServiceAndClusterIPFamily(svc *v1.Service, expectedIPFamilies []v1.
func TestUpgradeServicePreferToDualStack(t *testing.T) {
sharedEtcd := framework.SharedEtcd()
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
// Create an IPv4 only dual stack control-plane
serviceCIDR := "192.168.0.0/24"
client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.Etcd.StorageConfig = *sharedEtcd
opts.ServiceClusterIPRanges = serviceCIDR
@ -1434,7 +1407,7 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) {
// Wait until the default "kubernetes" service is created.
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, "kubernetes", metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
@ -1465,12 +1438,12 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) {
}
// create the service
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(tCtx, svc, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// validate the service was created correctly if it was not expected to fail
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{})
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svc.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err)
}
@ -1483,7 +1456,7 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) {
secondaryServiceCIDR := "2001:db8:1::/112"
client, _, tearDownFn = framework.StartTestServer(ctx, t, framework.TestServerSetup{
client, _, tearDownFn = framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.Etcd.StorageConfig = *sharedEtcd
opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR)
@ -1493,7 +1466,7 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) {
// Wait until the default "kubernetes" service is created.
if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, "kubernetes", metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
@ -1502,7 +1475,7 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) {
t.Fatalf("creating kubernetes service timed out")
}
// validate the service was created correctly if it was not expected to fail
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{})
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svc.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err)
}
@ -1515,15 +1488,13 @@ func TestUpgradeServicePreferToDualStack(t *testing.T) {
func TestDowngradeServicePreferToDualStack(t *testing.T) {
sharedEtcd := framework.SharedEtcd()
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
tCtx := ktesting.Init(t)
// Create a dual stack control-plane
serviceCIDR := "192.168.0.0/24"
secondaryServiceCIDR := "2001:db8:1::/112"
client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.Etcd.StorageConfig = *sharedEtcd
opts.ServiceClusterIPRanges = fmt.Sprintf("%s,%s", serviceCIDR, secondaryServiceCIDR)
@ -1532,7 +1503,7 @@ func TestDowngradeServicePreferToDualStack(t *testing.T) {
// Wait until the default "kubernetes" service is created.
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, "kubernetes", metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
@ -1561,12 +1532,12 @@ func TestDowngradeServicePreferToDualStack(t *testing.T) {
},
}
// create the service
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc, metav1.CreateOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(tCtx, svc, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Unexpected error: %v", err)
}
// validate the service was created correctly if it was not expected to fail
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{})
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svc.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err)
}
@ -1577,7 +1548,7 @@ func TestDowngradeServicePreferToDualStack(t *testing.T) {
tearDownFn()
// reset secondary
client, _, tearDownFn = framework.StartTestServer(ctx, t, framework.TestServerSetup{
client, _, tearDownFn = framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.Etcd.StorageConfig = *sharedEtcd
opts.ServiceClusterIPRanges = serviceCIDR
@ -1587,7 +1558,7 @@ func TestDowngradeServicePreferToDualStack(t *testing.T) {
// Wait until the default "kubernetes" service is created.
if err = wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, "kubernetes", metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
@ -1596,7 +1567,7 @@ func TestDowngradeServicePreferToDualStack(t *testing.T) {
t.Fatalf("creating kubernetes service timed out")
}
// validate the service is still there.
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svc.Name, metav1.GetOptions{})
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svc.Name, metav1.GetOptions{})
if err != nil {
t.Fatalf("Unexpected error to get the service %s %v", svc.Name, err)
}
@ -1616,11 +1587,8 @@ type specMergePatch struct {
// tests success when converting ClusterIP:Headless service to ExternalName
func Test_ServiceChangeTypeHeadlessToExternalNameWithPatch(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{})
tCtx := ktesting.Init(t)
client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{})
defer tearDownFn()
ns := framework.CreateNamespaceOrDie(client, "test-service-allocate-node-ports", t)
@ -1638,7 +1606,7 @@ func Test_ServiceChangeTypeHeadlessToExternalNameWithPatch(t *testing.T) {
}
var err error
service, err = client.CoreV1().Services(ns.Name).Create(ctx, service, metav1.CreateOptions{})
service, err = client.CoreV1().Services(ns.Name).Create(tCtx, service, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Error creating test service: %v", err)
}
@ -1654,7 +1622,7 @@ func Test_ServiceChangeTypeHeadlessToExternalNameWithPatch(t *testing.T) {
t.Fatalf("failed to json.Marshal ports: %v", err)
}
_, err = client.CoreV1().Services(ns.Name).Patch(ctx, service.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
_, err = client.CoreV1().Services(ns.Name).Patch(tCtx, service.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
t.Fatalf("unexpected error patching service using strategic merge patch. %v", err)
}
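The hunks above all apply the same mechanical rewrite. A minimal sketch of the before/after pattern, assuming the k8s.io/kubernetes/test/utils/ktesting import path (everything below is illustrative, not part of the commit):

package example

import (
    "context"
    "testing"

    "k8s.io/kubernetes/test/utils/ktesting" // assumed import path
)

// Before: tests built their own cancelable context on top of the
// ktesting one and had to remember to defer cancel():
//
//	_, ctx := ktesting.NewTestContext(t)
//	ctx, cancel := context.WithCancel(ctx)
//	defer cancel()
//
// After: Init returns a TContext that is canceled automatically when
// the test finishes, so the boilerplate disappears.
func TestSketch(t *testing.T) {
    tCtx := ktesting.Init(t)
    useContext(tCtx) // a TContext satisfies context.Context
}

func useContext(ctx context.Context) {}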

View File

@ -69,18 +69,16 @@ const (
func TestConcurrentEvictionRequests(t *testing.T) {
podNameFormat := "test-pod-%d"
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
closeFn, rm, informers, _, clientSet := rmSetup(ctx, t)
tCtx := ktesting.Init(t)
closeFn, rm, informers, _, clientSet := rmSetup(tCtx, t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(clientSet, "concurrent-eviction-requests", t)
defer framework.DeleteNamespaceOrDie(clientSet, ns, t)
defer cancel()
defer tCtx.Cancel("test has completed")
informers.Start(ctx.Done())
go rm.Run(ctx)
informers.Start(tCtx.Done())
go rm.Run(tCtx)
var gracePeriodSeconds int64 = 30
deleteOption := metav1.DeleteOptions{
@ -184,18 +182,16 @@ func TestConcurrentEvictionRequests(t *testing.T) {
// TestTerminalPodEviction ensures that PDB is not checked for terminal pods.
func TestTerminalPodEviction(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
closeFn, rm, informers, _, clientSet := rmSetup(ctx, t)
tCtx := ktesting.Init(t)
closeFn, rm, informers, _, clientSet := rmSetup(tCtx, t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(clientSet, "terminalpod-eviction", t)
defer framework.DeleteNamespaceOrDie(clientSet, ns, t)
defer cancel()
defer tCtx.Cancel("test has completed")
informers.Start(ctx.Done())
go rm.Run(ctx)
informers.Start(tCtx.Done())
go rm.Run(tCtx)
var gracePeriodSeconds int64 = 30
deleteOption := metav1.DeleteOptions{
@ -260,15 +256,13 @@ func TestTerminalPodEviction(t *testing.T) {
// TestEvictionVersions ensures the eviction endpoint accepts and returns the correct API versions
func TestEvictionVersions(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
closeFn, rm, informers, config, clientSet := rmSetup(ctx, t)
tCtx := ktesting.Init(t)
closeFn, rm, informers, config, clientSet := rmSetup(tCtx, t)
defer closeFn()
defer cancel()
defer tCtx.Cancel("test has completed")
informers.Start(ctx.Done())
go rm.Run(ctx)
informers.Start(tCtx.Done())
go rm.Run(tCtx)
ns := "default"
subresource := "eviction"
@ -386,29 +380,27 @@ func TestEvictionWithFinalizers(t *testing.T) {
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
closeFn, rm, informers, _, clientSet := rmSetup(ctx, t)
tCtx := ktesting.Init(t)
closeFn, rm, informers, _, clientSet := rmSetup(tCtx, t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(clientSet, "eviction-with-finalizers", t)
defer framework.DeleteNamespaceOrDie(clientSet, ns, t)
defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PodDisruptionConditions, tc.enablePodDisruptionConditions)()
defer cancel()
defer tCtx.Cancel("test has completed")
informers.Start(ctx.Done())
go rm.Run(ctx)
informers.Start(tCtx.Done())
go rm.Run(tCtx)
pod := newPod("pod")
pod.ObjectMeta.Finalizers = []string{"test.k8s.io/finalizer"}
if _, err := clientSet.CoreV1().Pods(ns.Name).Create(ctx, pod, metav1.CreateOptions{}); err != nil {
if _, err := clientSet.CoreV1().Pods(ns.Name).Create(tCtx, pod, metav1.CreateOptions{}); err != nil {
t.Errorf("Failed to create pod: %v", err)
}
pod.Status.Phase = tc.phase
addPodConditionReady(pod)
if _, err := clientSet.CoreV1().Pods(ns.Name).UpdateStatus(ctx, pod, metav1.UpdateOptions{}); err != nil {
if _, err := clientSet.CoreV1().Pods(ns.Name).UpdateStatus(tCtx, pod, metav1.UpdateOptions{}); err != nil {
t.Fatal(err)
}
@ -420,12 +412,12 @@ func TestEvictionWithFinalizers(t *testing.T) {
eviction := newV1Eviction(ns.Name, pod.Name, deleteOption)
err := clientSet.PolicyV1().Evictions(ns.Name).Evict(ctx, eviction)
err := clientSet.PolicyV1().Evictions(ns.Name).Evict(tCtx, eviction)
if err != nil {
t.Fatalf("Eviction of pod failed %v", err)
}
updatedPod, e := clientSet.CoreV1().Pods(ns.Name).Get(ctx, pod.Name, metav1.GetOptions{})
updatedPod, e := clientSet.CoreV1().Pods(ns.Name).Get(tCtx, pod.Name, metav1.GetOptions{})
if e != nil {
t.Fatalf("Failed to get the pod %q with error: %q", klog.KObj(pod), e)
}
@ -474,19 +466,17 @@ func TestEvictionWithUnhealthyPodEvictionPolicy(t *testing.T) {
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
tCtx := ktesting.Init(t)
defer featuregatetesting.SetFeatureGateDuringTest(t, feature.DefaultFeatureGate, features.PDBUnhealthyPodEvictionPolicy, tc.enableUnhealthyPodEvictionPolicy)()
closeFn, rm, informers, _, clientSet := rmSetup(ctx, t)
closeFn, rm, informers, _, clientSet := rmSetup(tCtx, t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(clientSet, "eviction-with-pdb-pod-healthy-policy", t)
defer framework.DeleteNamespaceOrDie(clientSet, ns, t)
defer cancel()
defer tCtx.Cancel("test has completed")
informers.Start(ctx.Done())
go rm.Run(ctx)
informers.Start(tCtx.Done())
go rm.Run(tCtx)
pod := newPod("pod")
if _, err := clientSet.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
@ -498,7 +488,7 @@ func TestEvictionWithUnhealthyPodEvictionPolicy(t *testing.T) {
addPodConditionReady(pod)
}
if _, err := clientSet.CoreV1().Pods(ns.Name).UpdateStatus(ctx, pod, metav1.UpdateOptions{}); err != nil {
if _, err := clientSet.CoreV1().Pods(ns.Name).UpdateStatus(tCtx, pod, metav1.UpdateOptions{}); err != nil {
t.Fatal(err)
}
@ -524,7 +514,7 @@ func TestEvictionWithUnhealthyPodEvictionPolicy(t *testing.T) {
deleteOption := metav1.DeleteOptions{}
eviction := newV1Eviction(ns.Name, pod.Name, deleteOption)
err := policyV1NoRetriesClient.Evictions(ns.Name).Evict(ctx, eviction)
err := policyV1NoRetriesClient.Evictions(ns.Name).Evict(tCtx, eviction)
if err != nil {
t.Fatalf("Eviction of pod failed %v", err)
}
@ -570,18 +560,16 @@ func TestEvictionWithPrecondition(t *testing.T) {
}
for name, tc := range cases {
t.Run(name, func(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
closeFn, rm, informers, _, clientSet := rmSetup(ctx, t)
tCtx := ktesting.Init(t)
closeFn, rm, informers, _, clientSet := rmSetup(tCtx, t)
defer closeFn()
ns := framework.CreateNamespaceOrDie(clientSet, "eviction-with-preconditions", t)
defer framework.DeleteNamespaceOrDie(clientSet, ns, t)
defer cancel()
informers.Start(ctx.Done())
go rm.Run(ctx)
defer tCtx.Cancel("test has completed")
informers.Start(tCtx.Done())
go rm.Run(tCtx)
pod := newPod("pod")
pod, err := clientSet.CoreV1().Pods(ns.Name).Create(context.TODO(), pod, metav1.CreateOptions{})
@ -593,7 +581,7 @@ func TestEvictionWithPrecondition(t *testing.T) {
addPodConditionReady(pod)
// generate a new resource version
updatedPod, err := clientSet.CoreV1().Pods(ns.Name).UpdateStatus(ctx, pod, metav1.UpdateOptions{})
updatedPod, err := clientSet.CoreV1().Pods(ns.Name).UpdateStatus(tCtx, pod, metav1.UpdateOptions{})
if err != nil {
t.Fatal(err)
}
@ -629,7 +617,7 @@ func TestEvictionWithPrecondition(t *testing.T) {
policyV1NoRetriesClient := policyv1client.New(policyV1NoRetriesRESTClient)
eviction := newV1Eviction(ns.Name, updatedPod.Name, deleteOption)
err = policyV1NoRetriesClient.Evictions(ns.Name).Evict(ctx, eviction)
err = policyV1NoRetriesClient.Evictions(ns.Name).Evict(tCtx, eviction)
if err != nil && !tc.shouldErr {
t.Fatalf("Eviction of pod failed %q", err)
}
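Two idioms recur in the eviction tests: informers are started with the context's Done() channel, and teardown order is controlled with an explicit, documented cancelation. A sketch of both, under the same import-path assumption as above (the goroutine stands in for the informer/controller machinery):

package example

import (
    "testing"

    "k8s.io/kubernetes/test/utils/ktesting" // assumed import path
)

func TestTeardownOrder(t *testing.T) {
    tCtx := ktesting.Init(t)
    // Deferred calls run last-in, first-out: this Cancel fires before
    // defers registered earlier (server close, namespace deletion), so
    // background goroutines observe cancelation while their
    // dependencies still exist.
    defer tCtx.Cancel("test has completed")

    stopped := make(chan struct{})
    go func() { // stands in for informers.Start(tCtx.Done()) and rm.Run(tCtx)
        <-tCtx.Done()
        close(stopped)
    }()

    // Canceling twice is harmless here; real tests rely solely on the
    // deferred Cancel above. (Idempotency of Cancel is an assumption.)
    tCtx.Cancel("demo: cancel early so the goroutine exits")
    <-stopped
}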

View File

@ -17,7 +17,6 @@ limitations under the License.
package apiserver
import (
"context"
"sync/atomic"
"testing"
"time"
@ -42,11 +41,8 @@ func TestWebhookLoopback(t *testing.T) {
called := int32(0)
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
},
ModifyServerConfig: func(config *controlplane.Config) {
@ -72,7 +68,7 @@ func TestWebhookLoopback(t *testing.T) {
fail := admissionregistrationv1.Fail
noSideEffects := admissionregistrationv1.SideEffectClassNone
_, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(ctx, &admissionregistrationv1.MutatingWebhookConfiguration{
_, err := client.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(tCtx, &admissionregistrationv1.MutatingWebhookConfiguration{
ObjectMeta: metav1.ObjectMeta{Name: "webhooktest.example.com"},
Webhooks: []admissionregistrationv1.MutatingWebhook{{
Name: "webhooktest.example.com",
@ -93,7 +89,7 @@ func TestWebhookLoopback(t *testing.T) {
}
err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (done bool, err error) {
_, err = client.CoreV1().ConfigMaps("default").Create(ctx, &v1.ConfigMap{
_, err = client.CoreV1().ConfigMaps("default").Create(tCtx, &v1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "webhook-test"},
Data: map[string]string{"invalid key": "value"},
}, metav1.CreateOptions{})

View File

@ -59,6 +59,7 @@ type TestServerSetup struct {
type TearDownFunc func()
// StartTestServer runs a kube-apiserver, optionally calling out to the setup.ModifyServerRunOptions and setup.ModifyServerConfig functions
// TODO (pohly): convert to ktesting contexts
func StartTestServer(ctx context.Context, t testing.TB, setup TestServerSetup) (client.Interface, *rest.Config, TearDownFunc) {
ctx, cancel := context.WithCancel(ctx)
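The helper keeps its context.Context parameter for now, hence the TODO. Because the ktesting TContext embeds context.Context, converted callers can pass a TContext here without any signature change; roughly (the interface shape is inferred from how TContext is used in this commit, and startServerSketch is illustrative):

package example

import "context"

// TContext, approximately: a context plus test helpers. The exact
// interface is defined by the ktesting package; this shape is inferred
// from its usage in this commit.
type TContext interface {
    context.Context
    Cancel(cause string)
    // Logger(), testing.TB methods, etc. elided.
}

// startServerSketch mirrors the unconverted helper: it still wraps the
// incoming context itself, which is why the TODO exists. Callers may
// pass a TContext since it satisfies context.Context.
func startServerSketch(ctx context.Context) (tearDown func()) {
    ctx, cancel := context.WithCancel(ctx)
    _ = ctx // the test server would be bound to ctx here
    return func() { cancel() }
}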

View File

@ -248,12 +248,12 @@ func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, work
sharedInformers := informers.NewSharedInformerFactory(clientSet, 0)
metadataInformers := metadatainformer.NewSharedInformerFactory(metadataClient, 0)
logger, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
tCtx := ktesting.Init(t)
logger := tCtx.Logger()
alwaysStarted := make(chan struct{})
close(alwaysStarted)
gc, err := garbagecollector.NewGarbageCollector(
ctx,
tCtx,
clientSet,
metadataClient,
restMapper,
@ -266,7 +266,7 @@ func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, work
}
tearDown := func() {
cancel()
tCtx.Cancel("tearing down")
result.TearDownFn()
}
syncPeriod := 5 * time.Second
@ -276,9 +276,9 @@ func setupWithServer(t *testing.T, result *kubeapiservertesting.TestServer, work
// client. This is a leaky abstraction and assumes behavior about the REST
// mapper, but we'll deal with it for now.
restMapper.Reset()
}, syncPeriod, ctx.Done())
go gc.Run(ctx, workers)
go gc.Sync(ctx, clientSet.Discovery(), syncPeriod)
}, syncPeriod, tCtx.Done())
go gc.Run(tCtx, workers)
go gc.Sync(tCtx, clientSet.Discovery(), syncPeriod)
}
if workerCount > 0 {
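Where the old helper destructured logger and context separately, the TContext now provides both, and the teardown closure replaces a bare cancel() with a Cancel that records why. A sketch under the same assumptions as above:

package example

import (
    "testing"

    "k8s.io/kubernetes/test/utils/ktesting" // assumed import path
)

func setupSketch(t *testing.T) (tearDown func()) {
    tCtx := ktesting.Init(t)
    logger := tCtx.Logger() // replaces the logger returned by NewTestContext
    logger.Info("setup complete")

    return func() {
        // Cancel takes a cause string documenting why cancelation
        // happened, instead of the bare cancel() of context.WithCancel.
        tCtx.Cancel("tearing down")
    }
}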

View File

@ -17,7 +17,6 @@ limitations under the License.
package network
import (
"context"
"encoding/json"
"fmt"
"testing"
@ -45,11 +44,8 @@ func TestServicesFinalizersRepairLoop(t *testing.T) {
clusterIP := "10.0.0.20"
interval := 5 * time.Second
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.ServiceClusterIPRanges = serviceCIDR
},
@ -61,7 +57,7 @@ func TestServicesFinalizersRepairLoop(t *testing.T) {
// verify client is working
if err := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
_, err := client.CoreV1().Endpoints(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
_, err := client.CoreV1().Endpoints(metav1.NamespaceDefault).Get(tCtx, "kubernetes", metav1.GetOptions{})
if err != nil {
t.Logf("error fetching endpoints: %v", err)
return false, nil
@ -90,20 +86,20 @@ func TestServicesFinalizersRepairLoop(t *testing.T) {
}
// Create service
if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, &svcNodePort, metav1.CreateOptions{}); err != nil {
if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(tCtx, &svcNodePort, metav1.CreateOptions{}); err != nil {
t.Errorf("unexpected error creating service: %v", err)
}
t.Logf("Created service: %s", svcNodePort.Name)
// Check the service has been created correctly
svc, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svcNodePort.Name, metav1.GetOptions{})
svc, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svcNodePort.Name, metav1.GetOptions{})
if err != nil || svc.Spec.ClusterIP != clusterIP {
t.Errorf("created service is not correct: %v", err)
}
t.Logf("Service created successfully: %v", svc)
// Delete service
if err := client.CoreV1().Services(metav1.NamespaceDefault).Delete(ctx, svcNodePort.Name, metav1.DeleteOptions{}); err != nil {
if err := client.CoreV1().Services(metav1.NamespaceDefault).Delete(tCtx, svcNodePort.Name, metav1.DeleteOptions{}); err != nil {
t.Errorf("unexpected error deleting service: %v", err)
}
t.Logf("Deleted service: %s", svcNodePort.Name)
@ -112,26 +108,26 @@ func TestServicesFinalizersRepairLoop(t *testing.T) {
time.Sleep(interval + 1)
// Check that the service was not deleted and the IP is already allocated
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svcNodePort.Name, metav1.GetOptions{})
svc, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svcNodePort.Name, metav1.GetOptions{})
if err != nil || svc.Spec.ClusterIP != clusterIP {
t.Errorf("created service is not correct: %v", err)
}
t.Logf("Service after Delete: %v", svc)
// Remove the finalizer
if _, err = client.CoreV1().Services(metav1.NamespaceDefault).Patch(ctx, svcNodePort.Name, types.JSONPatchType, []byte(`[{"op":"remove","path":"/metadata/finalizers"}]`), metav1.PatchOptions{}); err != nil {
if _, err = client.CoreV1().Services(metav1.NamespaceDefault).Patch(tCtx, svcNodePort.Name, types.JSONPatchType, []byte(`[{"op":"remove","path":"/metadata/finalizers"}]`), metav1.PatchOptions{}); err != nil {
t.Errorf("unexpected error removing finalizer: %v", err)
}
t.Logf("Removed service finalizer: %s", svcNodePort.Name)
// Check that the service was deleted
_, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, svcNodePort.Name, metav1.GetOptions{})
_, err = client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, svcNodePort.Name, metav1.GetOptions{})
if err == nil {
t.Errorf("service was not delete: %v", err)
}
// Try to create service again
if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, &svcNodePort, metav1.CreateOptions{}); err != nil {
if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(tCtx, &svcNodePort, metav1.CreateOptions{}); err != nil {
t.Errorf("unexpected error creating service: %v", err)
}
t.Logf("Created service: %s", svcNodePort.Name)
@ -141,11 +137,8 @@ func TestServicesFinalizersPatchStatus(t *testing.T) {
serviceCIDR := "10.0.0.0/16"
clusterIP := "10.0.0.21"
nodePort := 30443
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.ServiceClusterIPRanges = serviceCIDR
},
@ -176,26 +169,26 @@ func TestServicesFinalizersPatchStatus(t *testing.T) {
defer framework.DeleteNamespaceOrDie(client, ns, t)
// Create service
if _, err := client.CoreV1().Services(ns.Name).Create(ctx, &svcNodePort, metav1.CreateOptions{}); err != nil {
if _, err := client.CoreV1().Services(ns.Name).Create(tCtx, &svcNodePort, metav1.CreateOptions{}); err != nil {
t.Fatalf("unexpected error creating service: %v", err)
}
t.Logf("Created service: %s", svcNodePort.Name)
// Check the service has been created correctly
svc, err := client.CoreV1().Services(ns.Name).Get(ctx, svcNodePort.Name, metav1.GetOptions{})
svc, err := client.CoreV1().Services(ns.Name).Get(tCtx, svcNodePort.Name, metav1.GetOptions{})
if err != nil || svc.Spec.ClusterIP != clusterIP {
t.Fatalf("created service is not correct: %v", err)
}
t.Logf("Service created successfully: %+v", svc)
// Delete service
if err := client.CoreV1().Services(ns.Name).Delete(ctx, svcNodePort.Name, metav1.DeleteOptions{}); err != nil {
if err := client.CoreV1().Services(ns.Name).Delete(tCtx, svcNodePort.Name, metav1.DeleteOptions{}); err != nil {
t.Fatalf("unexpected error deleting service: %v", err)
}
t.Logf("Deleted service: %s", svcNodePort.Name)
// Check that the service was not deleted and the IP is already allocated
svc, err = client.CoreV1().Services(ns.Name).Get(ctx, svcNodePort.Name, metav1.GetOptions{})
svc, err = client.CoreV1().Services(ns.Name).Get(tCtx, svcNodePort.Name, metav1.GetOptions{})
if err != nil ||
svc.Spec.ClusterIP != clusterIP ||
int(svc.Spec.Ports[0].NodePort) != nodePort ||
@ -214,18 +207,18 @@ func TestServicesFinalizersPatchStatus(t *testing.T) {
}
if testcase == "spec" {
if _, err = client.CoreV1().Services(ns.Name).Patch(ctx, svcNodePort.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
if _, err = client.CoreV1().Services(ns.Name).Patch(tCtx, svcNodePort.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}); err != nil {
t.Fatalf("unexpected error removing finalizer: %v", err)
}
} else {
if _, err = client.CoreV1().Services(ns.Name).Patch(ctx, svcNodePort.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status"); err != nil {
if _, err = client.CoreV1().Services(ns.Name).Patch(tCtx, svcNodePort.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status"); err != nil {
t.Fatalf("unexpected error removing finalizer: %v", err)
}
}
t.Logf("Removed service finalizer: %s", svcNodePort.Name)
// Check that the service was deleted
_, err = client.CoreV1().Services(ns.Name).Get(ctx, svcNodePort.Name, metav1.GetOptions{})
_, err = client.CoreV1().Services(ns.Name).Get(tCtx, svcNodePort.Name, metav1.GetOptions{})
if err == nil {
t.Fatalf("service was not delete: %v", err)
}
@ -233,11 +226,11 @@ func TestServicesFinalizersPatchStatus(t *testing.T) {
// Try to create service again without the finalizer to check the ClusterIP and NodePort are deallocated
svc = svcNodePort.DeepCopy()
svc.Finalizers = []string{}
if _, err := client.CoreV1().Services(ns.Name).Create(ctx, svc, metav1.CreateOptions{}); err != nil {
if _, err := client.CoreV1().Services(ns.Name).Create(tCtx, svc, metav1.CreateOptions{}); err != nil {
t.Fatalf("unexpected error creating service: %v", err)
}
// Delete service
if err := client.CoreV1().Services(ns.Name).Delete(ctx, svc.Name, metav1.DeleteOptions{}); err != nil {
if err := client.CoreV1().Services(ns.Name).Delete(tCtx, svc.Name, metav1.DeleteOptions{}); err != nil {
t.Fatalf("unexpected error deleting service: %v", err)
}
})
@ -248,11 +241,8 @@ func TestServicesFinalizersPatchStatus(t *testing.T) {
func TestServiceCIDR28bits(t *testing.T) {
serviceCIDR := "10.0.0.0/28"
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.ServiceClusterIPRanges = serviceCIDR
},
@ -261,7 +251,7 @@ func TestServiceCIDR28bits(t *testing.T) {
// Wait until the default "kubernetes" service is created.
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, "kubernetes", metav1.GetOptions{})
if err != nil {
return false, err
}
@ -288,7 +278,7 @@ func TestServiceCIDR28bits(t *testing.T) {
},
}
_, err := client.CoreV1().Services(ns.Name).Create(ctx, service, metav1.CreateOptions{})
_, err := client.CoreV1().Services(ns.Name).Create(tCtx, service, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Error creating test service: %v", err)
}
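The deprecated wait.Poll calls left untouched in these tests ignore cancelation; a context-aware variant would honor the test context. A sketch of that possible follow-up (not part of this commit), assuming k8s.io/apimachinery/pkg/util/wait; checkReady is hypothetical:

package example

import (
    "context"
    "testing"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/kubernetes/test/utils/ktesting" // assumed import path
)

func TestPollSketch(t *testing.T) {
    tCtx := ktesting.Init(t)
    // Unlike wait.Poll, this stops as soon as tCtx is canceled, e.g.
    // when ktesting tears the test down.
    err := wait.PollUntilContextTimeout(tCtx, 250*time.Millisecond, time.Minute, true,
        func(ctx context.Context) (bool, error) {
            return checkReady(ctx) // hypothetical readiness check
        })
    if err != nil {
        t.Fatalf("waiting failed: %v", err)
    }
}

func checkReady(ctx context.Context) (bool, error) { return true, nil }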

View File

@ -60,9 +60,7 @@ const (
// quota_test.go:100: Took 4.196205966s to scale up without quota
// quota_test.go:115: Took 12.021640372s to scale up with quota
func TestQuota(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
ctx := ktesting.Init(t)
// Set up an API server
_, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{

View File

@ -55,11 +55,8 @@ const (
)
func TestServiceAccountAutoCreate(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
c, _, stopFunc, _, err := startServiceAccountTestServerAndWaitForCaches(ctx, t)
tCtx := ktesting.Init(t)
c, _, stopFunc, _, err := startServiceAccountTestServerAndWaitForCaches(tCtx, t)
defer stopFunc()
if err != nil {
t.Fatalf("failed to setup ServiceAccounts server: %v", err)
@ -68,7 +65,7 @@ func TestServiceAccountAutoCreate(t *testing.T) {
ns := "test-service-account-creation"
// Create namespace
_, err = c.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{})
_, err = c.CoreV1().Namespaces().Create(tCtx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{})
if err != nil {
t.Fatalf("could not create namespace: %v", err)
}
@ -80,7 +77,7 @@ func TestServiceAccountAutoCreate(t *testing.T) {
}
// Delete service account
err = c.CoreV1().ServiceAccounts(ns).Delete(ctx, defaultUser.Name, metav1.DeleteOptions{})
err = c.CoreV1().ServiceAccounts(ns).Delete(tCtx, defaultUser.Name, metav1.DeleteOptions{})
if err != nil {
t.Fatalf("Could not delete default serviceaccount: %v", err)
}
@ -96,11 +93,8 @@ func TestServiceAccountAutoCreate(t *testing.T) {
}
func TestServiceAccountTokenAutoMount(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
c, _, stopFunc, _, err := startServiceAccountTestServerAndWaitForCaches(ctx, t)
tCtx := ktesting.Init(t)
c, _, stopFunc, _, err := startServiceAccountTestServerAndWaitForCaches(tCtx, t)
defer stopFunc()
if err != nil {
t.Fatalf("failed to setup ServiceAccounts server: %v", err)
@ -109,7 +103,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) {
ns := "auto-mount-ns"
// Create "my" namespace
_, err = c.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{})
_, err = c.CoreV1().Namespaces().Create(tCtx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: ns}}, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
t.Fatalf("could not create namespace: %v", err)
}
@ -127,7 +121,7 @@ func TestServiceAccountTokenAutoMount(t *testing.T) {
},
}
createdPod, err := c.CoreV1().Pods(ns).Create(ctx, &protoPod, metav1.CreateOptions{})
createdPod, err := c.CoreV1().Pods(ns).Create(tCtx, &protoPod, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
@ -142,11 +136,8 @@ func TestServiceAccountTokenAutoMount(t *testing.T) {
}
func TestServiceAccountTokenAuthentication(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
c, config, stopFunc, _, err := startServiceAccountTestServerAndWaitForCaches(ctx, t)
tCtx := ktesting.Init(t)
c, config, stopFunc, _, err := startServiceAccountTestServerAndWaitForCaches(tCtx, t)
defer stopFunc()
if err != nil {
t.Fatalf("failed to setup ServiceAccounts server: %v", err)
@ -156,19 +147,19 @@ func TestServiceAccountTokenAuthentication(t *testing.T) {
otherns := "other-ns"
// Create "my" namespace
_, err = c.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}, metav1.CreateOptions{})
_, err = c.CoreV1().Namespaces().Create(tCtx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
t.Fatalf("could not create namespace: %v", err)
}
// Create "other" namespace
_, err = c.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: otherns}}, metav1.CreateOptions{})
_, err = c.CoreV1().Namespaces().Create(tCtx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: otherns}}, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
t.Fatalf("could not create namespace: %v", err)
}
// Create "ro" user in myns
roSA, err := c.CoreV1().ServiceAccounts(myns).Create(ctx, &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}, metav1.CreateOptions{})
roSA, err := c.CoreV1().ServiceAccounts(myns).Create(tCtx, &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Service Account not created: %v", err)
}
@ -183,13 +174,13 @@ func TestServiceAccountTokenAuthentication(t *testing.T) {
roClient := clientset.NewForConfigOrDie(&roClientConfig)
doServiceAccountAPIRequests(t, roClient, myns, true, true, false)
doServiceAccountAPIRequests(t, roClient, otherns, true, false, false)
err = c.CoreV1().Secrets(myns).Delete(ctx, roTokenName, metav1.DeleteOptions{})
err = c.CoreV1().Secrets(myns).Delete(tCtx, roTokenName, metav1.DeleteOptions{})
if err != nil {
t.Fatalf("could not delete token: %v", err)
}
// wait for delete to be observed and reacted to via watch
err = wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) {
_, err := roClient.CoreV1().Secrets(myns).List(ctx, metav1.ListOptions{})
_, err := roClient.CoreV1().Secrets(myns).List(tCtx, metav1.ListOptions{})
if err == nil {
t.Logf("token is still valid, waiting")
return false, nil
@ -206,7 +197,7 @@ func TestServiceAccountTokenAuthentication(t *testing.T) {
doServiceAccountAPIRequests(t, roClient, myns, false, false, false)
// Create "rw" user in myns
rwSA, err := c.CoreV1().ServiceAccounts(myns).Create(ctx, &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readWriteServiceAccountName}}, metav1.CreateOptions{})
rwSA, err := c.CoreV1().ServiceAccounts(myns).Create(tCtx, &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readWriteServiceAccountName}}, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Service Account not created: %v", err)
}
@ -223,11 +214,8 @@ func TestServiceAccountTokenAuthentication(t *testing.T) {
}
func TestLegacyServiceAccountTokenTracking(t *testing.T) {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
c, config, stopFunc, _, err := startServiceAccountTestServerAndWaitForCaches(ctx, t)
tCtx := ktesting.Init(t)
c, config, stopFunc, _, err := startServiceAccountTestServerAndWaitForCaches(tCtx, t)
defer stopFunc()
if err != nil {
t.Fatalf("failed to setup ServiceAccounts server: %v", err)
@ -235,11 +223,11 @@ func TestLegacyServiceAccountTokenTracking(t *testing.T) {
// create service account
myns := "auth-ns"
_, err = c.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}, metav1.CreateOptions{})
_, err = c.CoreV1().Namespaces().Create(tCtx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: myns}}, metav1.CreateOptions{})
if err != nil && !apierrors.IsAlreadyExists(err) {
t.Fatalf("could not create namespace: %v", err)
}
mysa, err := c.CoreV1().ServiceAccounts(myns).Create(ctx, &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}, metav1.CreateOptions{})
mysa, err := c.CoreV1().ServiceAccounts(myns).Create(tCtx, &v1.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: readOnlyServiceAccountName}}, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Service Account not created: %v", err)
}
@ -298,7 +286,7 @@ func TestLegacyServiceAccountTokenTracking(t *testing.T) {
}
wg.Wait()
dateAfter := time.Now().UTC().Format(dateFormat)
liveSecret, err := c.CoreV1().Secrets(myns).Get(ctx, test.secretName, metav1.GetOptions{})
liveSecret, err := c.CoreV1().Secrets(myns).Get(tCtx, test.secretName, metav1.GetOptions{})
if err != nil {
t.Fatalf("Could not get secret: %v", err)
}
@ -325,7 +313,7 @@ func TestLegacyServiceAccountTokenTracking(t *testing.T) {
// configmap should exist with 'since' timestamp.
if err = wait.PollImmediate(time.Millisecond*10, wait.ForeverTestTimeout, func() (bool, error) {
dateBefore := time.Now().UTC().Format("2006-01-02")
configMap, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(ctx, legacytokentracking.ConfigMapName, metav1.GetOptions{})
configMap, err := c.CoreV1().ConfigMaps(metav1.NamespaceSystem).Get(tCtx, legacytokentracking.ConfigMapName, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get %q configmap, err %w", legacytokentracking.ConfigMapDataKey, err)
}
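Note that startServiceAccountTestServerAndWaitForCaches itself needed no new signature: any helper taking a plain context.Context accepts a TContext unchanged. A minimal illustration (helper name and body hypothetical):

package example

import (
    "context"
    "testing"

    "k8s.io/kubernetes/test/utils/ktesting" // assumed import path
)

// startHelperSketch stands in for helpers such as
// startServiceAccountTestServerAndWaitForCaches: because it only
// requires a context.Context, callers can pass a TContext unchanged.
func startHelperSketch(ctx context.Context, t *testing.T) (stop func(), err error) {
    _ = ctx // a real helper would bind its server and caches to ctx
    return func() {}, nil
}

func TestHelperSketch(t *testing.T) {
    tCtx := ktesting.Init(t)
    stop, err := startHelperSketch(tCtx, t)
    if err != nil {
        t.Fatalf("failed to set up: %v", err)
    }
    defer stop()
}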

View File

@ -44,11 +44,8 @@ func TestServiceAlloc(t *testing.T) {
// Create an IPv4 single stack control-plane
serviceCIDR := "192.168.0.0/29"
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.ServiceClusterIPRanges = serviceCIDR
},
@ -120,11 +117,8 @@ func TestServiceAllocIPAddress(t *testing.T) {
serviceCIDR := "2001:db8::/64"
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRServiceAllocator, true)()
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
client, _, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{
tCtx := ktesting.Init(t)
client, _, tearDownFn := framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
opts.ServiceClusterIPRanges = serviceCIDR
opts.GenericServerRunOptions.AdvertiseAddress = netutils.ParseIPSloppy("2001:db8::10")
@ -149,7 +143,7 @@ func TestServiceAllocIPAddress(t *testing.T) {
// Wait until the default "kubernetes" service is created.
if err := wait.Poll(250*time.Millisecond, time.Minute, func() (bool, error) {
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(ctx, "kubernetes", metav1.GetOptions{})
_, err := client.CoreV1().Services(metav1.NamespaceDefault).Get(tCtx, "kubernetes", metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
return false, err
}
@ -160,11 +154,11 @@ func TestServiceAllocIPAddress(t *testing.T) {
// create 5 random services and check that the Services have an IP associated
for i := 0; i < 5; i++ {
svc, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(ctx, svc(i), metav1.CreateOptions{})
svc, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(tCtx, svc(i), metav1.CreateOptions{})
if err != nil {
t.Error(err)
}
_, err = client.NetworkingV1alpha1().IPAddresses().Get(ctx, svc.Spec.ClusterIP, metav1.GetOptions{})
_, err = client.NetworkingV1alpha1().IPAddresses().Get(tCtx, svc.Spec.ClusterIP, metav1.GetOptions{})
if err != nil {
t.Error(err)
}

View File

@ -504,11 +504,10 @@ func UpdateNodeStatus(cs clientset.Interface, node *v1.Node) error {
// It registers cleanup functions via t.Cleanup(); they will be called when the test completes,
// so there is no need to do this again.
func InitTestAPIServer(t *testing.T, nsPrefix string, admission admission.Interface) *TestContext {
_, ctx := ktesting.NewTestContext(t)
ctx, cancel := context.WithCancel(ctx)
testCtx := &TestContext{Ctx: ctx}
tCtx := ktesting.Init(t)
testCtx := &TestContext{Ctx: tCtx}
testCtx.ClientSet, testCtx.KubeConfig, testCtx.CloseFn = framework.StartTestServer(ctx, t, framework.TestServerSetup{
testCtx.ClientSet, testCtx.KubeConfig, testCtx.CloseFn = framework.StartTestServer(tCtx, t, framework.TestServerSetup{
ModifyServerRunOptions: func(options *options.ServerRunOptions) {
options.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition", "Priority", "StorageObjectInUseProtection"}
if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) {
@ -536,7 +535,7 @@ func InitTestAPIServer(t *testing.T, nsPrefix string, admission admission.Interf
oldCloseFn := testCtx.CloseFn
testCtx.CloseFn = func() {
cancel()
tCtx.Cancel("tearing down apiserver")
oldCloseFn()
}
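The wrapping pattern shown above generalizes: cancel first so controllers and watches stop, then release the server. A sketch, same assumptions as before (serverClose stands in for the real CloseFn):

package example

import (
    "testing"

    "k8s.io/kubernetes/test/utils/ktesting" // assumed import path
)

func initSketch(t *testing.T) (closeFn func()) {
    tCtx := ktesting.Init(t)
    serverClose := func() {} // stands in for the CloseFn returned by StartTestServer

    return func() {
        tCtx.Cancel("tearing down apiserver") // stop background work first...
        serverClose()                         // ...then release the server
    }
}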

View File

@ -17,7 +17,6 @@ limitations under the License.
package ktesting
import (
"context"
"flag"
"fmt"
"testing"
@ -41,14 +40,9 @@ func SetDefaultVerbosity(v int) {
_ = f.Value.Set(fmt.Sprintf("%d", v))
}
// NewTestContext is a drop-in replacement for ktesting.NewTestContext
// NewTestContext is a replacement for ktesting.NewTestContext
// which returns a more versatile context.
//
// The type of that context is still context.Context because replacing
// it with TContext breaks tests which use `WithCancel`.
//
// TODO(pohly): change all of that code together with changing the return type.
func NewTestContext(tb testing.TB) (klog.Logger, context.Context) {
func NewTestContext(tb testing.TB) (klog.Logger, TContext) {
tCtx := Init(tb)
return tCtx.Logger(), tCtx
}
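This signature change is what forced the cleanup in the rest of the commit: code that reassigned the returned context via context.WithCancel stopped compiling. A sketch of the failure mode and the remaining escape hatch (illustrative only, same import-path assumption):

package example

import (
    "context"
    "testing"

    "k8s.io/kubernetes/test/utils/ktesting" // assumed import path
)

func TestSignatureChange(t *testing.T) {
    _, tCtx := ktesting.NewTestContext(t) // second result is now a TContext

    // The old pattern no longer compiles, which is the point:
    //
    //	tCtx, cancel := context.WithCancel(tCtx)
    //	// error: cannot use context.WithCancel(tCtx) (value of type
    //	// context.Context) as ktesting.TContext value in assignment
    //
    // When a separately cancelable context really is needed, bind the
    // result to a fresh variable instead of overwriting tCtx:
    ctx, cancel := context.WithCancel(tCtx)
    defer cancel()
    _ = ctx
}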