Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-30 06:54:01 +00:00
fix: enable empty and len rules from testifylint on pkg package
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Co-authored-by: Patrick Ohly <patrick.ohly@intel.com>
This commit is contained in:
parent 40225a788c
commit f014b754fb
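The change below is mechanical: it rewrites the assertion forms that testifylint's empty and len rules flag. As a rough illustration (an assumed example, not taken from this patch; the test and variable names are made up), the rules prefer the dedicated helpers over length comparisons:

package demo

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Illustrative sketch of the rewrites applied throughout this commit.
func TestItems(t *testing.T) {
	items := []string{"a", "b"}

	// Flagged by the "len" rule: length checked via len() inside Equal.
	assert.Equal(t, 2, len(items))
	// Preferred form: assert.Len reports the collection on failure.
	assert.Len(t, items, 2)

	var none []string
	// Flagged by the "empty" rule: emptiness checked via Len(..., 0) or len() == 0.
	assert.Len(t, none, 0)
	// Preferred form.
	assert.Empty(t, none)
}

Removing "empty" and "len" from the disable list in the golangci-lint settings (first two hunks) turns these checks on for the pkg package; the remaining hunks apply the corresponding rewrites.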
@ -237,12 +237,10 @@ linters-settings: # please keep this alphabetized
disable: # TODO: remove each disabled rule and fix it
- blank-import
- compares
- empty
- error-is-as
- error-nil
- expected-actual
- float-compare
- go-require
- len
- nil-compare
- require-error
@ -213,13 +213,11 @@ linters-settings: # please keep this alphabetized
disable: # TODO: remove each disabled rule and fix it
- blank-import
- compares
- empty
- error-is-as
- error-nil
- expected-actual
- float-compare
- go-require
- len
- nil-compare
- require-error
{{- end}}
@ -195,7 +195,7 @@ func TestSyncServiceNoSelector(t *testing.T) {
logger, _ := ktesting.NewTestContext(t)
err := esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
assert.NoError(t, err)
assert.Len(t, client.Actions(), 0)
assert.Empty(t, client.Actions())
}
func TestServiceExternalNameTypeSync(t *testing.T) {
@ -262,11 +262,11 @@ func TestServiceExternalNameTypeSync(t *testing.T) {
err = esController.syncService(logger, fmt.Sprintf("%s/%s", namespace, serviceName))
assert.NoError(t, err)
assert.Len(t, client.Actions(), 0)
assert.Empty(t, client.Actions())
sliceList, err := client.DiscoveryV1().EndpointSlices(namespace).List(context.TODO(), metav1.ListOptions{})
assert.NoError(t, err)
assert.Len(t, sliceList.Items, 0, "Expected 0 endpoint slices")
assert.Empty(t, sliceList.Items, "Expected 0 endpoint slices")
})
}
}
@ -288,7 +288,7 @@ func TestSyncServicePendingDeletion(t *testing.T) {
logger, _ := ktesting.NewTestContext(t)
err := esController.syncService(logger, fmt.Sprintf("%s/%s", ns, serviceName))
assert.NoError(t, err)
assert.Len(t, client.Actions(), 0)
assert.Empty(t, client.Actions())
}
// Ensure SyncService for service with selector but no pods results in placeholder EndpointSlice
@ -341,7 +341,7 @@ func TestSyncServiceMissing(t *testing.T) {
assert.Nil(t, err, "Expected no error syncing service")
// That should mean no client actions were performed
assert.Len(t, client.Actions(), 0)
assert.Empty(t, client.Actions())
// TriggerTimeTracker should have removed the reference to the missing service
assert.NotContains(t, esController.triggerTimeTracker.ServiceStates, missingServiceKey)
@ -104,7 +104,7 @@ func TestGarbageCollectorConstruction(t *testing.T) {
if err != nil {
t.Fatal(err)
}
assert.Equal(t, 0, len(gc.dependencyGraphBuilder.monitors))
assert.Empty(t, gc.dependencyGraphBuilder.monitors)
// Make sure resource monitor syncing creates and stops resource monitors.
tweakableRM.Add(schema.GroupVersionKind{Group: "tpr.io", Version: "v1", Kind: "unknown"}, nil)
@ -112,13 +112,13 @@ func TestGarbageCollectorConstruction(t *testing.T) {
if err != nil {
t.Errorf("Failed adding a monitor: %v", err)
}
assert.Equal(t, 2, len(gc.dependencyGraphBuilder.monitors))
assert.Len(t, gc.dependencyGraphBuilder.monitors, 2)
err = gc.resyncMonitors(logger, podResource)
if err != nil {
t.Errorf("Failed removing a monitor: %v", err)
}
assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))
assert.Len(t, gc.dependencyGraphBuilder.monitors, 1)
go gc.Run(tCtx, 1)
@ -126,13 +126,13 @@ func TestGarbageCollectorConstruction(t *testing.T) {
if err != nil {
t.Errorf("Failed adding a monitor: %v", err)
}
assert.Equal(t, 2, len(gc.dependencyGraphBuilder.monitors))
assert.Len(t, gc.dependencyGraphBuilder.monitors, 2)
err = gc.resyncMonitors(logger, podResource)
if err != nil {
t.Errorf("Failed removing a monitor: %v", err)
}
assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))
assert.Len(t, gc.dependencyGraphBuilder.monitors, 1)
}
// fakeAction records information about requests to aid in testing.
@ -5293,5 +5293,5 @@ func TestMultipleHPAs(t *testing.T) {
}
}
assert.Equal(t, hpaCount, len(processedHPA), "Expected to process all HPAs")
assert.Len(t, processedHPA, hpaCount, "Expected to process all HPAs")
}
@ -86,7 +86,7 @@ func TestPatchNode(t *testing.T) {
continue
}
actions := fakeClient.Actions()
assert.Equal(t, 1, len(actions), "unexpected actions: %#v", actions)
assert.Len(t, actions, 1, "unexpected actions: %#v", actions)
patchAction := actions[0].(core.PatchActionImpl)
assert.Equal(t, testCase.patch, string(patchAction.Patch), "%d: unexpected patch: %s", i, string(patchAction.Patch))
}
@ -145,9 +145,9 @@ func TestUpdateNodeIfNeeded(t *testing.T) {
}
actions := fakeClient.Actions()
if testCase.patch == "" {
assert.Equal(t, 0, len(actions), "unexpected actions: %#v", actions)
assert.Empty(t, actions, "unexpected actions")
} else {
assert.Equal(t, 1, len(actions), "unexpected actions: %#v", actions)
assert.Len(t, actions, 1, "unexpected actions: %#v", actions)
patchAction := actions[0].(core.PatchActionImpl)
assert.Equal(t, testCase.patch, string(patchAction.Patch), "%d: unexpected patch: %s", i, string(patchAction.Patch))
}
@ -365,7 +365,7 @@ func TestUpdateCapacityAllocatable(t *testing.T) {
as.True(ok)
as.Equal(int64(3), resource1Capacity.Value())
as.Equal(int64(2), resource1Allocatable.Value())
as.Equal(0, len(removedResources))
as.Empty(removedResources)
// Deletes an unhealthy device should NOT change allocatable but change capacity.
devs1 := devs[:len(devs)-1]
@ -377,7 +377,7 @@ func TestUpdateCapacityAllocatable(t *testing.T) {
as.True(ok)
as.Equal(int64(2), resource1Capacity.Value())
as.Equal(int64(2), resource1Allocatable.Value())
as.Equal(0, len(removedResources))
as.Empty(removedResources)
// Updates a healthy device to unhealthy should reduce allocatable by 1.
devs[1].Health = pluginapi.Unhealthy
@ -389,7 +389,7 @@ func TestUpdateCapacityAllocatable(t *testing.T) {
as.True(ok)
as.Equal(int64(3), resource1Capacity.Value())
as.Equal(int64(1), resource1Allocatable.Value())
as.Equal(0, len(removedResources))
as.Empty(removedResources)
// Deletes a healthy device should reduce capacity and allocatable by 1.
devs2 := devs[1:]
@ -401,7 +401,7 @@ func TestUpdateCapacityAllocatable(t *testing.T) {
as.True(ok)
as.Equal(int64(0), resource1Allocatable.Value())
as.Equal(int64(2), resource1Capacity.Value())
as.Equal(0, len(removedResources))
as.Empty(removedResources)
// Tests adding another resource.
resourceName2 := "resource2"
@ -410,14 +410,14 @@ func TestUpdateCapacityAllocatable(t *testing.T) {
testManager.endpoints[resourceName2] = endpointInfo{e: e2, opts: nil}
callback(resourceName2, devs)
capacity, allocatable, removedResources = testManager.GetCapacity()
as.Equal(2, len(capacity))
as.Len(capacity, 2)
resource2Capacity, ok := capacity[v1.ResourceName(resourceName2)]
as.True(ok)
resource2Allocatable, ok := allocatable[v1.ResourceName(resourceName2)]
as.True(ok)
as.Equal(int64(3), resource2Capacity.Value())
as.Equal(int64(1), resource2Allocatable.Value())
as.Equal(0, len(removedResources))
as.Empty(removedResources)
// Expires resourceName1 endpoint. Verifies testManager.GetCapacity() reports that resourceName1
// is removed from capacity and it no longer exists in healthyDevices after the call.
@ -432,7 +432,7 @@ func TestUpdateCapacityAllocatable(t *testing.T) {
as.NotContains(testManager.healthyDevices, resourceName1)
as.NotContains(testManager.unhealthyDevices, resourceName1)
as.NotContains(testManager.endpoints, resourceName1)
as.Equal(1, len(testManager.endpoints))
as.Len(testManager.endpoints, 1)
// Stops resourceName2 endpoint. Verifies its stopTime is set, allocate and
// preStartContainer calls return errors.
@ -464,7 +464,7 @@ func TestUpdateCapacityAllocatable(t *testing.T) {
testManager.unhealthyDevices = make(map[string]sets.Set[string])
err = testManager.readCheckpoint()
as.Nil(err)
as.Equal(1, len(testManager.endpoints))
as.Len(testManager.endpoints, 1)
as.Contains(testManager.endpoints, resourceName2)
capacity, allocatable, removed = testManager.GetCapacity()
val, ok = capacity[v1.ResourceName(resourceName2)]
@ -506,7 +506,7 @@ func TestGetAllocatableDevicesMultipleResources(t *testing.T) {
testManager.genericDeviceUpdateCallback(resourceName2, resource2Devs)
allocatableDevs := testManager.GetAllocatableDevices()
as.Equal(2, len(allocatableDevs))
as.Len(allocatableDevs, 2)
devInstances1, ok := allocatableDevs[resourceName1]
as.True(ok)
@ -543,7 +543,7 @@ func TestGetAllocatableDevicesHealthTransition(t *testing.T) {
testManager.genericDeviceUpdateCallback(resourceName1, resource1Devs)
allocatableDevs := testManager.GetAllocatableDevices()
as.Equal(1, len(allocatableDevs))
as.Len(allocatableDevs, 1)
devInstances, ok := allocatableDevs[resourceName1]
as.True(ok)
checkAllocatableDevicesConsistsOf(as, devInstances, []string{"R1Device1", "R1Device2"})
@ -557,7 +557,7 @@ func TestGetAllocatableDevicesHealthTransition(t *testing.T) {
testManager.genericDeviceUpdateCallback(resourceName1, resource1Devs)
allocatableDevs = testManager.GetAllocatableDevices()
as.Equal(1, len(allocatableDevs))
as.Len(allocatableDevs, 1)
devInstances, ok = allocatableDevs[resourceName1]
as.True(ok)
checkAllocatableDevicesConsistsOf(as, devInstances, []string{"R1Device1", "R1Device2", "R1Device3"})
@ -1293,9 +1293,9 @@ func TestGetDeviceRunContainerOptions(t *testing.T) {
// when pod is in activePods, GetDeviceRunContainerOptions should return
runContainerOpts, err := testManager.GetDeviceRunContainerOptions(pod1, &pod1.Spec.Containers[0])
as.Nil(err)
as.Equal(len(runContainerOpts.Devices), 3)
as.Equal(len(runContainerOpts.Mounts), 2)
as.Equal(len(runContainerOpts.Envs), 2)
as.Len(runContainerOpts.Devices, 3)
as.Len(runContainerOpts.Mounts, 2)
as.Len(runContainerOpts.Envs, 2)
activePods = []*v1.Pod{pod2}
podsStub.updateActivePods(activePods)
@ -1643,7 +1643,7 @@ func TestDevicePreStartContainer(t *testing.T) {
expectedResps, err := allocateStubFunc()([]string{"dev1", "dev2"})
as.Nil(err)
as.Equal(1, len(expectedResps.ContainerResponses))
as.Len(expectedResps.ContainerResponses, 1)
expectedResp := expectedResps.ContainerResponses[0]
as.Equal(len(runContainerOpts.Devices), len(expectedResp.Devices))
as.Equal(len(runContainerOpts.Mounts), len(expectedResp.Mounts))
@ -1428,7 +1428,7 @@ func TestGetContainerClaimInfos(t *testing.T) {
fakeClaimInfos, err := manager.GetContainerClaimInfos(test.pod, test.container)
assert.NoError(t, err)
assert.Equal(t, 1, len(fakeClaimInfos))
assert.Len(t, fakeClaimInfos, 1)
assert.Equal(t, test.expectedClaimName, fakeClaimInfos[0].ClaimInfoState.ClaimName)
manager.cache.delete(test.pod.Spec.ResourceClaims[0].Name, "default")
@ -780,7 +780,7 @@ func TestGarbageCollectImageTooOld(t *testing.T) {
t.Log(fakeClock.Now())
images, err := manager.imagesInEvictionOrder(ctx, fakeClock.Now())
require.NoError(t, err)
require.Equal(t, len(images), 1)
require.Len(t, images, 1)
// Simulate pod having just used this image, but having been GC'd
images[0].lastUsed = fakeClock.Now()
@ -796,7 +796,7 @@ func TestGarbageCollectImageTooOld(t *testing.T) {
fakeClock.Step(policy.MaxAge + 1)
images, err = manager.freeOldImages(ctx, images, fakeClock.Now(), oldStartTime)
require.NoError(t, err)
assert.Len(images, 0)
assert.Empty(images)
assert.Len(fakeRuntime.ImageList, 1)
}
@ -837,7 +837,7 @@ func TestGarbageCollectImageMaxAgeDisabled(t *testing.T) {
t.Log(fakeClock.Now())
images, err := manager.imagesInEvictionOrder(ctx, fakeClock.Now())
require.NoError(t, err)
require.Equal(t, len(images), 1)
require.Len(t, images, 1)
assert.Len(fakeRuntime.ImageList, 2)
oldStartTime := fakeClock.Now()
@ -374,7 +374,7 @@ func TestPullAndListImageWithPodAnnotations(t *testing.T) {
assert.Equal(t, c.expected[0].shouldRecordFinishedPullingTime, fakePodPullingTimeRecorder.finishedPullingRecorded)
images, _ := fakeRuntime.ListImages(ctx)
assert.Equal(t, 1, len(images), "ListImages() count")
assert.Len(t, images, 1, "ListImages() count")
image := images[0]
assert.Equal(t, "missing_image:latest", image.ID, "Image ID")
@ -431,7 +431,7 @@ func TestPullAndListImageWithRuntimeHandlerInImageCriAPIFeatureGate(t *testing.T
assert.Equal(t, c.expected[0].shouldRecordFinishedPullingTime, fakePodPullingTimeRecorder.finishedPullingRecorded)
images, _ := fakeRuntime.ListImages(ctx)
assert.Equal(t, 1, len(images), "ListImages() count")
assert.Len(t, images, 1, "ListImages() count")
image := images[0]
assert.Equal(t, "missing_image:latest", image.ID, "Image ID")
@ -1283,7 +1283,7 @@ func TestFastStatusUpdateOnce(t *testing.T) {
actions := kubeClient.Actions()
if tc.wantPatches == 0 {
require.Len(t, actions, 0)
require.Empty(t, actions)
return
}
@ -6114,9 +6114,9 @@ func TestGetNonExistentImagePullSecret(t *testing.T) {
}
pullSecrets := testKubelet.kubelet.getPullSecretsForPod(testPod)
assert.Equal(t, 0, len(pullSecrets))
assert.Empty(t, pullSecrets)
assert.Equal(t, 1, len(fakeRecorder.Events))
assert.Len(t, fakeRecorder.Events, 1)
event := <-fakeRecorder.Events
assert.Equal(t, event, expectedEvent)
}
@ -1460,7 +1460,7 @@ func TestDeleteOrphanedMirrorPods(t *testing.T) {
// Sync with an empty pod list to delete all mirror pods.
kl.HandlePodCleanups(ctx)
assert.Len(t, manager.GetPods(), 0, "Expected 0 mirror pods")
assert.Empty(t, manager.GetPods(), "Expected no mirror pods")
for i, pod := range orphanPods {
name := kubecontainer.GetPodFullName(pod)
creates, deletes := manager.GetCounts(name)
@ -471,7 +471,7 @@ func TestVolumeUnmountAndDetachControllerDisabled(t *testing.T) {
podVolumes = kubelet.volumeManager.GetMountedVolumesForPod(
util.GetUniquePodName(pod))
assert.Len(t, podVolumes, 0,
assert.Empty(t, podVolumes,
"Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes)
assert.NoError(t, volumetest.VerifyTearDownCallCount(
@ -667,8 +667,8 @@ func TestVolumeUnmountAndDetachControllerEnabled(t *testing.T) {
util.GetUniquePodName(pod))
assert.Equal(t, podVolumes, allPodVolumes, "GetMountedVolumesForPod and GetPossiblyMountedVolumesForPod should return the same volumes")
assert.Len(t, podVolumes, 0,
"Expected volumes to be unmounted and detached. But some volumes are still mounted: %#v", podVolumes)
assert.Empty(t, podVolumes,
"Expected volumes to be unmounted and detached. But some volumes are still mounted")
assert.NoError(t, volumetest.VerifyTearDownCallCount(
1 /* expectedTearDownCallCount */, testKubelet.volumePlugin))
@ -69,7 +69,7 @@ func TestRemoveContainer(t *testing.T) {
// Create fake sandbox and container
_, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod)
assert.Equal(t, len(fakeContainers), 1)
assert.Len(t, fakeContainers, 1)
containerID := fakeContainers[0].Id
fakeOS := m.osInterface.(*containertest.FakeOS)
@ -956,7 +956,7 @@ func TestUpdateContainerResources(t *testing.T) {
// Create fake sandbox and container
_, fakeContainers := makeAndSetFakePod(t, m, fakeRuntime, pod)
assert.Equal(t, len(fakeContainers), 1)
assert.Len(t, fakeContainers, 1)
ctx := context.Background()
cStatus, err := m.getPodContainerStatuses(ctx, pod.UID, pod.Name, pod.Namespace)
@ -43,7 +43,7 @@ func TestPullImage(t *testing.T) {
images, err := fakeManager.ListImages(ctx)
assert.NoError(t, err)
assert.Equal(t, 1, len(images))
assert.Len(t, images, 1)
assert.Equal(t, images[0].RepoTags, []string{"busybox"})
}
@ -64,7 +64,7 @@ func TestPullImageWithError(t *testing.T) {
images, err := fakeManager.ListImages(ctx)
assert.NoError(t, err)
assert.Equal(t, 0, len(images))
assert.Empty(t, images)
}
func TestPullImageWithInvalidImageName(t *testing.T) {
@ -198,11 +198,11 @@ func TestRemoveImage(t *testing.T) {
_, err = fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
assert.NoError(t, err)
assert.Equal(t, 1, len(fakeImageService.Images))
assert.Len(t, fakeImageService.Images, 1)
err = fakeManager.RemoveImage(ctx, kubecontainer.ImageSpec{Image: "busybox"})
assert.NoError(t, err)
assert.Equal(t, 0, len(fakeImageService.Images))
assert.Empty(t, fakeImageService.Images)
}
func TestRemoveImageNoOpIfImageNotLocal(t *testing.T) {
@ -221,13 +221,13 @@ func TestRemoveImageWithError(t *testing.T) {
_, err = fakeManager.PullImage(ctx, kubecontainer.ImageSpec{Image: "busybox"}, nil, nil)
assert.NoError(t, err)
assert.Equal(t, 1, len(fakeImageService.Images))
assert.Len(t, fakeImageService.Images, 1)
fakeImageService.InjectError("RemoveImage", fmt.Errorf("test-failure"))
err = fakeManager.RemoveImage(ctx, kubecontainer.ImageSpec{Image: "busybox"})
assert.Error(t, err)
assert.Equal(t, 1, len(fakeImageService.Images))
assert.Len(t, fakeImageService.Images, 1)
}
func TestImageStats(t *testing.T) {
@ -381,7 +381,7 @@ func TestPullWithSecretsWithError(t *testing.T) {
images, err := fakeManager.ListImages(ctx)
assert.NoError(t, err)
assert.Equal(t, 0, len(images))
assert.Empty(t, images)
})
}
}
@ -403,6 +403,6 @@ func TestPullThenListWithAnnotations(t *testing.T) {
images, err := fakeManager.ListImages(ctx)
assert.NoError(t, err)
assert.Equal(t, 1, len(images))
assert.Len(t, images, 1)
assert.Equal(t, images[0].Spec, imageSpec)
}
@ -600,8 +600,8 @@ func TestKillPod(t *testing.T) {
err = m.KillPod(ctx, pod, runningPod, nil)
assert.NoError(t, err)
assert.Equal(t, 3, len(fakeRuntime.Containers))
assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
assert.Len(t, fakeRuntime.Containers, 3)
assert.Len(t, fakeRuntime.Sandboxes, 1)
for _, sandbox := range fakeRuntime.Sandboxes {
assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, sandbox.State)
}
@ -640,9 +640,9 @@ func TestSyncPod(t *testing.T) {
backOff := flowcontrol.NewBackOff(time.Second, time.Minute)
result := m.SyncPod(context.Background(), pod, &kubecontainer.PodStatus{}, []v1.Secret{}, backOff)
assert.NoError(t, result.Error())
assert.Equal(t, 2, len(fakeRuntime.Containers))
assert.Equal(t, 2, len(fakeImage.Images))
assert.Equal(t, 1, len(fakeRuntime.Sandboxes))
assert.Len(t, fakeRuntime.Containers, 2)
assert.Len(t, fakeImage.Images, 2)
assert.Len(t, fakeRuntime.Sandboxes, 1)
for _, sandbox := range fakeRuntime.Sandboxes {
assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_READY, sandbox.State)
}
@ -89,7 +89,7 @@ func TestCreatePodSandbox(t *testing.T) {
assert.Contains(t, fakeRuntime.Called, "RunPodSandbox")
sandboxes, err := fakeRuntime.ListPodSandbox(ctx, &runtimeapi.PodSandboxFilter{Id: id})
assert.NoError(t, err)
assert.Equal(t, len(sandboxes), 1)
assert.Len(t, sandboxes, 1)
assert.Equal(t, sandboxes[0].Id, fmt.Sprintf("%s_%s_%s_1", pod.Name, pod.Namespace, pod.UID))
assert.Equal(t, sandboxes[0].State, runtimeapi.PodSandboxState_SANDBOX_READY)
}
@ -66,7 +66,7 @@ func TestWatcherRecordsEventsForOomEvents(t *testing.T) {
assert.NoError(t, oomWatcher.Start(node))
eventsRecorded := getRecordedEvents(fakeRecorder, numExpectedOomEvents)
assert.Equal(t, numExpectedOomEvents, len(eventsRecorded))
assert.Len(t, eventsRecorded, numExpectedOomEvents)
}
func getRecordedEvents(fakeRecorder *record.FakeRecorder, numExpectedOomEvents int) []string {
@ -125,7 +125,7 @@ func TestWatcherRecordsEventsForOomEventsCorrectContainerName(t *testing.T) {
assert.NoError(t, oomWatcher.Start(node))
eventsRecorded := getRecordedEvents(fakeRecorder, numExpectedOomEvents)
assert.Equal(t, numExpectedOomEvents, len(eventsRecorded))
assert.Len(t, eventsRecorded, numExpectedOomEvents)
}
// TestWatcherRecordsEventsForOomEventsWithAdditionalInfo verifies that our the
@ -161,7 +161,7 @@ func TestWatcherRecordsEventsForOomEventsWithAdditionalInfo(t *testing.T) {
eventsRecorded := getRecordedEvents(fakeRecorder, numExpectedOomEvents)
assert.Equal(t, numExpectedOomEvents, len(eventsRecorded))
assert.Len(t, eventsRecorded, numExpectedOomEvents)
assert.Contains(t, eventsRecorded[0], systemOOMEvent)
assert.Contains(t, eventsRecorded[0], fmt.Sprintf("pid: %d", eventPid))
assert.Contains(t, eventsRecorded[0], fmt.Sprintf("victim process: %s", processName))
@ -139,7 +139,7 @@ func TestRelisting(t *testing.T) {
// changed.
pleg.Relist()
actual = getEventsFromChannel(ch)
assert.True(t, len(actual) == 0, "no container has changed, event length should be 0")
assert.Empty(t, actual, "no container has changed, event length should be 0")
runtime.AllPodList = []*containertest.FakePod{
{Pod: &kubecontainer.Pod{
@ -227,7 +227,7 @@ func TestEventChannelFull(t *testing.T) {
}
// event channel is full, discard events
actual = getEventsFromChannel(ch)
assert.True(t, len(actual) == 4, "channel length should be 4")
assert.Len(t, actual, 4, "channel length should be 4")
assert.Subsetf(t, allEvents, actual, "actual events should in all events")
}
@ -102,7 +102,7 @@ func TestSummaryProviderGetStatsNoSplitFileSystem(t *testing.T) {
assert.Equal(summary.Node.Fs, rootFsStats)
assert.Equal(summary.Node.Runtime, &statsapi.RuntimeStats{ContainerFs: imageFsStats, ImageFs: imageFsStats})
assert.Equal(len(summary.Node.SystemContainers), 4)
assert.Len(summary.Node.SystemContainers, 4)
assert.Contains(summary.Node.SystemContainers, statsapi.ContainerStats{
Name: "kubelet",
StartTime: kubeletCreationTime,
@ -199,7 +199,7 @@ func TestSummaryProviderGetStatsSplitImageFs(t *testing.T) {
// Since we are a split filesystem we want root filesystem to be container fs and image to be image filesystem
assert.Equal(summary.Node.Runtime, &statsapi.RuntimeStats{ContainerFs: rootFsStats, ImageFs: imageFsStats})
assert.Equal(len(summary.Node.SystemContainers), 4)
assert.Len(summary.Node.SystemContainers, 4)
assert.Contains(summary.Node.SystemContainers, statsapi.ContainerStats{
Name: "kubelet",
StartTime: kubeletCreationTime,
@ -284,7 +284,7 @@ func TestSummaryProviderGetCPUAndMemoryStats(t *testing.T) {
assert.Nil(summary.Node.Fs)
assert.Nil(summary.Node.Runtime)
assert.Equal(len(summary.Node.SystemContainers), 4)
assert.Len(summary.Node.SystemContainers, 4)
assert.Contains(summary.Node.SystemContainers, statsapi.ContainerStats{
Name: "kubelet",
StartTime: cgroupStatsMap["/kubelet"].cs.StartTime,
@ -275,7 +275,7 @@ func TestCadvisorListPodStats(t *testing.T) {
pods, err := p.ListPodStats(ctx)
assert.NoError(t, err)
assert.Equal(t, 4, len(pods))
assert.Len(t, pods, 4)
indexPods := make(map[statsapi.PodReference]statsapi.PodStats, len(pods))
for _, pod := range pods {
indexPods[pod.PodRef] = pod
@ -448,7 +448,7 @@ func TestCadvisorListPodCPUAndMemoryStats(t *testing.T) {
pods, err := p.ListPodCPUAndMemoryStats(ctx)
assert.NoError(t, err)
assert.Equal(t, 3, len(pods))
assert.Len(t, pods, 3)
indexPods := make(map[statsapi.PodReference]statsapi.PodStats, len(pods))
for _, pod := range pods {
indexPods[pod.PodRef] = pod
@ -742,7 +742,7 @@ func TestCadvisorListPodStatsWhenContainerLogFound(t *testing.T) {
pods, err := p.ListPodStats(ctx)
assert.NoError(t, err)
assert.Equal(t, 1, len(pods))
assert.Len(t, pods, 1)
// Validate Pod0 Results
checkEphemeralStats(t, "Pod0", []int{seedPod0Container0, seedPod0Container1}, nil, fakeStatsSlice, pods[0].EphemeralStorage)
}
@ -243,7 +243,7 @@ func TestCRIListPodStats(t *testing.T) {
stats, err := provider.ListPodStats(ctx)
assert := assert.New(t)
assert.NoError(err)
assert.Equal(4, len(stats))
assert.Len(stats, 4)
podStatsMap := make(map[statsapi.PodReference]statsapi.PodStats)
for _, s := range stats {
@ -252,7 +252,7 @@ func TestCRIListPodStats(t *testing.T) {
p0 := podStatsMap[statsapi.PodReference{Name: "sandbox0-name", UID: "sandbox0-uid", Namespace: "sandbox0-ns"}]
assert.Equal(sandbox0.CreatedAt, p0.StartTime.UnixNano())
assert.Equal(2, len(p0.Containers))
assert.Len(p0.Containers, 2)
checkEphemeralStorageStats(assert, p0, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats0, containerStats1},
[]*volume.Metrics{containerLogStats0, containerLogStats1}, podLogStats0)
@ -281,7 +281,7 @@ func TestCRIListPodStats(t *testing.T) {
p1 := podStatsMap[statsapi.PodReference{Name: "sandbox1-name", UID: "sandbox1-uid", Namespace: "sandbox1-ns"}]
assert.Equal(sandbox1.CreatedAt, p1.StartTime.UnixNano())
assert.Equal(1, len(p1.Containers))
assert.Len(p1.Containers, 1)
checkEphemeralStorageStats(assert, p1, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats2},
[]*volume.Metrics{containerLogStats2}, podLogStats1)
@ -298,7 +298,7 @@ func TestCRIListPodStats(t *testing.T) {
p2 := podStatsMap[statsapi.PodReference{Name: "sandbox2-name", UID: "sandbox2-uid", Namespace: "sandbox2-ns"}]
assert.Equal(sandbox2.CreatedAt, p2.StartTime.UnixNano())
assert.Equal(1, len(p2.Containers))
assert.Len(p2.Containers, 1)
checkEphemeralStorageStats(assert, p2, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats4},
[]*volume.Metrics{containerLogStats4}, nil)
@ -317,7 +317,7 @@ func TestCRIListPodStats(t *testing.T) {
p3 := podStatsMap[statsapi.PodReference{Name: "sandbox3-name", UID: "sandbox3-uid", Namespace: "sandbox3-ns"}]
assert.Equal(sandbox3.CreatedAt, p3.StartTime.UnixNano())
assert.Equal(1, len(p3.Containers))
assert.Len(p3.Containers, 1)
c8 := p3.Containers[0]
assert.Equal(cName8, c8.Name)
@ -468,14 +468,14 @@ func TestListPodStatsStrictlyFromCRI(t *testing.T) {
stats, err := provider.ListPodStats(ctx)
assert := assert.New(t)
assert.NoError(err)
assert.Equal(2, len(stats))
assert.Len(stats, 2)
podStatsMap := make(map[statsapi.PodReference]statsapi.PodStats)
for _, s := range stats {
podStatsMap[s.PodRef] = s
}
p0 := podStatsMap[prf0]
assert.Equal(sandbox0.CreatedAt, p0.StartTime.UnixNano())
assert.Equal(2, len(p0.Containers))
assert.Len(p0.Containers, 2)
checkEphemeralStorageStats(assert, p0, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats0, containerStats1},
[]*volume.Metrics{containerLogStats0, containerLogStats1}, podLogStats0)
@ -504,7 +504,7 @@ func TestListPodStatsStrictlyFromCRI(t *testing.T) {
p1 := podStatsMap[prf1]
assert.Equal(sandbox1.CreatedAt, p1.StartTime.UnixNano())
assert.Equal(1, len(p1.Containers))
assert.Len(p1.Containers, 1)
checkEphemeralStorageStats(assert, p1, ephemeralVolumes, []*runtimeapi.ContainerStats{containerStats2},
[]*volume.Metrics{containerLogStats2}, podLogStats1)
@ -637,7 +637,7 @@ func TestCRIListPodCPUAndMemoryStats(t *testing.T) {
stats, err := provider.ListPodCPUAndMemoryStats(ctx)
assert := assert.New(t)
assert.NoError(err)
assert.Equal(5, len(stats))
assert.Len(stats, 5)
podStatsMap := make(map[statsapi.PodReference]statsapi.PodStats)
for _, s := range stats {
@ -646,7 +646,7 @@ func TestCRIListPodCPUAndMemoryStats(t *testing.T) {
p0 := podStatsMap[statsapi.PodReference{Name: "sandbox0-name", UID: "sandbox0-uid", Namespace: "sandbox0-ns"}]
assert.Equal(sandbox0.CreatedAt, p0.StartTime.UnixNano())
assert.Equal(2, len(p0.Containers))
assert.Len(p0.Containers, 2)
assert.Nil(p0.EphemeralStorage)
assert.Nil(p0.VolumeStats)
assert.Nil(p0.Network)
@ -674,7 +674,7 @@ func TestCRIListPodCPUAndMemoryStats(t *testing.T) {
p1 := podStatsMap[statsapi.PodReference{Name: "sandbox1-name", UID: "sandbox1-uid", Namespace: "sandbox1-ns"}]
assert.Equal(sandbox1.CreatedAt, p1.StartTime.UnixNano())
assert.Equal(1, len(p1.Containers))
assert.Len(p1.Containers, 1)
assert.Nil(p1.EphemeralStorage)
assert.Nil(p1.VolumeStats)
assert.Nil(p1.Network)
@ -691,7 +691,7 @@ func TestCRIListPodCPUAndMemoryStats(t *testing.T) {
p2 := podStatsMap[statsapi.PodReference{Name: "sandbox2-name", UID: "sandbox2-uid", Namespace: "sandbox2-ns"}]
assert.Equal(sandbox2.CreatedAt, p2.StartTime.UnixNano())
assert.Equal(1, len(p2.Containers))
assert.Len(p2.Containers, 1)
assert.Nil(p2.EphemeralStorage)
assert.Nil(p2.VolumeStats)
assert.Nil(p2.Network)
@ -708,7 +708,7 @@ func TestCRIListPodCPUAndMemoryStats(t *testing.T) {
p3 := podStatsMap[statsapi.PodReference{Name: "sandbox3-name", UID: "sandbox3-uid", Namespace: "sandbox3-ns"}]
assert.Equal(sandbox3.CreatedAt, p3.StartTime.UnixNano())
assert.Equal(1, len(p3.Containers))
assert.Len(p3.Containers, 1)
c8 := p3.Containers[0]
assert.Equal(cName8, c8.Name)
@ -719,7 +719,7 @@ func TestCRIListPodCPUAndMemoryStats(t *testing.T) {
p6 := podStatsMap[statsapi.PodReference{Name: "sandbox6-name", UID: "sandbox6-uid", Namespace: "sandbox6-ns"}]
assert.Equal(sandbox6.CreatedAt, p6.StartTime.UnixNano())
assert.Equal(1, len(p6.Containers))
assert.Len(p6.Containers, 1)
c9 := p6.Containers[0]
assert.Equal(cName9, c9.Name)
@ -427,7 +427,7 @@ func checkNetworkStats(t *testing.T, label string, seed int, stats *statsapi.Net
assert.EqualValues(t, seed+offsetNetTxBytes, *stats.TxBytes, label+".Net.TxBytes")
assert.EqualValues(t, seed+offsetNetTxErrors, *stats.TxErrors, label+".Net.TxErrors")
assert.EqualValues(t, 2, len(stats.Interfaces), "network interfaces should contain 2 elements")
assert.Len(t, stats.Interfaces, 2, "network interfaces should contain 2 elements")
assert.EqualValues(t, "eth0", stats.Interfaces[0].Name, "default interface name is not eth0")
assert.EqualValues(t, seed+offsetNetRxBytes, *stats.Interfaces[0].RxBytes, label+".Net.TxErrors")
@ -552,7 +552,7 @@ func TestStaticPod(t *testing.T) {
t.Logf("Should not sync pod in syncBatch because there is no corresponding mirror pod for the static pod.")
m.syncBatch(true)
assert.Equal(t, len(m.kubeClient.(*fake.Clientset).Actions()), 0, "Expected no updates after syncBatch, got %+v", m.kubeClient.(*fake.Clientset).Actions())
assert.Empty(t, m.kubeClient.(*fake.Clientset).Actions(), "Expected no updates after syncBatch")
t.Logf("Create the mirror pod")
m.podManager.(mutablePodManager).AddPod(mirrorPod)
@ -99,7 +99,7 @@ func TestSecretStore(t *testing.T) {
// Adds don't issue Get requests.
actions := fakeClient.Actions()
assert.Equal(t, 0, len(actions), "unexpected actions: %#v", actions)
assert.Empty(t, actions, "unexpected actions")
// Should issue Get request
store.Get("ns1", "name1")
// Shouldn't issue Get request, as secret is not registered
@ -108,7 +108,7 @@ func TestSecretStore(t *testing.T) {
store.Get("ns3", "name3")
actions = fakeClient.Actions()
assert.Equal(t, 2, len(actions), "unexpected actions: %#v", actions)
assert.Len(t, actions, 2, "unexpected actions")
for _, a := range actions {
assert.True(t, a.Matches("get", "secrets"), "unexpected actions: %#v", a)
@ -169,7 +169,7 @@ func TestSecretStoreGetAlwaysRefresh(t *testing.T) {
}
wg.Wait()
actions := fakeClient.Actions()
assert.Equal(t, 100, len(actions), "unexpected actions: %#v", actions)
assert.Len(t, actions, 100, "unexpected actions")
for _, a := range actions {
assert.True(t, a.Matches("get", "secrets"), "unexpected actions: %#v", a)
@ -197,7 +197,7 @@ func TestSecretStoreGetNeverRefresh(t *testing.T) {
wg.Wait()
actions := fakeClient.Actions()
// Only first Get, should forward the Get request.
assert.Equal(t, 10, len(actions), "unexpected actions: %#v", actions)
assert.Len(t, actions, 10, "unexpected actions")
}
func TestCustomTTL(t *testing.T) {
@ -220,24 +220,24 @@ func TestCustomTTL(t *testing.T) {
ttlExists = true
store.Get("ns", "name")
actions := fakeClient.Actions()
assert.Equal(t, 1, len(actions), "unexpected actions: %#v", actions)
assert.Len(t, actions, 1, "unexpected actions")
fakeClient.ClearActions()
// Set 5-minute ttl and see if this works.
ttl = time.Duration(5) * time.Minute
store.Get("ns", "name")
actions = fakeClient.Actions()
assert.Equal(t, 0, len(actions), "unexpected actions: %#v", actions)
assert.Empty(t, actions, "unexpected actions")
// Still no effect after 4 minutes.
fakeClock.Step(4 * time.Minute)
store.Get("ns", "name")
actions = fakeClient.Actions()
assert.Equal(t, 0, len(actions), "unexpected actions: %#v", actions)
assert.Empty(t, actions, "unexpected actions")
// Now it should have an effect.
fakeClock.Step(time.Minute)
store.Get("ns", "name")
actions = fakeClient.Actions()
assert.Equal(t, 1, len(actions), "unexpected actions: %#v", actions)
assert.Len(t, actions, 1, "unexpected actions")
fakeClient.ClearActions()
// Now remove the custom ttl and see if that works.
@ -245,12 +245,12 @@ func TestCustomTTL(t *testing.T) {
fakeClock.Step(55 * time.Second)
store.Get("ns", "name")
actions = fakeClient.Actions()
assert.Equal(t, 0, len(actions), "unexpected actions: %#v", actions)
assert.Empty(t, actions, "unexpected action")
// Pass the minute and it should be triggered now.
fakeClock.Step(5 * time.Second)
store.Get("ns", "name")
actions = fakeClient.Actions()
assert.Equal(t, 1, len(actions), "unexpected actions: %#v", actions)
assert.Len(t, actions, 1, "unexpected actions")
}
func TestParseNodeAnnotation(t *testing.T) {
@ -402,7 +402,7 @@ func TestCacheInvalidation(t *testing.T) {
store.Get("ns1", "s10")
store.Get("ns1", "s2")
actions := fakeClient.Actions()
assert.Equal(t, 3, len(actions), "unexpected actions: %#v", actions)
assert.Len(t, actions, 3, "unexpected number of actions")
fakeClient.ClearActions()
// Update a pod with a new secret.
@ -421,7 +421,7 @@ func TestCacheInvalidation(t *testing.T) {
store.Get("ns1", "s20")
store.Get("ns1", "s3")
actions = fakeClient.Actions()
assert.Equal(t, 2, len(actions), "unexpected actions: %#v", actions)
assert.Len(t, actions, 2, "unexpected actions")
fakeClient.ClearActions()
// Create a new pod that is refencing the first three secrets - those should
@ -433,7 +433,7 @@ func TestCacheInvalidation(t *testing.T) {
store.Get("ns1", "s20")
store.Get("ns1", "s3")
actions = fakeClient.Actions()
assert.Equal(t, 3, len(actions), "unexpected actions: %#v", actions)
assert.Len(t, actions, 3, "unexpected actions")
fakeClient.ClearActions()
}
@ -189,7 +189,7 @@ func TestSecretCacheMultipleRegistrations(t *testing.T) {
store.DeleteReference("ns", "name", types.UID(fmt.Sprintf("pod-%d", i)))
}
actions := fakeClient.Actions()
assert.Equal(t, 2, len(actions), "unexpected actions: %#v", actions)
assert.Len(t, actions, 2, "unexpected actions")
// Final delete also doesn't trigger any action.
store.DeleteReference("ns", "name", "pod")
@ -198,7 +198,7 @@ func TestSecretCacheMultipleRegistrations(t *testing.T) {
t.Errorf("unexpected error: %v", err)
}
actions = fakeClient.Actions()
assert.Equal(t, 2, len(actions), "unexpected actions: %#v", actions)
assert.Len(t, actions, 2, "unexpected actions")
}
func TestImmutableSecretStopsTheReflector(t *testing.T) {
@ -113,5 +113,5 @@ func testStore(t *testing.T, store Store) {
// Test list keys.
keys, err = store.List()
require.NoError(t, err)
assert.Equal(t, len(keys), 0)
assert.Empty(t, keys)
}
@ -2389,7 +2389,7 @@ func TestReconcileWithUpdateReconstructedFromAPIServer(t *testing.T) {
assert.Empty(t, reconciler.volumesNeedUpdateFromNodeStatus)
attachedVolumes := asw.GetAttachedVolumes()
assert.Equalf(t, len(attachedVolumes), 2, "two volumes in ASW expected")
assert.Lenf(t, attachedVolumes, 2, "two volumes in ASW expected")
for _, vol := range attachedVolumes {
if vol.VolumeName == volumeName1 {
// devicePath + attachability must have been updated from node.status
@ -102,7 +102,7 @@ func TestGrpcProber_Probe(t *testing.T) {
fmt.Fprint(w, "res")
}))
u := strings.Split(server.URL, ":")
assert.Equal(t, 3, len(u))
assert.Len(t, u, 3)
port, err := strconv.Atoi(u[2])
assert.Equal(t, nil, err)
@ -4431,7 +4431,7 @@ func TestEndpointSliceE2E(t *testing.T) {
assert.Len(t, virtualServers2, 1, "Expected 1 virtual server")
realServers2, rsErr2 := ipvs.GetRealServers(virtualServers2[0])
assert.Nil(t, rsErr2, "Expected no error getting real servers")
assert.Len(t, realServers2, 0, "Expected 0 real servers")
assert.Empty(t, realServers2, "Expected 0 real servers")
}
func TestHealthCheckNodePortE2E(t *testing.T) {
@ -4829,7 +4829,7 @@ func TestTestInternalTrafficPolicyE2E(t *testing.T) {
assert.Len(t, virtualServers2, 1, "Expected 1 virtual server")
realServers2, rsErr2 := ipvs.GetRealServers(virtualServers2[0])
assert.Nil(t, rsErr2, "Expected no error getting real servers")
assert.Len(t, realServers2, 0, "Expected 0 real servers")
assert.Empty(t, realServers2, "Expected 0 real servers")
}
}
@ -4999,11 +4999,11 @@ func Test_EndpointSliceReadyAndTerminatingCluster(t *testing.T) {
realServers1, rsErr1 = ipvs.GetRealServers(clusterIPServer)
assert.Nil(t, rsErr1, "Expected no error getting real servers")
assert.Len(t, realServers1, 0, "Expected 0 real servers")
assert.Empty(t, realServers1, "Expected 0 real servers")
realServers2, rsErr2 = ipvs.GetRealServers(externalIPServer)
assert.Nil(t, rsErr2, "Expected no error getting real servers")
assert.Len(t, realServers2, 0, "Expected 0 real servers")
assert.Empty(t, realServers2, "Expected 0 real servers")
}
// Test_EndpointSliceReadyAndTerminatingLocal tests that when there are local ready and ready + terminating
@ -5171,11 +5171,11 @@ func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) {
realServers1, rsErr1 = ipvs.GetRealServers(clusterIPServer)
assert.Nil(t, rsErr1, "Expected no error getting real servers")
assert.Len(t, realServers1, 0, "Expected 0 real servers")
assert.Empty(t, realServers1, "Expected 0 real servers")
realServers2, rsErr2 = ipvs.GetRealServers(externalIPServer)
assert.Nil(t, rsErr2, "Expected no error getting real servers")
assert.Len(t, realServers2, 0, "Expected 0 real servers")
assert.Empty(t, realServers2, "Expected 0 real servers")
}
// Test_EndpointSliceOnlyReadyTerminatingCluster tests that when there are only ready terminating
@ -5343,11 +5343,11 @@ func Test_EndpointSliceOnlyReadyAndTerminatingCluster(t *testing.T) {
realServers1, rsErr1 = ipvs.GetRealServers(clusterIPServer)
assert.Nil(t, rsErr1, "Expected no error getting real servers")
assert.Len(t, realServers1, 0, "Expected 0 real servers")
assert.Empty(t, realServers1, "Expected 0 real servers")
realServers2, rsErr2 = ipvs.GetRealServers(externalIPServer)
assert.Nil(t, rsErr2, "Expected no error getting real servers")
assert.Len(t, realServers2, 0, "Expected 0 real servers")
assert.Empty(t, realServers2, "Expected 0 real servers")
}
// Test_EndpointSliceOnlyReadyTerminatingLocal tests that when there are only local ready terminating
@ -5512,11 +5512,11 @@ func Test_EndpointSliceOnlyReadyAndTerminatingLocal(t *testing.T) {
realServers1, rsErr1 = ipvs.GetRealServers(clusterIPServer)
assert.Nil(t, rsErr1, "Expected no error getting real servers")
assert.Len(t, realServers1, 0, "Expected 0 real servers")
assert.Empty(t, realServers1, "Expected 0 real servers")
realServers2, rsErr2 = ipvs.GetRealServers(externalIPServer)
assert.Nil(t, rsErr2, "Expected no error getting real servers")
assert.Len(t, realServers2, 0, "Expected 0 real servers")
assert.Empty(t, realServers2, "Expected 0 real servers")
}
func TestIpIsValidForSet(t *testing.T) {
@ -158,7 +158,7 @@ func TestRunner_Add(t *testing.T) {
}
// validate number of requests
assert.Equal(t, tc.netlinkCalls, len(tc.handler.requests))
assert.Len(t, tc.handler.requests, tc.netlinkCalls)
if tc.netlinkCalls > 0 {
// validate request
@ -166,7 +166,7 @@ func TestRunner_Add(t *testing.T) {
assert.Equal(t, uint16(unix.NLM_F_REQUEST|unix.NLM_F_CREATE|unix.NLM_F_ACK), tc.handler.requests[0].flags)
// validate attribute(NFACCT_NAME)
assert.Equal(t, 1, len(tc.handler.requests[0].data))
assert.Len(t, tc.handler.requests[0].data, 1)
assert.Equal(t,
tc.handler.requests[0].data[0].Serialize(),
nl.NewRtAttr(attrName, nl.ZeroTerminated(tc.counterName)).Serialize(),
@ -343,14 +343,14 @@ func TestRunner_Get(t *testing.T) {
counter, err := rnr.Get(tc.counterName)
// validate number of requests
assert.Equal(t, tc.netlinkCalls, len(tc.handler.requests))
assert.Len(t, tc.handler.requests, tc.netlinkCalls)
if tc.netlinkCalls > 0 {
// validate request
assert.Equal(t, cmdGet, tc.handler.requests[0].cmd)
assert.Equal(t, uint16(unix.NLM_F_REQUEST|unix.NLM_F_ACK), tc.handler.requests[0].flags)
// validate attribute(NFACCT_NAME)
assert.Equal(t, 1, len(tc.handler.requests[0].data))
assert.Len(t, tc.handler.requests[0].data, 1)
assert.Equal(t,
tc.handler.requests[0].data[0].Serialize(),
nl.NewRtAttr(attrName, nl.ZeroTerminated(tc.counterName)).Serialize())
@ -417,7 +417,7 @@ func TestRunner_Ensure(t *testing.T) {
assert.NoError(t, err)
// validate number of netlink requests
assert.Equal(t, tc.netlinkCalls, len(tc.handler.requests))
assert.Len(t, tc.handler.requests, tc.netlinkCalls)
})
}
@ -481,12 +481,12 @@ func TestRunner_List(t *testing.T) {
counters, err := rnr.List()
// validate request(NFNL_MSG_ACCT_GET)
assert.Equal(t, 1, len(hndlr.requests))
assert.Len(t, hndlr.requests, 1)
assert.Equal(t, cmdGet, hndlr.requests[0].cmd)
assert.Equal(t, uint16(unix.NLM_F_REQUEST|unix.NLM_F_DUMP), hndlr.requests[0].flags)
// validate attributes
assert.Equal(t, 0, len(hndlr.requests[0].data))
assert.Empty(t, hndlr.requests[0].data)
// validate response
assert.NoError(t, err)
@ -1990,7 +1990,7 @@ func TestApplyAppArmorVersionSkew(t *testing.T) {
},
},
validation: func(t *testing.T, pod *api.Pod) {
assert.Len(t, pod.Annotations, 0)
assert.Empty(t, pod.Annotations)
},
}, {
description: "Container security context not nil",
@ -2003,7 +2003,7 @@ func TestApplyAppArmorVersionSkew(t *testing.T) {
},
},
validation: func(t *testing.T, pod *api.Pod) {
assert.Len(t, pod.Annotations, 0)
assert.Empty(t, pod.Annotations)
},
}, {
description: "Container field RuntimeDefault and no annotation present",
@ -107,11 +107,11 @@ func TestShrink(t *testing.T) {
verifyPop(t, i, true, &queue)
}
require.Equal(t, 0, queue.Len())
require.Equal(t, normalSize, len(queue.elements))
require.Len(t, queue.elements, normalSize)
// Still usable after shrinking?
queue.Push(42)
verifyPop(t, 42, true, &queue)
require.Equal(t, 0, queue.Len())
require.Equal(t, normalSize, len(queue.elements))
require.Len(t, queue.elements, normalSize)
}
@ -51,7 +51,7 @@ func TestProberExistingDriverBeforeInit(t *testing.T) {
// Assert
// Probe occurs, 1 plugin should be returned, and 2 watches (pluginDir and all its
// current subdirectories) registered.
assert.Equal(t, 1, len(events))
assert.Len(t, events, 1)
assert.Equal(t, volume.ProbeAddOrUpdate, events[0].Op)
plugDir := pluginDir
if goruntime.GOOS == "windows" {
@ -66,7 +66,7 @@ func TestProberExistingDriverBeforeInit(t *testing.T) {
// Act
events, err = prober.Probe()
// Assert
assert.Equal(t, 0, len(events))
assert.Empty(t, events)
assert.NoError(t, err)
}
@ -77,7 +77,7 @@ func TestProberAddRemoveDriver(t *testing.T) {
prober.Probe()
events, err := prober.Probe()
assert.NoError(t, err)
assert.Equal(t, 0, len(events))
assert.Empty(t, events)
// Call probe after a file is added. Should return 1 event.
@ -93,7 +93,7 @@ func TestProberAddRemoveDriver(t *testing.T) {
events, err = prober.Probe()
// Assert
assert.Equal(t, 1, len(events))
assert.Len(t, events, 1)
assert.Equal(t, volume.ProbeAddOrUpdate, events[0].Op) // 1 newly added
assertPathSuffix(t, driverPath, watcher.watches[len(watcher.watches)-1]) // Checks most recent watch
assert.NoError(t, err)
@ -103,7 +103,7 @@ func TestProberAddRemoveDriver(t *testing.T) {
// Act
events, err = prober.Probe()
// Assert
assert.Equal(t, 0, len(events))
assert.Empty(t, events)
assert.NoError(t, err)
// Call probe after a non-driver file is added in a subdirectory. should return 1 event.
@ -115,7 +115,7 @@ func TestProberAddRemoveDriver(t *testing.T) {
events, err = prober.Probe()
// Assert
assert.Equal(t, 1, len(events))
assert.Len(t, events, 1)
assert.Equal(t, volume.ProbeAddOrUpdate, events[0].Op)
assert.NoError(t, err)
@ -123,7 +123,7 @@ func TestProberAddRemoveDriver(t *testing.T) {
// Act
events, err = prober.Probe()
// Assert
assert.Equal(t, 0, len(events))
assert.Empty(t, events)
assert.NoError(t, err)
// Call probe after a subdirectory is added in a driver directory. should return 1 event.
@ -135,7 +135,7 @@ func TestProberAddRemoveDriver(t *testing.T) {
events, err = prober.Probe()
// Assert
assert.Equal(t, 1, len(events))
assert.Len(t, events, 1)
assert.Equal(t, volume.ProbeAddOrUpdate, events[0].Op)
assert.NoError(t, err)
@ -143,7 +143,7 @@ func TestProberAddRemoveDriver(t *testing.T) {
// Act
events, err = prober.Probe()
// Assert
assert.Equal(t, 0, len(events))
assert.Empty(t, events)
assert.NoError(t, err)
// Call probe after a subdirectory is removed in a driver directory. should return 1 event.
@ -154,7 +154,7 @@ func TestProberAddRemoveDriver(t *testing.T) {
events, err = prober.Probe()
// Assert
assert.Equal(t, 1, len(events))
assert.Len(t, events, 1)
assert.Equal(t, volume.ProbeAddOrUpdate, events[0].Op)
assert.NoError(t, err)
@ -162,7 +162,7 @@ func TestProberAddRemoveDriver(t *testing.T) {
// Act
events, err = prober.Probe()
// Assert
assert.Equal(t, 0, len(events))
assert.Empty(t, events)
assert.NoError(t, err)
// Call probe after a driver executable and driver directory is remove. should return 1 event.
@ -172,13 +172,13 @@ func TestProberAddRemoveDriver(t *testing.T) {
watcher.TriggerEvent(fsnotify.Remove, driverPath)
// Act and Assert: 1 ProbeRemove event
events, err = prober.Probe()
assert.Equal(t, 1, len(events))
assert.Len(t, events, 1)
assert.Equal(t, volume.ProbeRemove, events[0].Op)
assert.NoError(t, err)
// Act and Assert: 0 event
events, err = prober.Probe()
assert.Equal(t, 0, len(events))
assert.Empty(t, events)
assert.NoError(t, err)
}
@ -199,7 +199,7 @@ func TestEmptyPluginDir(t *testing.T) {
events, err := prober.Probe()
// Assert
assert.Equal(t, 0, len(events))
assert.Empty(t, events)
assert.NoError(t, err)
}
@ -216,7 +216,7 @@ func TestRemovePluginDir(t *testing.T) {
// Act: The handler triggered by the above events should have already handled the event appropriately.
// Assert
assert.Equal(t, 3, len(watcher.watches)) // 2 from initial setup, 1 from new watch.
assert.Len(t, watcher.watches, 3) // 2 from initial setup, 1 from new watch.
plugDir := pluginDir
if goruntime.GOOS == "windows" {
plugDir = "\\flexvolume"
@ -229,7 +229,7 @@ func TestNestedDriverDir(t *testing.T) {
// Arrange
_, fs, watcher, _ := initTestEnvironment(t)
// Assert
assert.Equal(t, 2, len(watcher.watches)) // 2 from initial setup
assert.Len(t, watcher.watches, 2) // 2 from initial setup
// test add testDriverName
testDriverName := "testDriverName"
@ -237,7 +237,7 @@ func TestNestedDriverDir(t *testing.T) {
fs.MkdirAll(testDriverPath, 0777)
watcher.TriggerEvent(fsnotify.Create, testDriverPath)
// Assert
assert.Equal(t, 3, len(watcher.watches)) // 2 from initial setup, 1 from new watch.
assert.Len(t, watcher.watches, 3) // 2 from initial setup, 1 from new watch.
assertPathSuffix(t, testDriverPath, watcher.watches[len(watcher.watches)-1])
// test add nested subdir inside testDriverName
@ -248,7 +248,7 @@ func TestNestedDriverDir(t *testing.T) {
fs.MkdirAll(subdirPath, 0777)
watcher.TriggerEvent(fsnotify.Create, subdirPath)
// Assert
assert.Equal(t, 4+i, len(watcher.watches)) // 3 + newly added
assert.Len(t, watcher.watches, 4+i) // 3 + newly added
assertPathSuffix(t, subdirPath, watcher.watches[len(watcher.watches)-1])
basePath = subdirPath
}
@ -272,13 +272,13 @@ func TestProberMultipleEvents(t *testing.T) {
events, err := prober.Probe()
// Assert
assert.Equal(t, 2, len(events))
assert.Len(t, events, 2)
assert.Equal(t, volume.ProbeAddOrUpdate, events[0].Op)
assert.Equal(t, volume.ProbeAddOrUpdate, events[1].Op)
assert.NoError(t, err)
for i := 0; i < iterations-1; i++ {
events, err = prober.Probe()
assert.Equal(t, 0, len(events))
assert.Empty(t, events)
assert.NoError(t, err)
}
}
@ -321,7 +321,7 @@ func TestProberSuccessAndError(t *testing.T) {
events, err := prober.Probe()
// Assert
assert.Equal(t, 1, len(events))
assert.Len(t, events, 1)
assert.Equal(t, volume.ProbeAddOrUpdate, events[0].Op)
assert.Equal(t, driverName, events[0].PluginName)
assert.Error(t, err)
@ -516,7 +516,7 @@ func TestBasicPolicyDefinitionFailure(t *testing.T) {
require.Equal(t, 0, warningRecorder.len())
annotations := attr.GetAnnotations(auditinternal.LevelMetadata)
require.Equal(t, 0, len(annotations))
require.Empty(t, annotations)
require.ErrorContains(t, err, `Denied`)
}
@ -824,7 +824,7 @@ func TestInvalidParamSourceGVK(t *testing.T) {
`failed to configure policy: failed to find resource referenced by paramKind: 'example.com/v1, Kind=BadParamKind'`)
close(passedParams)
require.Len(t, passedParams, 0)
require.Empty(t, passedParams)
}
// Shows that an error is surfaced if a param specified in a binding does not
@ -868,7 +868,7 @@ func TestInvalidParamSourceInstanceName(t *testing.T) {
// is not existing
require.ErrorContains(t, err,
"no params found for policy binding with `Deny` parameterNotFoundAction")
require.Len(t, passedParams, 0)
require.Empty(t, passedParams)
}
// Show that policy still gets evaluated with `nil` param if paramRef & namespaceParamRef
@ -1199,7 +1199,7 @@ func TestAuditValidationAction(t *testing.T) {
require.Equal(t, 0, warningRecorder.len())
annotations := attr.GetAnnotations(auditinternal.LevelMetadata)
require.Equal(t, 1, len(annotations))
require.Len(t, annotations, 1)
valueJson, ok := annotations["validation.policy.admission.k8s.io/validation_failure"]
require.True(t, ok)
var value []validating.ValidationFailureValue
@ -1254,7 +1254,7 @@ func TestWarnValidationAction(t *testing.T) {
require.True(t, warningRecorder.hasWarning("Validation failed for ValidatingAdmissionPolicy 'denypolicy.example.com' with binding 'denybinding.example.com': I'm sorry Dave"))
annotations := attr.GetAnnotations(auditinternal.LevelMetadata)
require.Equal(t, 0, len(annotations))
require.Empty(t, annotations)
require.NoError(t, err)
}
@ -1296,7 +1296,7 @@ func TestAllValidationActions(t *testing.T) {
require.True(t, warningRecorder.hasWarning("Validation failed for ValidatingAdmissionPolicy 'denypolicy.example.com' with binding 'denybinding.example.com': I'm sorry Dave"))
annotations := attr.GetAnnotations(auditinternal.LevelMetadata)
require.Equal(t, 1, len(annotations))
require.Len(t, annotations, 1)
valueJson, ok := annotations["validation.policy.admission.k8s.io/validation_failure"]
require.True(t, ok)
var value []validating.ValidationFailureValue
@ -1926,10 +1926,10 @@ func TestAuditAnnotations(t *testing.T) {
)
annotations := attr.GetAnnotations(auditinternal.LevelMetadata)
require.Equal(t, 1, len(annotations))
require.Len(t, annotations, 1)
value := annotations[policy.Name+"/example-key"]
parts := strings.Split(value, ", ")
require.Equal(t, 2, len(parts))
require.Len(t, parts, 2)
require.Contains(t, parts, "normal-value", "special-value")
require.ErrorContains(t, err, "example error")
@ -385,7 +385,7 @@ func TestAddRemove(t *testing.T) {
require.NotNil(t, initialDocument, "initial document should parse")
require.NotNil(t, secondDocument, "second document should parse")
assert.Len(t, initialDocument.Items, len(apis.Items), "initial document should have set number of groups")
assert.Len(t, secondDocument.Items, 0, "second document should have no groups")
assert.Empty(t, secondDocument.Items, "second document should have no groups")
}

// Show that updating an existing service replaces and does not add the entry

@ -105,7 +105,7 @@ func TestDiscoveryAtAPIS(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
assert.Equal(t, 0, len(groupList.Groups))
assert.Empty(t, groupList.Groups)

// Add a Group.
extensionsGroupName := "extensions"
@ -130,7 +130,7 @@ func TestDiscoveryAtAPIS(t *testing.T) {
t.Fatalf("unexpected error: %v", err)
}

assert.Equal(t, 1, len(groupList.Groups))
assert.Len(t, groupList.Groups, 1)
groupListGroup := groupList.Groups[0]
assert.Equal(t, extensionsGroupName, groupListGroup.Name)
assert.Equal(t, extensionsVersions, groupListGroup.Versions)
@ -144,7 +144,7 @@ func TestDiscoveryAtAPIS(t *testing.T) {
t.Fatalf("unexpected error: %v", err)
}

assert.Equal(t, 0, len(groupList.Groups))
assert.Empty(t, groupList.Groups)
}

func TestDiscoveryOrdering(t *testing.T) {
@ -155,7 +155,7 @@ func TestDiscoveryOrdering(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
assert.Equal(t, 0, len(groupList.Groups))
assert.Empty(t, groupList.Groups)

// Register three groups
handler.AddGroup(metav1.APIGroup{Name: "x"})
@ -173,7 +173,7 @@ func TestDiscoveryOrdering(t *testing.T) {
t.Fatalf("unexpected error: %v", err)
}

assert.Equal(t, 6, len(groupList.Groups))
assert.Len(t, groupList.Groups, 6)
assert.Equal(t, "x", groupList.Groups[0].Name)
assert.Equal(t, "y", groupList.Groups[1].Name)
assert.Equal(t, "z", groupList.Groups[2].Name)
@ -187,7 +187,7 @@ func TestDiscoveryOrdering(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
assert.Equal(t, 5, len(groupList.Groups))
assert.Len(t, groupList.Groups, 5)

// Re-adding should move to the end.
handler.AddGroup(metav1.APIGroup{Name: "a"})
@ -195,7 +195,7 @@ func TestDiscoveryOrdering(t *testing.T) {
if err != nil {
t.Fatalf("unexpected error: %v", err)
}
assert.Equal(t, 6, len(groupList.Groups))
assert.Len(t, groupList.Groups, 6)
assert.Equal(t, "x", groupList.Groups[0].Name)
assert.Equal(t, "y", groupList.Groups[1].Name)
assert.Equal(t, "z", groupList.Groups[2].Name)

@ -2067,8 +2067,8 @@ func TestForgetWatcher(t *testing.T) {
cacher.Lock()
defer cacher.Unlock()

require.Equal(t, expectedWatchersCounter, len(cacher.watchers.allWatchers))
require.Equal(t, expectedValueWatchersCounter, len(cacher.watchers.valueWatchers))
require.Len(t, cacher.watchers.allWatchers, expectedWatchersCounter)
require.Len(t, cacher.watchers.valueWatchers, expectedValueWatchersCounter)
}
assertCacherInternalState(0, 0)

@ -314,11 +314,11 @@ func TestTunnelingHandler_HeaderInterceptingConn(t *testing.T) {
for i, chunk := range strings.Split(responseHeadersAndBody, "split") {
if i > 0 {
n, err := hic.Write([]byte("split"))
require.Equal(t, n, len("split"))
require.Len(t, "split", n)
require.NoError(t, err)
}
n, err := hic.Write([]byte(chunk))
require.Equal(t, n, len(chunk))
require.Len(t, chunk, n)
require.NoError(t, err)
}
assert.True(t, hic.initialized)
@ -392,11 +392,11 @@ func TestTunnelingHandler_HeaderInterceptingConn(t *testing.T) {
for i, chunk := range strings.Split(contentLengthHeadersAndBody, "split") {
if i > 0 {
n, err := hic.Write([]byte("split"))
require.Equal(t, n, len("split"))
require.Len(t, "split", n)
require.NoError(t, err)
}
n, err := hic.Write([]byte(chunk))
require.Equal(t, n, len(chunk))
require.Len(t, chunk, n)
require.NoError(t, err)
}
assert.True(t, hic.initialized, "successfully parsed http response headers")

@ -78,7 +78,7 @@ func TestTruncatingEvents(t *testing.T) {

fb := &fake.Backend{
OnRequest: func(events []*auditinternal.Event) {
require.Equal(t, 1, len(events), "Expected single event in batch")
require.Len(t, events, 1, "Expected single event in batch")
event = events[0]
},
}

@ -152,7 +152,7 @@ func TestOpenAPIDiskCache(t *testing.T) {
require.NoError(t, err)
defer fakeServer.HttpServer.Close()

require.Greater(t, len(fakeServer.ServedDocuments), 0)
require.NotEmpty(t, fakeServer.ServedDocuments)

client, err := NewCachedDiscoveryClientForConfig(
&restclient.Config{Host: fakeServer.HttpServer.URL},
@ -175,7 +175,7 @@ func TestOpenAPIDiskCache(t *testing.T) {
paths, err := openapiClient.Paths()
require.NoError(t, err)
assert.Equal(t, 1, fakeServer.RequestCounters["/openapi/v3"])
require.Greater(t, len(paths), 0)
require.NotEmpty(t, paths)

contentTypes := []string{
runtime.ContentTypeJSON, openapi.ContentTypeOpenAPIV3PB,

@ -411,7 +411,7 @@ func TestOpenAPIMemCache(t *testing.T) {
require.NoError(t, err)
defer fakeServer.HttpServer.Close()

require.Greater(t, len(fakeServer.ServedDocuments), 0)
require.NotEmpty(t, fakeServer.ServedDocuments)

client := NewMemCacheClient(
discovery.NewDiscoveryClientForConfigOrDie(
@ -604,7 +604,7 @@ func TestMemCacheGroupsAndMaybeResources(t *testing.T) {
require.NoError(t, err)
// "Unaggregated" discovery always returns nil for resources.
assert.Nil(t, resourcesMap)
assert.True(t, len(failedGVs) == 0, "expected empty failed GroupVersions, got (%d)", len(failedGVs))
assert.Emptyf(t, failedGVs, "expected empty failed GroupVersions, got (%d)", len(failedGVs))
assert.False(t, memClient.receivedAggregatedDiscovery)
assert.True(t, memClient.Fresh())
// Test the expected groups are returned for the aggregated format.

@ -194,7 +194,7 @@ func TestReflectorWatchStoppedAfter(t *testing.T) {

err := target.watch(nil, stopCh, nil)
require.NoError(t, err)
require.Equal(t, 1, len(watchers))
require.Len(t, watchers, 1)
require.True(t, watchers[0].IsStopped())
}

@ -132,7 +132,7 @@ func TestCheckListFromCacheDataConsistencyIfRequestedInternalHappyPath(t *testin
checkListFromCacheDataConsistencyIfRequestedInternal(ctx, "", fakeLister.List, listOptions, scenario.retrievedList)

require.Equal(t, 1, fakeLister.counter)
require.Equal(t, 1, len(fakeLister.requestOptions))
require.Len(t, fakeLister.requestOptions, 1)
require.Equal(t, fakeLister.requestOptions[0], scenario.expectedRequestOptions)
})
}

@ -87,7 +87,7 @@ func TestCounter(t *testing.T) {
mfs, err := registry.Gather()
var buf bytes.Buffer
enc := expfmt.NewEncoder(&buf, "text/plain; version=0.0.4; charset=utf-8")
assert.Equalf(t, test.expectedMetricCount, len(mfs), "Got %v metrics, Want: %v metrics", len(mfs), test.expectedMetricCount)
assert.Lenf(t, mfs, test.expectedMetricCount, "Got %v metrics, Want: %v metrics", len(mfs), test.expectedMetricCount)
assert.Nil(t, err, "Gather failed %v", err)
for _, metric := range mfs {
err := enc.Encode(metric)
@ -186,12 +186,12 @@ func TestCounterVec(t *testing.T) {
registry.MustRegister(c)
c.WithLabelValues("1", "2").Inc()
mfs, err := registry.Gather()
assert.Equalf(t, test.expectedMetricFamilyCount, len(mfs), "Got %v metric families, Want: %v metric families", len(mfs), test.expectedMetricFamilyCount)
assert.Lenf(t, mfs, test.expectedMetricFamilyCount, "Got %v metric families, Want: %v metric families", len(mfs), test.expectedMetricFamilyCount)
assert.Nil(t, err, "Gather failed %v", err)

// this no-opts here when there are no metric families (i.e. when the metric is hidden)
for _, mf := range mfs {
assert.Equalf(t, 1, len(mf.GetMetric()), "Got %v metrics, wanted 1 as the count", len(mf.GetMetric()))
assert.Lenf(t, mf.GetMetric(), 1, "Got %v metrics, wanted 1 as the count", len(mf.GetMetric()))
assert.Equalf(t, test.expectedHelp, mf.GetHelp(), "Got %s as help message, want %s", mf.GetHelp(), test.expectedHelp)
}

@ -203,7 +203,7 @@ func TestCounterVec(t *testing.T) {

// this no-opts here when there are no metric families (i.e. when the metric is hidden)
for _, mf := range mfs {
assert.Equalf(t, 3, len(mf.GetMetric()), "Got %v metrics, wanted 3 as the count", len(mf.GetMetric()))
assert.Lenf(t, mf.GetMetric(), 3, "Got %v metrics, wanted 3 as the count", len(mf.GetMetric()))
}
})
}

@ -87,7 +87,7 @@ func TestGauge(t *testing.T) {
registry.MustRegister(c)

ms, err := registry.Gather()
assert.Equalf(t, test.expectedMetricCount, len(ms), "Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount)
assert.Lenf(t, ms, test.expectedMetricCount, "Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount)
assert.Nil(t, err, "Gather failed %v", err)

for _, metric := range ms {
@ -175,7 +175,7 @@ func TestGaugeVec(t *testing.T) {
registry.MustRegister(c)
c.WithLabelValues("1", "2").Set(1.0)
ms, err := registry.Gather()
assert.Equalf(t, test.expectedMetricCount, len(ms), "Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount)
assert.Lenf(t, ms, test.expectedMetricCount, "Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount)
assert.Nil(t, err, "Gather failed %v", err)
for _, metric := range ms {
assert.Equalf(t, test.expectedHelp, metric.GetHelp(), "Got %s as help message, want %s", metric.GetHelp(), test.expectedHelp)
@ -188,7 +188,7 @@ func TestGaugeVec(t *testing.T) {
assert.Nil(t, err, "Gather failed %v", err)

for _, mf := range ms {
assert.Equalf(t, 3, len(mf.GetMetric()), "Got %v metrics, wanted 3 as the count", len(mf.GetMetric()))
assert.Lenf(t, mf.GetMetric(), 3, "Got %v metrics, wanted 3 as the count", len(mf.GetMetric()))
}
})
}

@ -102,7 +102,7 @@ func TestHistogram(t *testing.T) {
}

ms, err := registry.Gather()
assert.Equalf(t, test.expectedMetricCount, len(ms), "Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount)
assert.Lenf(t, ms, test.expectedMetricCount, "Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount)
assert.Nil(t, err, "Gather failed %v", err)

for _, metric := range ms {
@ -211,7 +211,7 @@ func TestHistogramVec(t *testing.T) {
}

ms, err := registry.Gather()
assert.Equalf(t, test.expectedMetricCount, len(ms), "Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount)
assert.Lenf(t, ms, test.expectedMetricCount, "Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount)
assert.Nil(t, err, "Gather failed %v", err)
for _, metric := range ms {
if metric.GetHelp() != test.expectedHelp {
@ -226,7 +226,7 @@ func TestHistogramVec(t *testing.T) {
assert.Nil(t, err, "Gather failed %v", err)

for _, mf := range ms {
assert.Equalf(t, 3, len(mf.GetMetric()), "Got %v metrics, wanted 3 as the count", len(mf.GetMetric()))
assert.Lenf(t, mf.GetMetric(), 3, "Got %v metrics, wanted 3 as the count", len(mf.GetMetric()))
for _, m := range mf.GetMetric() {
assert.Equalf(t, uint64(1), m.GetHistogram().GetSampleCount(), "Got %v metrics, expected histogram sample count to equal 1", m.GetHistogram().GetSampleCount())
}

@ -235,7 +235,7 @@ func TestShowHiddenMetric(t *testing.T) {

ms, err := registry.Gather()
assert.Nil(t, err, "Gather failed %v", err)
assert.Equalf(t, expectedMetricCount, len(ms), "Got %v metrics, Want: %v metrics", len(ms), expectedMetricCount)
assert.Lenf(t, ms, expectedMetricCount, "Got %v metrics, Want: %v metrics", len(ms), expectedMetricCount)

showHidden.Store(true)
defer showHidden.Store(false)
@ -253,7 +253,7 @@ func TestShowHiddenMetric(t *testing.T) {

ms, err = registry.Gather()
assert.Nil(t, err, "Gather failed %v", err)
assert.Equalf(t, expectedMetricCount, len(ms), "Got %v metrics, Want: %v metrics", len(ms), expectedMetricCount)
assert.Lenf(t, ms, expectedMetricCount, "Got %v metrics, Want: %v metrics", len(ms), expectedMetricCount)
}

func TestValidateShowHiddenMetricsVersion(t *testing.T) {

@ -87,7 +87,7 @@ func TestSummary(t *testing.T) {
registry.MustRegister(c)

ms, err := registry.Gather()
assert.Equalf(t, test.expectedMetricCount, len(ms), "Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount)
assert.Lenf(t, ms, test.expectedMetricCount, "Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount)
assert.Nil(t, err, "Gather failed %v", err)

for _, metric := range ms {
@ -176,7 +176,7 @@ func TestSummaryVec(t *testing.T) {
registry.MustRegister(c)
c.WithLabelValues("1", "2").Observe(1.0)
ms, err := registry.Gather()
assert.Equalf(t, test.expectedMetricCount, len(ms), "Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount)
assert.Lenf(t, ms, test.expectedMetricCount, "Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount)
assert.Nil(t, err, "Gather failed %v", err)

for _, metric := range ms {
@ -190,7 +190,7 @@ func TestSummaryVec(t *testing.T) {
assert.Nil(t, err, "Gather failed %v", err)

for _, mf := range ms {
assert.Equalf(t, 3, len(mf.GetMetric()), "Got %v metrics, wanted 2 as the count", len(mf.GetMetric()))
assert.Lenf(t, mf.GetMetric(), 3, "Got %v metrics, wanted 2 as the count", len(mf.GetMetric()))
for _, m := range mf.GetMetric() {
assert.Equalf(t, uint64(1), m.GetSummary().GetSampleCount(), "Got %v metrics, wanted 1 as the summary sample count", m.GetSummary().GetSampleCount())
}

@ -111,7 +111,7 @@ func TestTimingHistogram(t *testing.T) {
}

ms, err := registry.Gather()
assert.Equalf(t, test.expectedMetricCount, len(ms), "Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount)
assert.Lenf(t, ms, test.expectedMetricCount, "Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount)
assert.Nil(t, err, "Gather failed %v", err)

for _, metric := range ms {
@ -242,7 +242,7 @@ func TestTimingHistogramVec(t *testing.T) {
}

ms, err := registry.Gather()
assert.Equalf(t, test.expectedMetricCount, len(ms), "Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount)
assert.Lenf(t, ms, test.expectedMetricCount, "Got %v metrics, Want: %v metrics", len(ms), test.expectedMetricCount)
assert.Nil(t, err, "Gather failed %v", err)
for _, metric := range ms {
if metric.GetHelp() != test.expectedHelp {
@ -264,7 +264,7 @@ func TestTimingHistogramVec(t *testing.T) {

for _, mf := range ms {
t.Logf("Considering metric family %s", mf.String())
assert.Equalf(t, 3, len(mf.GetMetric()), "Got %v metrics, wanted 3 as the count for family %#+v", len(mf.GetMetric()), mf)
assert.Lenf(t, mf.GetMetric(), 3, "Got %v metrics, wanted 3 as the count for family %#+v", len(mf.GetMetric()), mf)
for _, m := range mf.GetMetric() {
expectedCount := uint64(dt1)
expectedSum := float64(dt1) * v0

@ -792,7 +792,7 @@ func TestReconcileEndpointSlicesSomePreexisting(t *testing.T) {
reconcileHelper(t, r, &svc, pods, existingSlices, time.Now())

actions := client.Actions()
assert.Equal(t, numActionsBefore+2, len(actions), "Expected 2 additional client actions as part of reconcile")
assert.Len(t, actions, numActionsBefore+2, "Expected 2 additional client actions as part of reconcile")
assert.True(t, actions[numActionsBefore].Matches("create", "endpointslices"), "First action should be create endpoint slice")
assert.True(t, actions[numActionsBefore+1].Matches("update", "endpointslices"), "Second action should be update endpoint slice")

@ -848,7 +848,7 @@ func TestReconcileEndpointSlicesSomePreexistingWorseAllocation(t *testing.T) {
reconcileHelper(t, r, &svc, pods, existingSlices, time.Now())

actions := client.Actions()
assert.Equal(t, numActionsBefore+2, len(actions), "Expected 2 additional client actions as part of reconcile")
assert.Len(t, actions, numActionsBefore+2, "Expected 2 additional client actions as part of reconcile")
expectActions(t, client.Actions(), 2, "create", "endpointslices")

// 2 new slices (100, 52) in addition to existing slices (74, 74)
@ -1001,14 +1001,14 @@ func TestReconcileEndpointSlicesRecycling(t *testing.T) {
r := newReconciler(client, []*corev1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}}, defaultMaxEndpointsPerSlice)
reconcileHelper(t, r, &svc, pods, existingSlices, time.Now())
// initial reconcile should be a no op, all pods are accounted for in slices, no repacking should be done
assert.Equal(t, numActionsBefore+0, len(client.Actions()), "Expected 0 additional client actions as part of reconcile")
assert.Len(t, client.Actions(), numActionsBefore+0, "Expected 0 additional client actions as part of reconcile")

// changing a service port should require all slices to be updated, time for a repack
svc.Spec.Ports[0].TargetPort.IntVal = 81
reconcileHelper(t, r, &svc, pods, existingSlices, time.Now())

// this should reflect 3 updates + 7 deletes
assert.Equal(t, numActionsBefore+10, len(client.Actions()), "Expected 10 additional client actions as part of reconcile")
assert.Len(t, client.Actions(), numActionsBefore+10, "Expected 10 additional client actions as part of reconcile")

// thanks to recycling, we get a free repack of endpoints, resulting in 3 full slices instead of 10 mostly empty slices
expectUnorderedSlicesWithLengths(t, fetchEndpointSlices(t, client, namespace), []int{100, 100, 100})
@ -1242,7 +1242,7 @@ func TestReconcileEndpointSlicesNamedPorts(t *testing.T) {
reconcileHelper(t, r, &svc, pods, []*discovery.EndpointSlice{}, time.Now())

// reconcile should create 5 endpoint slices
assert.Equal(t, 5, len(client.Actions()), "Expected 5 client actions as part of reconcile")
assert.Len(t, client.Actions(), 5, "Expected 5 client actions as part of reconcile")
expectActions(t, client.Actions(), 5, "create", "endpointslices")
expectMetrics(t, expectedMetrics{desiredSlices: 5, actualSlices: 5, desiredEndpoints: 300, addedPerSync: 300, removedPerSync: 0, numCreated: 5, numUpdated: 0, numDeleted: 0, slicesChangedPerSync: 5})

@ -1338,7 +1338,7 @@ func TestReconcileEndpointSlicesMetrics(t *testing.T) {
reconcileHelper(t, r, &svc, pods, []*discovery.EndpointSlice{}, time.Now())

actions := client.Actions()
assert.Equal(t, 1, len(actions), "Expected 1 additional client actions as part of reconcile")
assert.Len(t, actions, 1, "Expected 1 additional client actions as part of reconcile")
assert.True(t, actions[0].Matches("create", "endpointslices"), "First action should be create endpoint slice")

expectMetrics(t, expectedMetrics{desiredSlices: 1, actualSlices: 1, desiredEndpoints: 20, addedPerSync: 20, removedPerSync: 0, numCreated: 1, numUpdated: 0, numDeleted: 0, slicesChangedPerSync: 1})
@ -2254,8 +2254,8 @@ func expectUnorderedSlicesWithTopLevelAttrs(t *testing.T, endpointSlices []disco
}
}

assert.Len(t, slicesWithNoMatch, 0, "EndpointSlice(s) found without matching attributes")
assert.Len(t, expectedSlices, 0, "Expected slices(s) not found in EndpointSlices")
assert.Empty(t, slicesWithNoMatch, "EndpointSlice(s) found without matching attributes")
assert.Empty(t, expectedSlices, "Expected slices(s) not found in EndpointSlices")
}

func expectActions(t *testing.T, actions []k8stesting.Action, num int, verb, resource string) {

@ -71,7 +71,7 @@ func TestDownloadOpenAPISpec(t *testing.T) {
handlerTest{data: []byte(""), etag: ""})
assert.NoError(t, err)
if assert.NotNil(t, groups) {
assert.Equal(t, len(groups.Paths), 1)
assert.Len(t, groups.Paths, 1)
if assert.Contains(t, groups.Paths, "apis/group/version") {
assert.NotEmpty(t, groups.Paths["apis/group/version"].ServerRelativeURL)
}

@ -3432,7 +3432,7 @@ func TestApplySetDryRun(t *testing.T) {
cmd.Run(cmd, []string{})
})
assert.Equal(t, "replicationcontroller/test-rc serverside-applied (server dry run)\n", outbuff.String())
assert.Equal(t, len(serverSideData), 1, "unexpected creation")
assert.Len(t, serverSideData, 1, "unexpected creation")
require.Nil(t, serverSideData[pathSecret], "secret was created")
})

@ -3449,7 +3449,7 @@ func TestApplySetDryRun(t *testing.T) {
cmd.Run(cmd, []string{})
})
assert.Equal(t, "replicationcontroller/test-rc configured (dry run)\n", outbuff.String())
assert.Equal(t, len(serverSideData), 1, "unexpected creation")
assert.Len(t, serverSideData, 1, "unexpected creation")
require.Nil(t, serverSideData[pathSecret], "secret was created")
})
}