Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-29 06:27:05 +00:00)
fix: enable expected-actual rule from testifylint in module k8s.io/kubernetes
Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
parent a8c955ab42
commit f736cca0e5
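testifylint's expected-actual rule flags testify assertions whose literal or constant expected value is passed in the actual-value position: assert.Equal and require.Equal treat the first value argument as the expected value, so swapped arguments produce inverted "expected/actual" failure output. The sketch below illustrates the pattern this commit applies across the test files; it is not code from the repository, and buildVersion is a hypothetical stand-in for whatever produces the value under test.

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// buildVersion is a hypothetical helper used only to keep the example self-contained.
func buildVersion() string { return "v1" }

func TestExpectedActualOrder(t *testing.T) {
	got := buildVersion()

	// Before (flagged by testifylint expected-actual): the literal expected
	// value sits in the actual position, so failure output is misleading.
	//   assert.Equal(t, got, "v1")

	// After: expected value first, actual value second.
	assert.Equal(t, "v1", got)
}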
@@ -279,7 +279,7 @@ func TestAPIVersionOfDiscoveryEndpoints(t *testing.T) {
 }
 apiVersions := metav1.APIVersions{}
 assert.NoError(decodeResponse(resp, &apiVersions))
-assert.Equal(apiVersions.APIVersion, "")
+assert.Equal("", apiVersions.APIVersion)
 
 // /api/v1 exists in release-1.1
 resp, err = http.Get(server.URL + "/api/v1")
@@ -288,7 +288,7 @@ func TestAPIVersionOfDiscoveryEndpoints(t *testing.T) {
 }
 resourceList := metav1.APIResourceList{}
 assert.NoError(decodeResponse(resp, &resourceList))
-assert.Equal(resourceList.APIVersion, "")
+assert.Equal("", resourceList.APIVersion)
 
 // /apis exists in release-1.1
 resp, err = http.Get(server.URL + "/apis")
@@ -297,7 +297,7 @@ func TestAPIVersionOfDiscoveryEndpoints(t *testing.T) {
 }
 groupList := metav1.APIGroupList{}
 assert.NoError(decodeResponse(resp, &groupList))
-assert.Equal(groupList.APIVersion, "")
+assert.Equal("", groupList.APIVersion)
 
 // /apis/autoscaling doesn't exist in release-1.1, so the APIVersion field
 // should be non-empty in the results returned by the server.
@@ -307,7 +307,7 @@ func TestAPIVersionOfDiscoveryEndpoints(t *testing.T) {
 }
 group := metav1.APIGroup{}
 assert.NoError(decodeResponse(resp, &group))
-assert.Equal(group.APIVersion, "v1")
+assert.Equal("v1", group.APIVersion)
 
 // apis/autoscaling/v1 doesn't exist in release-1.1, so the APIVersion field
 // should be non-empty in the results returned by the server.
@@ -318,7 +318,7 @@ func TestAPIVersionOfDiscoveryEndpoints(t *testing.T) {
 }
 resourceList = metav1.APIResourceList{}
 assert.NoError(decodeResponse(resp, &resourceList))
-assert.Equal(resourceList.APIVersion, "v1")
+assert.Equal("v1", resourceList.APIVersion)
 
 }
 
@@ -111,6 +111,6 @@ func TestAdmissionOptionsAddFlags(t *testing.T) {
 }
 
 // using assert because cannot compare neither pointer nor function of underlying GenericAdmission
-assert.Equal(t, opts.GenericAdmission.ConfigFile, "admission_control_config.yaml")
-assert.Equal(t, opts.GenericAdmission.EnablePlugins, []string{"foo", "bar", "baz"})
+assert.Equal(t, "admission_control_config.yaml", opts.GenericAdmission.ConfigFile)
+assert.Equal(t, []string{"foo", "bar", "baz"}, opts.GenericAdmission.EnablePlugins)
 }
@@ -24,7 +24,7 @@ import (
 "github.com/stretchr/testify/assert"
 "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum"
 utilstore "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/testing"
-"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/testing/example_checkpoint_formats/v1"
+v1 "k8s.io/kubernetes/pkg/kubelet/checkpointmanager/testing/example_checkpoint_formats/v1"
 )
 
 var testStore *utilstore.MemStore
@@ -199,8 +199,8 @@ func TestCheckpointManager(t *testing.T) {
 assert.NoError(t, err)
 actualPortMappings, actualHostNetwork := checkpointOut.GetData()
 expPortMappings, expHostNetwork := tc.checkpoint.GetData()
-assert.Equal(t, actualPortMappings, expPortMappings)
-assert.Equal(t, actualHostNetwork, expHostNetwork)
+assert.Equal(t, expPortMappings, actualPortMappings)
+assert.Equal(t, expHostNetwork, actualHostNetwork)
 }
 // Test it fails if tried to read V1 structure into V2, a different structure from the structure which is checkpointed
 checkpointV2 := newFakeCheckpointV2("", nil, false)
@@ -229,7 +229,7 @@ func TestCheckpointManager(t *testing.T) {
 keys, err := manager.ListCheckpoints()
 assert.NoError(t, err)
 sort.Strings(keys)
-assert.Equal(t, keys, []string{"key1", "key2"})
+assert.Equal(t, []string{"key1", "key2"}, keys)
 
 // Test RemoveCheckpoints
 err = manager.RemoveCheckpoint("key1")
@@ -241,7 +241,7 @@ func TestCheckpointManager(t *testing.T) {
 // Test ListCheckpoints
 keys, err = manager.ListCheckpoints()
 assert.NoError(t, err)
-assert.Equal(t, keys, []string{"key2"})
+assert.Equal(t, []string{"key2"}, keys)
 
 // Test Get NonExisted Checkpoint
 checkpointNE := newFakeCheckpointV1("NE", nil, false)
@@ -144,7 +144,7 @@ func TestRun(t *testing.T) {
 // Wait for the second callback to be issued.
 <-callbackChan
 
-require.Equal(t, callbackCount, 2)
+require.Equal(t, 2, callbackCount)
 }
 
 func TestAllocate(t *testing.T) {
@@ -905,7 +905,7 @@ func TestHasWindowsHostProcessContainer(t *testing.T) {
 pod := &v1.Pod{}
 pod.Spec = *testCase.podSpec
 result := HasWindowsHostProcessContainer(pod)
-assert.Equal(t, result, testCase.expectedResult)
+assert.Equal(t, testCase.expectedResult, result)
 })
 }
 }
@@ -154,7 +154,7 @@ func TestDetectImagesInitialDetect(t *testing.T) {
 _, err := manager.detectImages(ctx, zero)
 assert := assert.New(t)
 require.NoError(t, err)
-assert.Equal(manager.imageRecordsLen(), 3)
+assert.Equal(3, manager.imageRecordsLen())
 noContainer, ok := manager.getImageRecord(imageID(0))
 require.True(t, ok)
 assert.Equal(zero, noContainer.firstDetected)
@@ -207,7 +207,7 @@ func TestDetectImagesInitialDetectWithRuntimeHandlerInImageCriAPIFeatureGate(t *
 _, err := manager.detectImages(ctx, zero)
 assert := assert.New(t)
 require.NoError(t, err)
-assert.Equal(manager.imageRecordsLen(), 3)
+assert.Equal(3, manager.imageRecordsLen())
 noContainer, ok := manager.getImageRecordWithRuntimeHandlerInImageCriAPIFeatureGate(imageID(0), testRuntimeHandler)
 require.True(t, ok)
 assert.Equal(zero, noContainer.firstDetected)
@@ -246,7 +246,7 @@ func TestDetectImagesWithNewImage(t *testing.T) {
 _, err := manager.detectImages(ctx, zero)
 assert := assert.New(t)
 require.NoError(t, err)
-assert.Equal(manager.imageRecordsLen(), 2)
+assert.Equal(2, manager.imageRecordsLen())
 
 // Add a new image.
 fakeRuntime.ImageList = []container.Image{
@@ -259,7 +259,7 @@ func TestDetectImagesWithNewImage(t *testing.T) {
 startTime := time.Now().Add(-time.Millisecond)
 _, err = manager.detectImages(ctx, detectedTime)
 require.NoError(t, err)
-assert.Equal(manager.imageRecordsLen(), 3)
+assert.Equal(3, manager.imageRecordsLen())
 noContainer, ok := manager.getImageRecord(imageID(0))
 require.True(t, ok)
 assert.Equal(zero, noContainer.firstDetected)
@@ -403,7 +403,7 @@ func TestDetectImagesContainerStopped(t *testing.T) {
 _, err := manager.detectImages(ctx, zero)
 assert := assert.New(t)
 require.NoError(t, err)
-assert.Equal(manager.imageRecordsLen(), 2)
+assert.Equal(2, manager.imageRecordsLen())
 withContainer, ok := manager.getImageRecord(imageID(1))
 require.True(t, ok)
 
@@ -411,7 +411,7 @@ func TestDetectImagesContainerStopped(t *testing.T) {
 fakeRuntime.AllPodList = []*containertest.FakePod{}
 _, err = manager.detectImages(ctx, time.Now())
 require.NoError(t, err)
-assert.Equal(manager.imageRecordsLen(), 2)
+assert.Equal(2, manager.imageRecordsLen())
 container1, ok := manager.getImageRecord(imageID(0))
 require.True(t, ok)
 assert.Equal(zero, container1.firstDetected)
@@ -442,13 +442,13 @@ func TestDetectImagesWithRemovedImages(t *testing.T) {
 _, err := manager.detectImages(ctx, zero)
 assert := assert.New(t)
 require.NoError(t, err)
-assert.Equal(manager.imageRecordsLen(), 2)
+assert.Equal(2, manager.imageRecordsLen())
 
 // Simulate both images being removed.
 fakeRuntime.ImageList = []container.Image{}
 _, err = manager.detectImages(ctx, time.Now())
 require.NoError(t, err)
-assert.Equal(manager.imageRecordsLen(), 0)
+assert.Equal(0, manager.imageRecordsLen())
 }
 
 func TestFreeSpaceImagesInUseContainersAreIgnored(t *testing.T) {
@@ -573,7 +573,7 @@ func TestFreeSpaceRemoveByLeastRecentlyUsed(t *testing.T) {
 }
 _, err = manager.detectImages(ctx, time.Now())
 require.NoError(t, err)
-require.Equal(t, manager.imageRecordsLen(), 2)
+require.Equal(t, 2, manager.imageRecordsLen())
 
 // We're setting the delete time one minute in the future, so the time the image
 // was first detected and the delete time are different.
@@ -609,7 +609,7 @@ func TestFreeSpaceTiesBrokenByDetectedTime(t *testing.T) {
 fakeRuntime.AllPodList = []*containertest.FakePod{}
 _, err = manager.detectImages(ctx, time.Now())
 require.NoError(t, err)
-require.Equal(t, manager.imageRecordsLen(), 2)
+require.Equal(t, 2, manager.imageRecordsLen())
 
 assert := assert.New(t)
 getImagesAndFreeSpace(ctx, t, assert, manager, fakeRuntime, 1024, 2048, 1, time.Now())
@@ -726,7 +726,7 @@ func TestGarbageCollectImageNotOldEnough(t *testing.T) {
 t.Log(fakeClock.Now())
 _, err := manager.detectImages(ctx, fakeClock.Now())
 require.NoError(t, err)
-require.Equal(t, manager.imageRecordsLen(), 2)
+require.Equal(t, 2, manager.imageRecordsLen())
 // no space freed since one image is in used, and another one is not old enough
 assert := assert.New(t)
 getImagesAndFreeSpace(ctx, t, assert, manager, fakeRuntime, 1024, 0, 2, fakeClock.Now())
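The image garbage-collector tests above mix testify's package-level style (assert.Equal(t, ...), require.Equal(t, ...)) with an assertion object created via assert.New(t), which drops the explicit t argument. A minimal sketch, with stand-in values rather than code from these tests, showing that the expected-first ordering is the same in both styles:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestExpectedFirstInBothStyles(t *testing.T) {
	a := assert.New(t) // binds t once; later calls omit it
	got := 3           // stand-in for a value such as manager.imageRecordsLen()

	a.Equal(3, got)          // instance style: expected, then actual
	assert.Equal(t, 3, got)  // package style: t, expected, actual
	require.Equal(t, 3, got) // same ordering; require stops the test on failure
}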
@@ -635,7 +635,7 @@ func TestEvalCRIPullErr(t *testing.T) {
 input: crierrors.ErrRegistryUnavailable,
 assert: func(msg string, err error) {
 assert.ErrorIs(t, err, crierrors.ErrRegistryUnavailable)
-assert.Equal(t, msg, "image pull failed for test because the registry is unavailable")
+assert.Equal(t, "image pull failed for test because the registry is unavailable", msg)
 },
 },
 {
@@ -643,7 +643,7 @@ func TestEvalCRIPullErr(t *testing.T) {
 input: fmt.Errorf("%v: foo", crierrors.ErrRegistryUnavailable),
 assert: func(msg string, err error) {
 assert.ErrorIs(t, err, crierrors.ErrRegistryUnavailable)
-assert.Equal(t, msg, "image pull failed for test because the registry is unavailable: foo")
+assert.Equal(t, "image pull failed for test because the registry is unavailable: foo", msg)
 },
 },
 {
@@ -651,7 +651,7 @@ func TestEvalCRIPullErr(t *testing.T) {
 input: crierrors.ErrSignatureValidationFailed,
 assert: func(msg string, err error) {
 assert.ErrorIs(t, err, crierrors.ErrSignatureValidationFailed)
-assert.Equal(t, msg, "image pull failed for test because the signature validation failed")
+assert.Equal(t, "image pull failed for test because the signature validation failed", msg)
 },
 },
 {
@@ -659,7 +659,7 @@ func TestEvalCRIPullErr(t *testing.T) {
 input: fmt.Errorf("%w: bar", crierrors.ErrSignatureValidationFailed),
 assert: func(msg string, err error) {
 assert.ErrorIs(t, err, crierrors.ErrSignatureValidationFailed)
-assert.Equal(t, msg, "image pull failed for test because the signature validation failed: bar")
+assert.Equal(t, "image pull failed for test because the signature validation failed: bar", msg)
 },
 },
 } {
@@ -323,7 +323,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 actions := kubeClient.Actions()
 require.Len(t, actions, 2)
 require.True(t, actions[1].Matches("patch", "nodes"))
-require.Equal(t, actions[1].GetSubresource(), "status")
+require.Equal(t, "status", actions[1].GetSubresource())
 
 updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
 assert.NoError(t, err)
@@ -719,7 +719,7 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 actions := kubeClient.Actions()
 require.Len(t, actions, 2)
 require.True(t, actions[1].Matches("patch", "nodes"))
-require.Equal(t, actions[1].GetSubresource(), "status")
+require.Equal(t, "status", actions[1].GetSubresource())
 
 updatedNode, err := kubeClient.CoreV1().Nodes().Get(ctx, testKubeletHostname, metav1.GetOptions{})
 require.NoError(t, err, "can't apply node status patch")
@@ -1642,7 +1642,7 @@ func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
 actions := kubeClient.Actions()
 require.Len(t, actions, 1)
 require.True(t, actions[0].Matches("patch", "nodes"))
-require.Equal(t, actions[0].GetSubresource(), "status")
+require.Equal(t, "status", actions[0].GetSubresource())
 
 updatedNode, err := applyNodeStatusPatch(&existingNode, actions[0].(core.PatchActionImpl).GetPatch())
 assert.NoError(t, err)
@@ -6115,7 +6115,7 @@ func TestGetNonExistentImagePullSecret(t *testing.T) {
 
 assert.Len(t, fakeRecorder.Events, 1)
 event := <-fakeRecorder.Events
-assert.Equal(t, event, expectedEvent)
+assert.Equal(t, expectedEvent, event)
 }
 
 func TestParseGetSubIdsOutput(t *testing.T) {
@@ -44,7 +44,7 @@ func TestPullImage(t *testing.T) {
 images, err := fakeManager.ListImages(ctx)
 assert.NoError(t, err)
 assert.Len(t, images, 1)
-assert.Equal(t, images[0].RepoTags, []string{"busybox"})
+assert.Equal(t, []string{"busybox"}, images[0].RepoTags)
 }
 
 func TestPullImageWithError(t *testing.T) {
@@ -91,7 +91,7 @@ func TestCreatePodSandbox(t *testing.T) {
 assert.NoError(t, err)
 assert.Len(t, sandboxes, 1)
 assert.Equal(t, sandboxes[0].Id, fmt.Sprintf("%s_%s_%s_1", pod.Name, pod.Namespace, pod.UID))
-assert.Equal(t, sandboxes[0].State, runtimeapi.PodSandboxState_SANDBOX_READY)
+assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_READY, sandboxes[0].State)
 }
 
 func TestGeneratePodSandboxLinuxConfigSeccomp(t *testing.T) {
@@ -32,8 +32,8 @@ func TestGetStat(t *testing.T) {
 fi, err := getStat(event)
 fiExpected, errExpected := os.Stat(event.Name)
 
-assert.Equal(t, fi, fiExpected)
-assert.Equal(t, err, errExpected)
+assert.Equal(t, fiExpected, fi)
+assert.Equal(t, errExpected, err)
 }
 
 func TestGetSocketPath(t *testing.T) {
@@ -918,9 +918,9 @@ func TestCheckpointContainer(t *testing.T) {
 t.Errorf("Got error POSTing: %v", err)
 }
 defer resp.Body.Close()
-assert.Equal(t, resp.StatusCode, 500)
+assert.Equal(t, 500, resp.StatusCode)
 body, _ := io.ReadAll(resp.Body)
-assert.Equal(t, string(body), "checkpointing of other/foo/checkpointingFailure failed (Returning error for test)")
+assert.Equal(t, "checkpointing of other/foo/checkpointingFailure failed (Returning error for test)", string(body))
 })
 // Now test a successful checkpoint succeeds
 setPodByNameFunc(fw, podNamespace, podName, expectedContainerName)
@@ -929,7 +929,7 @@ func TestCheckpointContainer(t *testing.T) {
 if err != nil {
 t.Errorf("Got error POSTing: %v", err)
 }
-assert.Equal(t, resp.StatusCode, 200)
+assert.Equal(t, 200, resp.StatusCode)
 })
 
 // Now test for 404 if checkpointing support is explicitly disabled.
@@ -93,14 +93,14 @@ func TestSummaryProviderGetStatsNoSplitFileSystem(t *testing.T) {
 summary, err := provider.Get(ctx, true)
 assert.NoError(err)
 
-assert.Equal(summary.Node.NodeName, "test-node")
+assert.Equal("test-node", summary.Node.NodeName)
 assert.Equal(summary.Node.StartTime, systemBootTime)
 assert.Equal(summary.Node.CPU, cgroupStatsMap["/"].cs.CPU)
 assert.Equal(summary.Node.Memory, cgroupStatsMap["/"].cs.Memory)
 assert.Equal(summary.Node.Swap, cgroupStatsMap["/"].cs.Swap)
 assert.Equal(summary.Node.Network, cgroupStatsMap["/"].ns)
 assert.Equal(summary.Node.Fs, rootFsStats)
-assert.Equal(summary.Node.Runtime, &statsapi.RuntimeStats{ContainerFs: imageFsStats, ImageFs: imageFsStats})
+assert.Equal(&statsapi.RuntimeStats{ContainerFs: imageFsStats, ImageFs: imageFsStats}, summary.Node.Runtime)
 
 assert.Len(summary.Node.SystemContainers, 4)
 assert.Contains(summary.Node.SystemContainers, statsapi.ContainerStats{
@@ -189,7 +189,7 @@ func TestSummaryProviderGetStatsSplitImageFs(t *testing.T) {
 summary, err := provider.Get(ctx, true)
 assert.NoError(err)
 
-assert.Equal(summary.Node.NodeName, "test-node")
+assert.Equal("test-node", summary.Node.NodeName)
 assert.Equal(summary.Node.StartTime, systemBootTime)
 assert.Equal(summary.Node.CPU, cgroupStatsMap["/"].cs.CPU)
 assert.Equal(summary.Node.Memory, cgroupStatsMap["/"].cs.Memory)
@@ -197,7 +197,7 @@ func TestSummaryProviderGetStatsSplitImageFs(t *testing.T) {
 assert.Equal(summary.Node.Network, cgroupStatsMap["/"].ns)
 assert.Equal(summary.Node.Fs, rootFsStats)
 // Since we are a split filesystem we want root filesystem to be container fs and image to be image filesystem
-assert.Equal(summary.Node.Runtime, &statsapi.RuntimeStats{ContainerFs: rootFsStats, ImageFs: imageFsStats})
+assert.Equal(&statsapi.RuntimeStats{ContainerFs: rootFsStats, ImageFs: imageFsStats}, summary.Node.Runtime)
 
 assert.Len(summary.Node.SystemContainers, 4)
 assert.Contains(summary.Node.SystemContainers, statsapi.ContainerStats{
@@ -276,7 +276,7 @@ func TestSummaryProviderGetCPUAndMemoryStats(t *testing.T) {
 summary, err := provider.GetCPUAndMemoryStats(ctx)
 assert.NoError(err)
 
-assert.Equal(summary.Node.NodeName, "test-node")
+assert.Equal("test-node", summary.Node.NodeName)
 assert.Equal(summary.Node.StartTime, cgroupStatsMap["/"].cs.StartTime)
 assert.Equal(summary.Node.CPU, cgroupStatsMap["/"].cs.CPU)
 assert.Equal(summary.Node.Memory, cgroupStatsMap["/"].cs.Memory)
@@ -615,7 +615,7 @@ func TestStaticPod(t *testing.T) {
 assert.True(t, isPodStatusByKubeletEqual(&status, &retrievedStatus), "Expected: %+v, Got: %+v", status, retrievedStatus)
 
 t.Logf("Should sync pod because the corresponding mirror pod is created")
-assert.Equal(t, m.syncBatch(true), 1)
+assert.Equal(t, 1, m.syncBatch(true))
 verifyActions(t, m, []core.Action{getAction(), patchAction()})
 
 t.Logf("syncBatch should not sync any pods because nothing is changed.")
@@ -629,7 +629,7 @@ func TestStaticPod(t *testing.T) {
 m.podManager.(mutablePodManager).AddPod(mirrorPod)
 
 t.Logf("Should not update to mirror pod, because UID has changed.")
-assert.Equal(t, m.syncBatch(true), 1)
+assert.Equal(t, 1, m.syncBatch(true))
 verifyActions(t, m, []core.Action{getAction()})
 }
 
@@ -92,7 +92,7 @@ func testStore(t *testing.T, store Store) {
 keys, err := store.List()
 assert.NoError(t, err)
 sort.Strings(keys)
-assert.Equal(t, keys, []string{"id1", "id2"})
+assert.Equal(t, []string{"id1", "id2"}, keys)
 
 // Test Delete data
 for _, c := range testCases {
@@ -2397,12 +2397,12 @@ func TestReconcileWithUpdateReconstructedFromAPIServer(t *testing.T) {
 if vol.VolumeName == volumeName1 {
 // devicePath + attachability must have been updated from node.status
 assert.True(t, vol.PluginIsAttachable)
-assert.Equal(t, vol.DevicePath, "fake/path")
+assert.Equal(t, "fake/path", vol.DevicePath)
 }
 if vol.VolumeName == volumeName2 {
 // only attachability was updated from node.status
 assert.False(t, vol.PluginIsAttachable)
-assert.Equal(t, vol.DevicePath, "/dev/reconstructed")
+assert.Equal(t, "/dev/reconstructed", vol.DevicePath)
 }
 }
 }
@@ -4415,9 +4415,9 @@ func TestEndpointSliceE2E(t *testing.T) {
 realServers1, rsErr1 := ipvs.GetRealServers(virtualServers1[0])
 assert.NoError(t, rsErr1, "Expected no error getting real servers")
 assert.Len(t, realServers1, 3, "Expected 3 real servers")
-assert.Equal(t, realServers1[0].String(), "10.0.1.1:80")
-assert.Equal(t, realServers1[1].String(), "10.0.1.2:80")
-assert.Equal(t, realServers1[2].String(), "10.0.1.3:80")
+assert.Equal(t, "10.0.1.1:80", realServers1[0].String())
+assert.Equal(t, "10.0.1.2:80", realServers1[1].String())
+assert.Equal(t, "10.0.1.3:80", realServers1[2].String())
 
 fp.OnEndpointSliceDelete(endpointSlice)
 fp.syncProxyRules()
@@ -4963,17 +4963,17 @@ func Test_EndpointSliceReadyAndTerminatingCluster(t *testing.T) {
 realServers1, rsErr1 := ipvs.GetRealServers(clusterIPServer)
 assert.NoError(t, rsErr1, "Expected no error getting real servers")
 assert.Len(t, realServers1, 3, "Expected 3 real servers")
-assert.Equal(t, realServers1[0].String(), "10.0.1.1:80")
-assert.Equal(t, realServers1[1].String(), "10.0.1.2:80")
-assert.Equal(t, realServers1[2].String(), "10.0.1.5:80")
+assert.Equal(t, "10.0.1.1:80", realServers1[0].String())
+assert.Equal(t, "10.0.1.2:80", realServers1[1].String())
+assert.Equal(t, "10.0.1.5:80", realServers1[2].String())
 
 // externalIP should route to cluster-wide ready endpoints
 realServers2, rsErr2 := ipvs.GetRealServers(externalIPServer)
 assert.NoError(t, rsErr2, "Expected no error getting real servers")
 assert.Len(t, realServers2, 3, "Expected 3 real servers")
-assert.Equal(t, realServers2[0].String(), "10.0.1.1:80")
-assert.Equal(t, realServers2[1].String(), "10.0.1.2:80")
-assert.Equal(t, realServers1[2].String(), "10.0.1.5:80")
+assert.Equal(t, "10.0.1.1:80", realServers2[0].String())
+assert.Equal(t, "10.0.1.2:80", realServers2[1].String())
+assert.Equal(t, "10.0.1.5:80", realServers1[2].String())
 
 fp.OnEndpointSliceDelete(endpointSlice)
 fp.syncProxyRules()
@@ -5136,16 +5136,16 @@ func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) {
 realServers1, rsErr1 := ipvs.GetRealServers(clusterIPServer)
 assert.NoError(t, rsErr1, "Expected no error getting real servers")
 assert.Len(t, realServers1, 3, "Expected 3 real servers")
-assert.Equal(t, realServers1[0].String(), "10.0.1.1:80")
-assert.Equal(t, realServers1[1].String(), "10.0.1.2:80")
-assert.Equal(t, realServers1[2].String(), "10.0.1.5:80")
+assert.Equal(t, "10.0.1.1:80", realServers1[0].String())
+assert.Equal(t, "10.0.1.2:80", realServers1[1].String())
+assert.Equal(t, "10.0.1.5:80", realServers1[2].String())
 
 // externalIP should route to local ready + non-terminating endpoints if they exist
 realServers2, rsErr2 := ipvs.GetRealServers(externalIPServer)
 assert.NoError(t, rsErr2, "Expected no error getting real servers")
 assert.Len(t, realServers2, 2, "Expected 2 real servers")
-assert.Equal(t, realServers2[0].String(), "10.0.1.1:80")
-assert.Equal(t, realServers2[1].String(), "10.0.1.2:80")
+assert.Equal(t, "10.0.1.1:80", realServers2[0].String())
+assert.Equal(t, "10.0.1.2:80", realServers2[1].String())
 
 fp.OnEndpointSliceDelete(endpointSlice)
 fp.syncProxyRules()
@@ -5307,17 +5307,17 @@ func Test_EndpointSliceOnlyReadyAndTerminatingCluster(t *testing.T) {
 realServers1, rsErr1 := ipvs.GetRealServers(clusterIPServer)
 assert.NoError(t, rsErr1, "Expected no error getting real servers")
 assert.Len(t, realServers1, 3, "Expected 1 real servers")
-assert.Equal(t, realServers1[0].String(), "10.0.1.1:80")
-assert.Equal(t, realServers1[1].String(), "10.0.1.2:80")
-assert.Equal(t, realServers1[2].String(), "10.0.1.4:80")
+assert.Equal(t, "10.0.1.1:80", realServers1[0].String())
+assert.Equal(t, "10.0.1.2:80", realServers1[1].String())
+assert.Equal(t, "10.0.1.4:80", realServers1[2].String())
 
 // externalIP should fall back to ready + terminating endpoints
 realServers2, rsErr2 := ipvs.GetRealServers(externalIPServer)
 assert.NoError(t, rsErr2, "Expected no error getting real servers")
 assert.Len(t, realServers2, 3, "Expected 2 real servers")
-assert.Equal(t, realServers2[0].String(), "10.0.1.1:80")
-assert.Equal(t, realServers2[1].String(), "10.0.1.2:80")
-assert.Equal(t, realServers2[2].String(), "10.0.1.4:80")
+assert.Equal(t, "10.0.1.1:80", realServers2[0].String())
+assert.Equal(t, "10.0.1.2:80", realServers2[1].String())
+assert.Equal(t, "10.0.1.4:80", realServers2[2].String())
 
 fp.OnEndpointSliceDelete(endpointSlice)
 fp.syncProxyRules()
@@ -5479,14 +5479,14 @@ func Test_EndpointSliceOnlyReadyAndTerminatingLocal(t *testing.T) {
 realServers1, rsErr1 := ipvs.GetRealServers(clusterIPServer)
 assert.NoError(t, rsErr1, "Expected no error getting real servers")
 assert.Len(t, realServers1, 1, "Expected 1 real servers")
-assert.Equal(t, realServers1[0].String(), "10.0.1.5:80")
+assert.Equal(t, "10.0.1.5:80", realServers1[0].String())
 
 // externalIP should fall back to local ready + terminating endpoints
 realServers2, rsErr2 := ipvs.GetRealServers(externalIPServer)
 assert.NoError(t, rsErr2, "Expected no error getting real servers")
 assert.Len(t, realServers2, 2, "Expected 2 real servers")
-assert.Equal(t, realServers2[0].String(), "10.0.1.1:80")
-assert.Equal(t, realServers2[1].String(), "10.0.1.2:80")
+assert.Equal(t, "10.0.1.1:80", realServers2[0].String())
+assert.Equal(t, "10.0.1.2:80", realServers2[1].String())
 
 fp.OnEndpointSliceDelete(endpointSlice)
 fp.syncProxyRules()
@@ -4879,17 +4879,17 @@ func TestProxier_OnServiceCIDRsChanged(t *testing.T) {
 
 proxier = &Proxier{ipFamily: v1.IPv4Protocol}
 proxier.OnServiceCIDRsChanged([]string{"172.30.0.0/16", "fd00:10:96::/112"})
-assert.Equal(t, proxier.serviceCIDRs, "172.30.0.0/16")
+assert.Equal(t, "172.30.0.0/16", proxier.serviceCIDRs)
 
 proxier.OnServiceCIDRsChanged([]string{"172.30.0.0/16", "172.50.0.0/16", "fd00:10:96::/112", "fd00:172:30::/112"})
-assert.Equal(t, proxier.serviceCIDRs, "172.30.0.0/16,172.50.0.0/16")
+assert.Equal(t, "172.30.0.0/16,172.50.0.0/16", proxier.serviceCIDRs)
 
 proxier = &Proxier{ipFamily: v1.IPv6Protocol}
 proxier.OnServiceCIDRsChanged([]string{"172.30.0.0/16", "fd00:10:96::/112"})
-assert.Equal(t, proxier.serviceCIDRs, "fd00:10:96::/112")
+assert.Equal(t, "fd00:10:96::/112", proxier.serviceCIDRs)
 
 proxier.OnServiceCIDRsChanged([]string{"172.30.0.0/16", "172.50.0.0/16", "fd00:10:96::/112", "fd00:172:30::/112"})
-assert.Equal(t, proxier.serviceCIDRs, "fd00:10:96::/112,fd00:172:30::/112")
+assert.Equal(t, "fd00:10:96::/112,fd00:172:30::/112", proxier.serviceCIDRs)
 }
 
 // TestBadIPs tests that "bad" IPs and CIDRs in Services/Endpoints are rewritten to
@@ -176,7 +176,7 @@ func TestOperationGenerator_GenerateExpandAndRecoverVolumeFunc(t *testing.T) {
 }
 updatedPVC := expansionResponse.pvc
 actualResizeStatus := updatedPVC.Status.AllocatedResourceStatuses[v1.ResourceStorage]
-assert.Equal(t, actualResizeStatus, test.expectedResizeStatus)
+assert.Equal(t, test.expectedResizeStatus, actualResizeStatus)
 actualAllocatedSize := updatedPVC.Status.AllocatedResources.Storage()
 if test.expectedAllocatedSize.Cmp(*actualAllocatedSize) != 0 {
 t.Fatalf("GenerateExpandAndRecoverVolumeFunc failed: expected allocated size %s, got %s", test.expectedAllocatedSize.String(), actualAllocatedSize.String())