Merge pull request #127552 from mmorel-35/testifylint/nil-compare@k8s.io/kubernetes

fix: enable nil-compare and error-nil rules from testifylint in module `k8s.io/kubernetes`
Commit f976be809e by Kubernetes Prow Robot, 2024-09-25 19:58:00 +01:00, committed by GitHub (GPG key ID B5690EEEBB952194; no known key found for this signature in database).
13 changed files with 100 additions and 99 deletions
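
For context, the two linters replace testify's generic nil assertions with the dedicated error and nil helpers. The sketch below is illustrative only and is not taken from this PR (the doSomething helper is hypothetical): error-nil prefers NoError/Error over nil checks on error values, and nil-compare prefers Nil/NotNil over Equal/NotEqual comparisons against nil. Both patterns appear throughout the diff below.

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// doSomething is a hypothetical helper used only to illustrate the lint rules.
func doSomething() (interface{}, error) { return nil, nil }

func TestTestifylintNilRules(t *testing.T) {
	val, err := doSomething()

	// error-nil: nil checks on error values should use the error helpers.
	assert.Nil(t, err)     // flagged by error-nil
	assert.NoError(t, err) // preferred replacement

	// nil-compare: Equal/NotEqual against nil should use Nil/NotNil.
	assert.Equal(t, nil, val) // flagged by nil-compare
	assert.Nil(t, val)        // preferred replacement
}
```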


@@ -348,7 +348,7 @@ func TestUpdateCapacityAllocatable(t *testing.T) {
 testManager, err := newManagerImpl(socketName, nil, topologyStore)
 as := assert.New(t)
 as.NotNil(testManager)
-as.Nil(err)
+as.NoError(err)
 devs := []pluginapi.Device{
 {ID: "Device1", Health: pluginapi.Healthy},
@@ -464,11 +464,11 @@ func TestUpdateCapacityAllocatable(t *testing.T) {
 // during the time of propagating capacity change to the scheduler will be
 // properly rejected instead of being incorrectly started.
 err = testManager.writeCheckpoint()
-as.Nil(err)
+as.NoError(err)
 testManager.healthyDevices = make(map[string]sets.Set[string])
 testManager.unhealthyDevices = make(map[string]sets.Set[string])
 err = testManager.readCheckpoint()
-as.Nil(err)
+as.NoError(err)
 as.Len(testManager.endpoints, 1)
 as.Contains(testManager.endpoints, resourceName2)
 capacity, allocatable, removed = testManager.GetCapacity()
@@ -490,7 +490,7 @@ func TestGetAllocatableDevicesMultipleResources(t *testing.T) {
 testManager, err := newManagerImpl(socketName, nil, topologyStore)
 as := assert.New(t)
 as.NotNil(testManager)
-as.Nil(err)
+as.NoError(err)
 resource1Devs := []pluginapi.Device{
 {ID: "R1Device1", Health: pluginapi.Healthy},
@@ -531,7 +531,7 @@ func TestGetAllocatableDevicesHealthTransition(t *testing.T) {
 testManager, err := newManagerImpl(socketName, nil, topologyStore)
 as := assert.New(t)
 as.NotNil(testManager)
-as.Nil(err)
+as.NoError(err)
 resource1Devs := []pluginapi.Device{
 {ID: "R1Device1", Health: pluginapi.Healthy},
@@ -673,10 +673,10 @@ func TestCheckpoint(t *testing.T) {
 resourceName3 := "domain2.com/resource3"
 as := assert.New(t)
 tmpDir, err := os.MkdirTemp("", "checkpoint")
-as.Nil(err)
+as.NoError(err)
 defer os.RemoveAll(tmpDir)
 ckm, err := checkpointmanager.NewCheckpointManager(tmpDir)
-as.Nil(err)
+as.NoError(err)
 testManager := &ManagerImpl{
 endpoints: make(map[string]endpointInfo),
 healthyDevices: make(map[string]sets.Set[string]),
@@ -742,10 +742,10 @@ func TestCheckpoint(t *testing.T) {
 err = testManager.writeCheckpoint()
-as.Nil(err)
+as.NoError(err)
 testManager.podDevices = newPodDevices()
 err = testManager.readCheckpoint()
-as.Nil(err)
+as.NoError(err)
 as.Equal(expectedPodDevices.size(), testManager.podDevices.size())
 for podUID, containerDevices := range expectedPodDevices.devs {
@@ -1007,10 +1007,10 @@ func TestPodContainerDeviceAllocation(t *testing.T) {
 activePods: []*v1.Pod{},
 }
 tmpDir, err := os.MkdirTemp("", "checkpoint")
-as.Nil(err)
+as.NoError(err)
 defer os.RemoveAll(tmpDir)
 testManager, err := getTestManager(tmpDir, podsStub.getActivePods, testResources)
-as.Nil(err)
+as.NoError(err)
 testPods := []*v1.Pod{
 makePod(v1.ResourceList{
@@ -1067,7 +1067,7 @@ func TestPodContainerDeviceAllocation(t *testing.T) {
 }
 runContainerOpts, err := testManager.GetDeviceRunContainerOptions(pod, &pod.Spec.Containers[0])
 if testCase.expErr == nil {
-as.Nil(err)
+as.NoError(err)
 }
 if testCase.expectedContainerOptsLen == nil {
 as.Nil(runContainerOpts)
@@ -1088,7 +1088,7 @@ func TestPodContainerDeviceToAllocate(t *testing.T) {
 resourceName3 := "domain2.com/resource3"
 as := require.New(t)
 tmpDir, err := os.MkdirTemp("", "checkpoint")
-as.Nil(err)
+as.NoError(err)
 defer os.RemoveAll(tmpDir)
 testManager := &ManagerImpl{
@@ -1273,11 +1273,11 @@ func TestGetDeviceRunContainerOptions(t *testing.T) {
 as := require.New(t)
 tmpDir, err := os.MkdirTemp("", "checkpoint")
-as.Nil(err)
+as.NoError(err)
 defer os.RemoveAll(tmpDir)
 testManager, err := getTestManager(tmpDir, podsStub.getActivePods, testResources)
-as.Nil(err)
+as.NoError(err)
 pod1 := makePod(v1.ResourceList{
 v1.ResourceName(res1.resourceName): res1.resourceQuantity,
@@ -1291,13 +1291,13 @@ func TestGetDeviceRunContainerOptions(t *testing.T) {
 podsStub.updateActivePods(activePods)
 err = testManager.Allocate(pod1, &pod1.Spec.Containers[0])
-as.Nil(err)
+as.NoError(err)
 err = testManager.Allocate(pod2, &pod2.Spec.Containers[0])
-as.Nil(err)
+as.NoError(err)
 // when pod is in activePods, GetDeviceRunContainerOptions should return
 runContainerOpts, err := testManager.GetDeviceRunContainerOptions(pod1, &pod1.Spec.Containers[0])
-as.Nil(err)
+as.NoError(err)
 as.Len(runContainerOpts.Devices, 3)
 as.Len(runContainerOpts.Mounts, 2)
 as.Len(runContainerOpts.Envs, 2)
@@ -1308,7 +1308,7 @@ func TestGetDeviceRunContainerOptions(t *testing.T) {
 // when pod is removed from activePods,G etDeviceRunContainerOptions should return error
 runContainerOpts, err = testManager.GetDeviceRunContainerOptions(pod1, &pod1.Spec.Containers[0])
-as.Nil(err)
+as.NoError(err)
 as.Nil(runContainerOpts)
 }
@@ -1335,11 +1335,11 @@ func TestInitContainerDeviceAllocation(t *testing.T) {
 activePods: []*v1.Pod{},
 }
 tmpDir, err := os.MkdirTemp("", "checkpoint")
-as.Nil(err)
+as.NoError(err)
 defer os.RemoveAll(tmpDir)
 testManager, err := getTestManager(tmpDir, podsStub.getActivePods, testResources)
-as.Nil(err)
+as.NoError(err)
 podWithPluginResourcesInInitContainers := &v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
@@ -1393,7 +1393,7 @@ func TestInitContainerDeviceAllocation(t *testing.T) {
 for _, container := range podWithPluginResourcesInInitContainers.Spec.Containers {
 err = testManager.Allocate(podWithPluginResourcesInInitContainers, &container)
 }
-as.Nil(err)
+as.NoError(err)
 podUID := string(podWithPluginResourcesInInitContainers.UID)
 initCont1 := podWithPluginResourcesInInitContainers.Spec.InitContainers[0].Name
 initCont2 := podWithPluginResourcesInInitContainers.Spec.InitContainers[1].Name
@@ -1436,11 +1436,11 @@ func TestRestartableInitContainerDeviceAllocation(t *testing.T) {
 activePods: []*v1.Pod{},
 }
 tmpDir, err := os.MkdirTemp("", "checkpoint")
-as.Nil(err)
+as.NoError(err)
 defer os.RemoveAll(tmpDir)
 testManager, err := getTestManager(tmpDir, podsStub.getActivePods, testResources)
-as.Nil(err)
+as.NoError(err)
 containerRestartPolicyAlways := v1.ContainerRestartPolicyAlways
 podWithPluginResourcesInRestartableInitContainers := &v1.Pod{
@@ -1503,7 +1503,7 @@ func TestRestartableInitContainerDeviceAllocation(t *testing.T) {
 for _, container := range podWithPluginResourcesInRestartableInitContainers.Spec.Containers {
 err = testManager.Allocate(podWithPluginResourcesInRestartableInitContainers, &container)
 }
-as.Nil(err)
+as.NoError(err)
 podUID := string(podWithPluginResourcesInRestartableInitContainers.UID)
 regularInitCont1 := podWithPluginResourcesInRestartableInitContainers.Spec.InitContainers[0].Name
 restartableInitCont2 := podWithPluginResourcesInRestartableInitContainers.Spec.InitContainers[1].Name
@@ -1553,11 +1553,11 @@ func TestUpdatePluginResources(t *testing.T) {
 as := assert.New(t)
 monitorCallback := func(resourceName string, devices []pluginapi.Device) {}
 tmpDir, err := os.MkdirTemp("", "checkpoint")
-as.Nil(err)
+as.NoError(err)
 defer os.RemoveAll(tmpDir)
 ckm, err := checkpointmanager.NewCheckpointManager(tmpDir)
-as.Nil(err)
+as.NoError(err)
 m := &ManagerImpl{
 allocatedDevices: make(map[string]sets.Set[string]),
 healthyDevices: make(map[string]sets.Set[string]),
@@ -1611,11 +1611,11 @@ func TestDevicePreStartContainer(t *testing.T) {
 activePods: []*v1.Pod{},
 }
 tmpDir, err := os.MkdirTemp("", "checkpoint")
-as.Nil(err)
+as.NoError(err)
 defer os.RemoveAll(tmpDir)
 testManager, err := getTestManager(tmpDir, podsStub.getActivePods, []TestResource{res1})
-as.Nil(err)
+as.NoError(err)
 ch := make(chan []string, 1)
 testManager.endpoints[res1.resourceName] = endpointInfo{
@@ -1631,9 +1631,9 @@ func TestDevicePreStartContainer(t *testing.T) {
 activePods = append(activePods, pod)
 podsStub.updateActivePods(activePods)
 err = testManager.Allocate(pod, &pod.Spec.Containers[0])
-as.Nil(err)
+as.NoError(err)
 runContainerOpts, err := testManager.GetDeviceRunContainerOptions(pod, &pod.Spec.Containers[0])
-as.Nil(err)
+as.NoError(err)
 var initializedDevs []string
 select {
 case <-time.After(time.Second):
@@ -1647,7 +1647,7 @@ func TestDevicePreStartContainer(t *testing.T) {
 as.Equal(len(initializedDevs), res1.devs.Devices().Len())
 expectedResps, err := allocateStubFunc()([]string{"dev1", "dev2"})
-as.Nil(err)
+as.NoError(err)
 as.Len(expectedResps.ContainerResponses, 1)
 expectedResp := expectedResps.ContainerResponses[0]
 as.Equal(len(runContainerOpts.Devices), len(expectedResp.Devices))
@@ -1659,9 +1659,9 @@ func TestDevicePreStartContainer(t *testing.T) {
 activePods = append(activePods, pod2)
 podsStub.updateActivePods(activePods)
 err = testManager.Allocate(pod2, &pod2.Spec.Containers[0])
-as.Nil(err)
+as.NoError(err)
 _, err = testManager.GetDeviceRunContainerOptions(pod2, &pod2.Spec.Containers[0])
-as.Nil(err)
+as.NoError(err)
 select {
 case <-time.After(time.Millisecond):
 t.Log("When pod resourceQuantity is 0, PreStartContainer RPC stub will be skipped")
@@ -1673,10 +1673,10 @@ func TestDevicePreStartContainer(t *testing.T) {
 func TestResetExtendedResource(t *testing.T) {
 as := assert.New(t)
 tmpDir, err := os.MkdirTemp("", "checkpoint")
-as.Nil(err)
+as.NoError(err)
 defer os.RemoveAll(tmpDir)
 ckm, err := checkpointmanager.NewCheckpointManager(tmpDir)
-as.Nil(err)
+as.NoError(err)
 testManager := &ManagerImpl{
 endpoints: make(map[string]endpointInfo),
 healthyDevices: make(map[string]sets.Set[string]),
@@ -1699,16 +1699,16 @@ func TestResetExtendedResource(t *testing.T) {
 testManager.healthyDevices[extendedResourceName].Insert("dev1")
 // checkpoint is present, indicating node hasn't been recreated
 err = testManager.writeCheckpoint()
-as.Nil(err)
+require.NoError(t, err)
 as.False(testManager.ShouldResetExtendedResourceCapacity())
 // checkpoint is absent, representing node recreation
 ckpts, err := ckm.ListCheckpoints()
-as.Nil(err)
+as.NoError(err)
 for _, ckpt := range ckpts {
 err = ckm.RemoveCheckpoint(ckpt)
-as.Nil(err)
+as.NoError(err)
 }
 as.True(testManager.ShouldResetExtendedResourceCapacity())
 }


@@ -129,7 +129,7 @@ func createTestQOSContainerManager() (*qosContainerManagerImpl, error) {
 func TestQoSContainerCgroup(t *testing.T) {
 m, err := createTestQOSContainerManager()
-assert.Nil(t, err)
+assert.NoError(t, err)
 qosConfigs := map[v1.PodQOSClass]*CgroupConfig{
 v1.PodQOSGuaranteed: {


@@ -157,7 +157,7 @@ func TestCacheGetPodDoesNotExist(t *testing.T) {
 // object with id filled.
 actualStatus, actualErr := cache.Get(podID)
 assert.Equal(t, status, actualStatus)
-assert.Equal(t, nil, actualErr)
+assert.NoError(t, actualErr)
 }
 func TestDelete(t *testing.T) {
@@ -167,13 +167,13 @@ func TestDelete(t *testing.T) {
 cache.Set(podID, status, nil, time.Time{})
 actualStatus, actualErr := cache.Get(podID)
 assert.Equal(t, status, actualStatus)
-assert.Equal(t, nil, actualErr)
+assert.NoError(t, actualErr)
 // Delete the pod from cache, and verify that we get an empty status.
 cache.Delete(podID)
 expectedStatus := &PodStatus{ID: podID}
 actualStatus, actualErr = cache.Get(podID)
 assert.Equal(t, expectedStatus, actualStatus)
-assert.Equal(t, nil, actualErr)
+assert.NoError(t, actualErr)
 }
 func verifyNotification(t *testing.T, ch chan *data, expectNotification bool) {


@@ -644,7 +644,7 @@ func TestGarbageCollectCadvisorFailure(t *testing.T) {
 manager, _ := newRealImageGCManager(policy, mockStatsProvider)
 mockStatsProvider.EXPECT().ImageFsStats(mock.Anything).Return(&statsapi.FsStats{}, &statsapi.FsStats{}, fmt.Errorf("error"))
-assert.NotNil(t, manager.GarbageCollect(ctx, time.Now()))
+assert.Error(t, manager.GarbageCollect(ctx, time.Now()))
 }
 func TestGarbageCollectBelowSuccess(t *testing.T) {
@@ -689,7 +689,7 @@ func TestGarbageCollectNotEnoughFreed(t *testing.T) {
 makeImage(0, 50),
 }
-assert.NotNil(t, manager.GarbageCollect(ctx, time.Now()))
+assert.Error(t, manager.GarbageCollect(ctx, time.Now()))
 }
 func TestGarbageCollectImageNotOldEnough(t *testing.T) {


@@ -584,7 +584,7 @@ func TestMaxParallelImagePullsLimit(t *testing.T) {
 wg.Add(1)
 go func() {
 _, _, err := puller.EnsureImageExists(ctx, nil, pod, container.Image, nil, nil, "", container.ImagePullPolicy)
-assert.Nil(t, err)
+assert.NoError(t, err)
 wg.Done()
 }()
 }
@@ -596,7 +596,7 @@ func TestMaxParallelImagePullsLimit(t *testing.T) {
 wg.Add(1)
 go func() {
 _, _, err := puller.EnsureImageExists(ctx, nil, pod, container.Image, nil, nil, "", container.ImagePullPolicy)
-assert.Nil(t, err)
+assert.NoError(t, err)
 wg.Done()
 }()
 }


@@ -21,7 +21,7 @@ import (
 "github.com/stretchr/testify/assert"
-"k8s.io/api/core/v1"
+v1 "k8s.io/api/core/v1"
 apiequality "k8s.io/apimachinery/pkg/api/equality"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -69,7 +69,7 @@ func TestPodResourceLimitsDefaulting(t *testing.T) {
 as := assert.New(t)
 for idx, tc := range cases {
 actual, _, err := tk.kubelet.defaultPodLimitsForDownwardAPI(tc.pod, nil)
-as.Nil(err, "failed to default pod limits: %v", err)
+as.NoError(err, "failed to default pod limits: %v", err)
 if !apiequality.Semantic.DeepEqual(tc.expected, actual) {
 as.Fail("test case [%d] failed. Expected: %+v, Got: %+v", idx, tc.expected, actual)
 }


@@ -467,7 +467,7 @@ func TestGeneratePodSandboxWindowsConfig_HostNetwork(t *testing.T) {
 wc, err := m.generatePodSandboxWindowsConfig(pod)
 assert.Equal(t, testCase.expectedWindowsConfig, wc)
-assert.Equal(t, nil, err)
+assert.NoError(t, err)
 })
 }
 }


@@ -28,6 +28,7 @@ import (
 "github.com/google/go-cmp/cmp"
 "github.com/stretchr/testify/assert"
+"github.com/stretchr/testify/require"
 "k8s.io/apimachinery/pkg/types"
 "k8s.io/component-base/metrics/testutil"
@@ -420,7 +421,7 @@ func TestRemoveCacheEntry(t *testing.T) {
 pleg.Relist()
 actualStatus, actualErr := pleg.cache.Get(pods[0].ID)
 assert.Equal(t, &kubecontainer.PodStatus{ID: pods[0].ID}, actualStatus)
-assert.Equal(t, nil, actualErr)
+assert.NoError(t, actualErr)
 }
 func TestHealthy(t *testing.T) {
@@ -479,7 +480,7 @@ func TestRelistWithReinspection(t *testing.T) {
 actualEvents := getEventsFromChannel(ch)
 actualStatus, actualErr := pleg.cache.Get(podID)
 assert.Equal(t, goodStatus, actualStatus)
-assert.Equal(t, nil, actualErr)
+assert.NoError(t, actualErr)
 assert.Exactly(t, []*PodLifecycleEvent{goodEvent}, actualEvents)
 // listing 2 - pretend runtime was in the middle of creating the non-infra container for the pod
@@ -513,7 +514,7 @@ func TestRelistWithReinspection(t *testing.T) {
 actualEvents = getEventsFromChannel(ch)
 actualStatus, actualErr = pleg.cache.Get(podID)
 assert.Equal(t, goodStatus, actualStatus)
-assert.Equal(t, nil, actualErr)
+assert.NoError(t, actualErr)
 // no events are expected because relist #1 set the old pod record which has the infra container
 // running. relist #2 had the inspection error and therefore didn't modify either old or new.
 // relist #3 forced the reinspection of the pod to retrieve its status, but because the list of
@@ -633,7 +634,7 @@ func TestRelistIPChange(t *testing.T) {
 actualEvents := getEventsFromChannel(ch)
 actualStatus, actualErr := pleg.cache.Get(pod.ID)
 assert.Equal(t, status, actualStatus, tc.name)
-assert.Nil(t, actualErr, tc.name)
+assert.NoError(t, actualErr, tc.name)
 assert.Exactly(t, []*PodLifecycleEvent{event}, actualEvents)
 // Clear the IP address and mark the container terminated
@@ -658,7 +659,7 @@ func TestRelistIPChange(t *testing.T) {
 statusCopy := *status
 statusCopy.IPs = tc.podIPs
 assert.Equal(t, &statusCopy, actualStatus, tc.name)
-assert.Nil(t, actualErr, tc.name)
+require.NoError(t, actualErr, tc.name)
 assert.Exactly(t, []*PodLifecycleEvent{event}, actualEvents)
 }
 }


@@ -178,7 +178,7 @@ func TestNormalVolumeEvent(t *testing.T) {
 statsCalculator.calcAndStoreStats()
 event, err := WatchEvent(eventStore)
-assert.NotNil(t, err)
+assert.Error(t, err)
 assert.Equal(t, "", event)
 }
@@ -206,7 +206,7 @@ func TestAbnormalVolumeEvent(t *testing.T) {
 statsCalculator.calcAndStoreStats()
 event, err := WatchEvent(eventStore)
-assert.Nil(t, err)
+assert.NoError(t, err)
 assert.Equal(t, fmt.Sprintf("Warning VolumeConditionAbnormal Volume %s: The target path of the volume doesn't exist", "vol0"), event)
 }


@@ -42,10 +42,10 @@ func TestLocalEndpoint(t *testing.T) {
 for _, test := range tests {
 fullPath, err := LocalEndpoint(test.path, test.file)
 if test.expectError {
-assert.NotNil(t, err, "expected error")
+assert.Error(t, err, "expected error")
 continue
 }
-assert.Nil(t, err, "expected no error")
+assert.NoError(t, err, "expected no error")
 assert.Equal(t, test.expectedFullPath, fullPath)
 }
 }


@@ -93,7 +93,7 @@ func TestGrpcProber_Probe(t *testing.T) {
 s := New()
 p, o, err := s.Probe("", "", 32, time.Second)
 assert.Equal(t, probe.Failure, p)
-assert.Equal(t, nil, err)
+assert.NoError(t, err)
 assert.Equal(t, "timeout: failed to connect service \":32\" within 1s: context deadline exceeded", o)
 })
 t.Run("Should: return nil error because connection closed", func(t *testing.T) {
@@ -105,13 +105,13 @@ func TestGrpcProber_Probe(t *testing.T) {
 assert.Len(t, u, 3)
 port, err := strconv.Atoi(u[2])
-assert.Equal(t, nil, err)
+assert.NoError(t, err)
 // take some time to wait server boot
 time.Sleep(2 * time.Second)
 p, _, err := s.Probe("127.0.0.1", "", port, time.Second)
 assert.Equal(t, probe.Failure, p)
-assert.Equal(t, nil, err)
+assert.NoError(t, err)
 })
 t.Run("Should: return nil error because server response not served", func(t *testing.T) {
 s := New()
@@ -127,7 +127,7 @@ func TestGrpcProber_Probe(t *testing.T) {
 time.Sleep(2 * time.Second)
 p, o, err := s.Probe("0.0.0.0", "", port, time.Second)
 assert.Equal(t, probe.Failure, p)
-assert.Equal(t, nil, err)
+assert.NoError(t, err)
 assert.Equal(t, "service unhealthy (responded with \"NOT_SERVING\")", o)
 })
 t.Run("Should: return nil-error because server not response in time", func(t *testing.T) {
@@ -145,7 +145,7 @@ func TestGrpcProber_Probe(t *testing.T) {
 time.Sleep(2 * time.Second)
 p, o, err := s.Probe("0.0.0.0", "", port, time.Second*2)
 assert.Equal(t, probe.Failure, p)
-assert.Equal(t, nil, err)
+assert.NoError(t, err)
 assert.Equal(t, "timeout: health rpc did not complete within 2s", o)
 })
@@ -164,7 +164,7 @@ func TestGrpcProber_Probe(t *testing.T) {
 time.Sleep(2 * time.Second)
 p, _, err := s.Probe("0.0.0.0", "", port, time.Second*2)
 assert.Equal(t, probe.Success, p)
-assert.Equal(t, nil, err)
+assert.NoError(t, err)
 })
 t.Run("Should: not return error because check was success, when listen port is 0", func(t *testing.T) {
 s := New()
@@ -181,6 +181,6 @@ func TestGrpcProber_Probe(t *testing.T) {
 time.Sleep(2 * time.Second)
 p, _, err := s.Probe("0.0.0.0", "", port, time.Second*2)
 assert.Equal(t, probe.Success, p)
-assert.Equal(t, nil, err)
+assert.NoError(t, err)
 })
 }


@@ -4410,10 +4410,10 @@ func TestEndpointSliceE2E(t *testing.T) {
 assert.Equal(t, 1, activeEntries1.Len(), "Expected 1 active entry in KUBE-LOOP-BACK")
 assert.True(t, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
 virtualServers1, vsErr1 := ipvs.GetVirtualServers()
-assert.Nil(t, vsErr1, "Expected no error getting virtual servers")
+assert.NoError(t, vsErr1, "Expected no error getting virtual servers")
 assert.Len(t, virtualServers1, 1, "Expected 1 virtual server")
 realServers1, rsErr1 := ipvs.GetRealServers(virtualServers1[0])
-assert.Nil(t, rsErr1, "Expected no error getting real servers")
+assert.NoError(t, rsErr1, "Expected no error getting real servers")
 assert.Len(t, realServers1, 3, "Expected 3 real servers")
 assert.Equal(t, realServers1[0].String(), "10.0.1.1:80")
 assert.Equal(t, realServers1[1].String(), "10.0.1.2:80")
@@ -4427,10 +4427,10 @@ func TestEndpointSliceE2E(t *testing.T) {
 activeEntries2 := fp.ipsetList["KUBE-LOOP-BACK"].activeEntries
 assert.Equal(t, 0, activeEntries2.Len(), "Expected 0 active entries in KUBE-LOOP-BACK")
 virtualServers2, vsErr2 := ipvs.GetVirtualServers()
-assert.Nil(t, vsErr2, "Expected no error getting virtual servers")
+assert.NoError(t, vsErr2, "Expected no error getting virtual servers")
 assert.Len(t, virtualServers2, 1, "Expected 1 virtual server")
 realServers2, rsErr2 := ipvs.GetRealServers(virtualServers2[0])
-assert.Nil(t, rsErr2, "Expected no error getting real servers")
+assert.NoError(t, rsErr2, "Expected no error getting real servers")
 assert.Empty(t, realServers2, "Expected 0 real servers")
 }
@@ -4805,11 +4805,11 @@ func TestTestInternalTrafficPolicyE2E(t *testing.T) {
 if tc.expectVirtualServer {
 virtualServers1, vsErr1 := ipvs.GetVirtualServers()
-assert.Nil(t, vsErr1, "Expected no error getting virtual servers")
+assert.NoError(t, vsErr1, "Expected no error getting virtual servers")
 assert.Len(t, virtualServers1, 1, "Expected 1 virtual server")
 realServers1, rsErr1 := ipvs.GetRealServers(virtualServers1[0])
-assert.Nil(t, rsErr1, "Expected no error getting real servers")
+assert.NoError(t, rsErr1, "Expected no error getting real servers")
 assert.Len(t, realServers1, tc.expectLocalRealServerNum, fmt.Sprintf("Expected %d real servers", tc.expectLocalRealServerNum))
 for i := 0; i < tc.expectLocalRealServerNum; i++ {
@@ -4825,10 +4825,10 @@ func TestTestInternalTrafficPolicyE2E(t *testing.T) {
 activeEntries3 := fp.ipsetList["KUBE-LOOP-BACK"].activeEntries
 assert.Equal(t, 0, activeEntries3.Len(), "Expected 0 active entries in KUBE-LOOP-BACK")
 virtualServers2, vsErr2 := ipvs.GetVirtualServers()
-assert.Nil(t, vsErr2, "Expected no error getting virtual servers")
+assert.NoError(t, vsErr2, "Expected no error getting virtual servers")
 assert.Len(t, virtualServers2, 1, "Expected 1 virtual server")
 realServers2, rsErr2 := ipvs.GetRealServers(virtualServers2[0])
-assert.Nil(t, rsErr2, "Expected no error getting real servers")
+assert.NoError(t, rsErr2, "Expected no error getting real servers")
 assert.Empty(t, realServers2, "Expected 0 real servers")
 }
 }
@@ -4945,7 +4945,7 @@ func Test_EndpointSliceReadyAndTerminatingCluster(t *testing.T) {
 assert.True(t, activeEntries1.Has("10.0.1.4,tcp:80,10.0.1.4"), "Expected activeEntries to reference fourth pod")
 virtualServers, vsErr := ipvs.GetVirtualServers()
-assert.Nil(t, vsErr, "Expected no error getting virtual servers")
+assert.NoError(t, vsErr, "Expected no error getting virtual servers")
 assert.Len(t, virtualServers, 2, "Expected 2 virtual server")
 var clusterIPServer, externalIPServer *utilipvs.VirtualServer
@@ -4961,7 +4961,7 @@ func Test_EndpointSliceReadyAndTerminatingCluster(t *testing.T) {
 // clusterIP should route to cluster-wide ready endpoints
 realServers1, rsErr1 := ipvs.GetRealServers(clusterIPServer)
-assert.Nil(t, rsErr1, "Expected no error getting real servers")
+assert.NoError(t, rsErr1, "Expected no error getting real servers")
 assert.Len(t, realServers1, 3, "Expected 3 real servers")
 assert.Equal(t, realServers1[0].String(), "10.0.1.1:80")
 assert.Equal(t, realServers1[1].String(), "10.0.1.2:80")
@@ -4969,7 +4969,7 @@ func Test_EndpointSliceReadyAndTerminatingCluster(t *testing.T) {
 // externalIP should route to cluster-wide ready endpoints
 realServers2, rsErr2 := ipvs.GetRealServers(externalIPServer)
-assert.Nil(t, rsErr2, "Expected no error getting real servers")
+assert.NoError(t, rsErr2, "Expected no error getting real servers")
 assert.Len(t, realServers2, 3, "Expected 3 real servers")
 assert.Equal(t, realServers2[0].String(), "10.0.1.1:80")
 assert.Equal(t, realServers2[1].String(), "10.0.1.2:80")
@@ -4984,7 +4984,7 @@ func Test_EndpointSliceReadyAndTerminatingCluster(t *testing.T) {
 assert.Equal(t, 0, activeEntries2.Len(), "Expected 0 active entries in KUBE-LOOP-BACK")
 virtualServers, vsErr = ipvs.GetVirtualServers()
-assert.Nil(t, vsErr, "Expected no error getting virtual servers")
+assert.NoError(t, vsErr, "Expected no error getting virtual servers")
 assert.Len(t, virtualServers, 2, "Expected 2 virtual server")
 for _, virtualServer := range virtualServers {
@@ -4998,11 +4998,11 @@ func Test_EndpointSliceReadyAndTerminatingCluster(t *testing.T) {
 }
 realServers1, rsErr1 = ipvs.GetRealServers(clusterIPServer)
-assert.Nil(t, rsErr1, "Expected no error getting real servers")
+assert.NoError(t, rsErr1, "Expected no error getting real servers")
 assert.Empty(t, realServers1, "Expected 0 real servers")
 realServers2, rsErr2 = ipvs.GetRealServers(externalIPServer)
-assert.Nil(t, rsErr2, "Expected no error getting real servers")
+assert.NoError(t, rsErr2, "Expected no error getting real servers")
 assert.Empty(t, realServers2, "Expected 0 real servers")
 }
@@ -5118,7 +5118,7 @@ func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) {
 assert.True(t, activeEntries1.Has("10.0.1.4,tcp:80,10.0.1.4"), "Expected activeEntries to reference second (local) pod")
 virtualServers, vsErr := ipvs.GetVirtualServers()
-assert.Nil(t, vsErr, "Expected no error getting virtual servers")
+assert.NoError(t, vsErr, "Expected no error getting virtual servers")
 assert.Len(t, virtualServers, 2, "Expected 2 virtual server")
 var clusterIPServer, externalIPServer *utilipvs.VirtualServer
@@ -5134,7 +5134,7 @@ func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) {
 // clusterIP should route to cluster-wide ready endpoints
 realServers1, rsErr1 := ipvs.GetRealServers(clusterIPServer)
-assert.Nil(t, rsErr1, "Expected no error getting real servers")
+assert.NoError(t, rsErr1, "Expected no error getting real servers")
 assert.Len(t, realServers1, 3, "Expected 3 real servers")
 assert.Equal(t, realServers1[0].String(), "10.0.1.1:80")
 assert.Equal(t, realServers1[1].String(), "10.0.1.2:80")
@@ -5142,7 +5142,7 @@ func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) {
 // externalIP should route to local ready + non-terminating endpoints if they exist
 realServers2, rsErr2 := ipvs.GetRealServers(externalIPServer)
-assert.Nil(t, rsErr2, "Expected no error getting real servers")
+assert.NoError(t, rsErr2, "Expected no error getting real servers")
 assert.Len(t, realServers2, 2, "Expected 2 real servers")
 assert.Equal(t, realServers2[0].String(), "10.0.1.1:80")
 assert.Equal(t, realServers2[1].String(), "10.0.1.2:80")
@@ -5156,7 +5156,7 @@ func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) {
 assert.Equal(t, 0, activeEntries2.Len(), "Expected 0 active entries in KUBE-LOOP-BACK")
 virtualServers, vsErr = ipvs.GetVirtualServers()
-assert.Nil(t, vsErr, "Expected no error getting virtual servers")
+assert.NoError(t, vsErr, "Expected no error getting virtual servers")
 assert.Len(t, virtualServers, 2, "Expected 2 virtual server")
 for _, virtualServer := range virtualServers {
@@ -5170,11 +5170,11 @@ func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) {
 }
 realServers1, rsErr1 = ipvs.GetRealServers(clusterIPServer)
-assert.Nil(t, rsErr1, "Expected no error getting real servers")
+assert.NoError(t, rsErr1, "Expected no error getting real servers")
 assert.Empty(t, realServers1, "Expected 0 real servers")
 realServers2, rsErr2 = ipvs.GetRealServers(externalIPServer)
-assert.Nil(t, rsErr2, "Expected no error getting real servers")
+assert.NoError(t, rsErr2, "Expected no error getting real servers")
 assert.Empty(t, realServers2, "Expected 0 real servers")
 }
@@ -5289,7 +5289,7 @@ func Test_EndpointSliceOnlyReadyAndTerminatingCluster(t *testing.T) {
 assert.True(t, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference second (local) pod")
 virtualServers, vsErr := ipvs.GetVirtualServers()
-assert.Nil(t, vsErr, "Expected no error getting virtual servers")
+assert.NoError(t, vsErr, "Expected no error getting virtual servers")
 assert.Len(t, virtualServers, 2, "Expected 2 virtual server")
 var clusterIPServer, externalIPServer *utilipvs.VirtualServer
@@ -5305,7 +5305,7 @@ func Test_EndpointSliceOnlyReadyAndTerminatingCluster(t *testing.T) {
 // clusterIP should fall back to cluster-wide ready + terminating endpoints
 realServers1, rsErr1 := ipvs.GetRealServers(clusterIPServer)
-assert.Nil(t, rsErr1, "Expected no error getting real servers")
+assert.NoError(t, rsErr1, "Expected no error getting real servers")
 assert.Len(t, realServers1, 3, "Expected 1 real servers")
 assert.Equal(t, realServers1[0].String(), "10.0.1.1:80")
 assert.Equal(t, realServers1[1].String(), "10.0.1.2:80")
@@ -5313,7 +5313,7 @@ func Test_EndpointSliceOnlyReadyAndTerminatingCluster(t *testing.T) {
 // externalIP should fall back to ready + terminating endpoints
 realServers2, rsErr2 := ipvs.GetRealServers(externalIPServer)
-assert.Nil(t, rsErr2, "Expected no error getting real servers")
+assert.NoError(t, rsErr2, "Expected no error getting real servers")
 assert.Len(t, realServers2, 3, "Expected 2 real servers")
 assert.Equal(t, realServers2[0].String(), "10.0.1.1:80")
 assert.Equal(t, realServers2[1].String(), "10.0.1.2:80")
@@ -5328,7 +5328,7 @@ func Test_EndpointSliceOnlyReadyAndTerminatingCluster(t *testing.T) {
 assert.Equal(t, 0, activeEntries2.Len(), "Expected 0 active entries in KUBE-LOOP-BACK")
 virtualServers, vsErr = ipvs.GetVirtualServers()
-assert.Nil(t, vsErr, "Expected no error getting virtual servers")
+assert.NoError(t, vsErr, "Expected no error getting virtual servers")
 assert.Len(t, virtualServers, 2, "Expected 2 virtual server")
 for _, virtualServer := range virtualServers {
@@ -5342,11 +5342,11 @@ func Test_EndpointSliceOnlyReadyAndTerminatingCluster(t *testing.T) {
 }
 realServers1, rsErr1 = ipvs.GetRealServers(clusterIPServer)
-assert.Nil(t, rsErr1, "Expected no error getting real servers")
+assert.NoError(t, rsErr1, "Expected no error getting real servers")
 assert.Empty(t, realServers1, "Expected 0 real servers")
 realServers2, rsErr2 = ipvs.GetRealServers(externalIPServer)
-assert.Nil(t, rsErr2, "Expected no error getting real servers")
+assert.NoError(t, rsErr2, "Expected no error getting real servers")
 assert.Empty(t, realServers2, "Expected 0 real servers")
 }
@@ -5461,7 +5461,7 @@ func Test_EndpointSliceOnlyReadyAndTerminatingLocal(t *testing.T) {
 assert.True(t, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference second (local) pod")
 virtualServers, vsErr := ipvs.GetVirtualServers()
-assert.Nil(t, vsErr, "Expected no error getting virtual servers")
+assert.NoError(t, vsErr, "Expected no error getting virtual servers")
 assert.Len(t, virtualServers, 2, "Expected 2 virtual server")
 var clusterIPServer, externalIPServer *utilipvs.VirtualServer
@@ -5477,13 +5477,13 @@ func Test_EndpointSliceOnlyReadyAndTerminatingLocal(t *testing.T) {
 // clusterIP should route to cluster-wide Ready endpoints
 realServers1, rsErr1 := ipvs.GetRealServers(clusterIPServer)
-assert.Nil(t, rsErr1, "Expected no error getting real servers")
+assert.NoError(t, rsErr1, "Expected no error getting real servers")
 assert.Len(t, realServers1, 1, "Expected 1 real servers")
 assert.Equal(t, realServers1[0].String(), "10.0.1.5:80")
 // externalIP should fall back to local ready + terminating endpoints
 realServers2, rsErr2 := ipvs.GetRealServers(externalIPServer)
-assert.Nil(t, rsErr2, "Expected no error getting real servers")
+assert.NoError(t, rsErr2, "Expected no error getting real servers")
 assert.Len(t, realServers2, 2, "Expected 2 real servers")
 assert.Equal(t, realServers2[0].String(), "10.0.1.1:80")
 assert.Equal(t, realServers2[1].String(), "10.0.1.2:80")
@@ -5497,7 +5497,7 @@ func Test_EndpointSliceOnlyReadyAndTerminatingLocal(t *testing.T) {
 assert.Equal(t, 0, activeEntries2.Len(), "Expected 0 active entries in KUBE-LOOP-BACK")
 virtualServers, vsErr = ipvs.GetVirtualServers()
-assert.Nil(t, vsErr, "Expected no error getting virtual servers")
+assert.NoError(t, vsErr, "Expected no error getting virtual servers")
 assert.Len(t, virtualServers, 2, "Expected 2 virtual server")
 for _, virtualServer := range virtualServers {
@@ -5511,11 +5511,11 @@ func Test_EndpointSliceOnlyReadyAndTerminatingLocal(t *testing.T) {
 }
 realServers1, rsErr1 = ipvs.GetRealServers(clusterIPServer)
-assert.Nil(t, rsErr1, "Expected no error getting real servers")
+assert.NoError(t, rsErr1, "Expected no error getting real servers")
 assert.Empty(t, realServers1, "Expected 0 real servers")
 realServers2, rsErr2 = ipvs.GetRealServers(externalIPServer)
-assert.Nil(t, rsErr2, "Expected no error getting real servers")
+assert.NoError(t, rsErr2, "Expected no error getting real servers")
 assert.Empty(t, realServers2, "Expected 0 real servers")
 }


@@ -961,7 +961,7 @@ func TestVolumeHealthDisable(t *testing.T) {
 csClient := setupClientWithVolumeStatsAndCondition(t, tc.volumeStatsSet, false, true, false)
 metrics, err := csClient.NodeGetVolumeStats(ctx, csiSource.VolumeHandle, tc.volumeData.DeviceMountPath)
 if tc.success {
-assert.Nil(t, err)
+assert.NoError(t, err)
 }
 if metrics == nil {