diff --git a/hack/golangci-hints.yaml b/hack/golangci-hints.yaml
index f55310c88ed..b9e0d4e1c63 100644
--- a/hack/golangci-hints.yaml
+++ b/hack/golangci-hints.yaml
@@ -90,6 +90,7 @@ linters:
     - revive
     - staticcheck
     - stylecheck
+    - testifylint
     - unused
 
 linters-settings: # please keep this alphabetized
@@ -179,3 +180,5 @@ linters-settings: # please keep this alphabetized
   staticcheck:
     checks:
       - "all"
+  testifylint:
+    enable-all: true
diff --git a/hack/golangci-strict.yaml b/hack/golangci-strict.yaml
index f2116689d46..52dca4c1f9a 100644
--- a/hack/golangci-strict.yaml
+++ b/hack/golangci-strict.yaml
@@ -133,6 +133,7 @@ linters:
     - revive
     - staticcheck
     - stylecheck
+    - testifylint
     - unused
   disable:
     # https://github.com/kubernetes/kubernetes/issues/117288#issuecomment-1507008359
@@ -219,3 +220,5 @@ linters-settings: # please keep this alphabetized
   staticcheck:
     checks:
       - "all"
+  testifylint:
+    enable-all: true
diff --git a/hack/golangci.yaml b/hack/golangci.yaml
index c0a06bd3ded..729a924272b 100644
--- a/hack/golangci.yaml
+++ b/hack/golangci.yaml
@@ -139,6 +139,7 @@ linters:
     - revive
     - staticcheck
     - stylecheck
+    - testifylint
     - unused
 
 linters-settings: # please keep this alphabetized
@@ -231,3 +232,17 @@ linters-settings: # please keep this alphabetized
   stylecheck:
     checks:
       - "ST1019" # Importing the same package multiple times
+  testifylint:
+    enable-all: true
+    disable: # TODO: remove each disabled rule and fix it
+      - blank-import
+      - compares
+      - empty
+      - error-is-as
+      - error-nil
+      - expected-actual
+      - float-compare
+      - go-require
+      - len
+      - nil-compare
+      - require-error
diff --git a/hack/golangci.yaml.in b/hack/golangci.yaml.in
index 7094669400e..fdf09cf4968 100644
--- a/hack/golangci.yaml.in
+++ b/hack/golangci.yaml.in
@@ -148,6 +148,7 @@ linters:
     - revive
     - staticcheck
     - stylecheck
+    - testifylint
     - unused
 {{- if .Strict}}
   disable:
@@ -206,3 +207,19 @@ linters-settings: # please keep this alphabetized
     checks:
       - "ST1019" # Importing the same package multiple times
 {{- end}}
+  testifylint:
+    enable-all: true
+    {{- if .Base}}
+    disable: # TODO: remove each disabled rule and fix it
+      - blank-import
+      - compares
+      - empty
+      - error-is-as
+      - error-nil
+      - expected-actual
+      - float-compare
+      - go-require
+      - len
+      - nil-compare
+      - require-error
+    {{- end}}
diff --git a/pkg/controller/endpointslice/endpointslice_controller_test.go b/pkg/controller/endpointslice/endpointslice_controller_test.go
index a6a2a22cd0d..0494cde95d8 100644
--- a/pkg/controller/endpointslice/endpointslice_controller_test.go
+++ b/pkg/controller/endpointslice/endpointslice_controller_test.go
@@ -1685,7 +1685,7 @@ func TestPodDeleteBatching(t *testing.T) {
 
 			old, exists, err := esController.podStore.GetByKey(fmt.Sprintf("%s/%s", ns, update.podName))
 			assert.Nil(t, err, "error while retrieving old value of %q: %v", update.podName, err)
-			assert.Equal(t, true, exists, "pod should exist")
+			assert.True(t, exists, "pod should exist")
 			esController.podStore.Delete(old)
 			esController.deletePod(old)
 		}
diff --git a/pkg/kubelet/cm/dra/claiminfo_test.go b/pkg/kubelet/cm/dra/claiminfo_test.go
index 58652cc605c..ffa2ce74aa1 100644
--- a/pkg/kubelet/cm/dra/claiminfo_test.go
+++ b/pkg/kubelet/cm/dra/claiminfo_test.go
@@ -488,7 +488,7 @@ func TestClaimInfoSetPrepared(t *testing.T) {
 	} {
 		t.Run(test.description, func(t *testing.T) {
 			test.claimInfo.setPrepared()
-			assert.Equal(t, test.claimInfo.isPrepared(), true)
+			assert.True(t, test.claimInfo.isPrepared())
 		})
 	}
 }
diff --git a/pkg/kubelet/container/helpers_test.go b/pkg/kubelet/container/helpers_test.go
index d44c386164d..5e65734c70d 100644
--- a/pkg/kubelet/container/helpers_test.go
+++ b/pkg/kubelet/container/helpers_test.go
@@ -680,21 +680,21 @@ func TestShouldRecordEvent(t *testing.T) {
 	}
 
 	_, actual := innerEventRecorder.shouldRecordEvent(nil)
-	assert.Equal(t, false, actual)
+	assert.False(t, actual)
 
 	var obj = &v1.ObjectReference{Namespace: "claimrefns", Name: "claimrefname"}
 
 	_, actual = innerEventRecorder.shouldRecordEvent(obj)
-	assert.Equal(t, true, actual)
+	assert.True(t, actual)
 
 	obj = &v1.ObjectReference{Namespace: "system", Name: "infra", FieldPath: "implicitly required container "}
 
 	_, actual = innerEventRecorder.shouldRecordEvent(obj)
-	assert.Equal(t, false, actual)
+	assert.False(t, actual)
 
 	var nilObj *v1.ObjectReference = nil
 	_, actual = innerEventRecorder.shouldRecordEvent(nilObj)
-	assert.Equal(t, false, actual, "should not panic if the typed nil was used, see https://github.com/kubernetes/kubernetes/issues/95552")
+	assert.False(t, actual, "should not panic if the typed nil was used, see https://github.com/kubernetes/kubernetes/issues/95552")
 }
 
 func TestHasWindowsHostProcessContainer(t *testing.T) {
diff --git a/pkg/kubelet/kubelet_getters_test.go b/pkg/kubelet/kubelet_getters_test.go
index 71f95417201..b66bc23d2c6 100644
--- a/pkg/kubelet/kubelet_getters_test.go
+++ b/pkg/kubelet/kubelet_getters_test.go
@@ -121,14 +121,14 @@ func TestHandlerSupportsUserNamespaces(t *testing.T) {
 	})
 
 	got, err := kubelet.HandlerSupportsUserNamespaces("has-support")
-	assert.Equal(t, true, got)
+	assert.True(t, got)
 	assert.NoError(t, err)
 
 	got, err = kubelet.HandlerSupportsUserNamespaces("has-no-support")
-	assert.Equal(t, false, got)
+	assert.False(t, got)
 	assert.NoError(t, err)
 
 	got, err = kubelet.HandlerSupportsUserNamespaces("unknown")
-	assert.Equal(t, false, got)
+	assert.False(t, got)
 	assert.Error(t, err)
 }
diff --git a/pkg/kubelet/kubelet_node_status_test.go b/pkg/kubelet/kubelet_node_status_test.go
index 3a4db819069..906ec307a01 100644
--- a/pkg/kubelet/kubelet_node_status_test.go
+++ b/pkg/kubelet/kubelet_node_status_test.go
@@ -2732,8 +2732,7 @@ func TestRegisterWithApiServerWithTaint(t *testing.T) {
 		Effect: v1.TaintEffectNoSchedule,
 	}
 
-	require.Equal(t,
-		true,
+	require.True(t,
 		taintutil.TaintExists(got.Spec.Taints, unschedulableTaint),
 		"test unschedulable taint for TaintNodesByCondition")
 }
diff --git a/pkg/kubelet/nodeshutdown/nodeshutdown_manager_linux_test.go b/pkg/kubelet/nodeshutdown/nodeshutdown_manager_linux_test.go
index 9d36ef8797e..a0453a87627 100644
--- a/pkg/kubelet/nodeshutdown/nodeshutdown_manager_linux_test.go
+++ b/pkg/kubelet/nodeshutdown/nodeshutdown_manager_linux_test.go
@@ -364,7 +364,7 @@ func TestManager(t *testing.T) {
 			assert.NoError(t, err, "expected manager.Start() to not return error")
 			assert.True(t, fakeDbus.didInhibitShutdown, "expected that manager inhibited shutdown")
 			assert.NoError(t, manager.ShutdownStatus(), "expected that manager does not return error since shutdown is not active")
-			assert.Equal(t, manager.Admit(nil).Admit, true)
+			assert.True(t, manager.Admit(nil).Admit)
 
 			// Send fake shutdown event
 			select {
@@ -386,7 +386,7 @@ func TestManager(t *testing.T) {
 			}
 
 			assert.Error(t, manager.ShutdownStatus(), "expected that manager returns error since shutdown is active")
-			assert.Equal(t, manager.Admit(nil).Admit, false)
+			assert.False(t, manager.Admit(nil).Admit)
 			assert.Equal(t, tc.expectedPodToGracePeriodOverride, killedPodsToGracePeriods)
 			assert.Equal(t, tc.expectedDidOverrideInhibitDelay, fakeDbus.didOverrideInhibitDelay, "override system inhibit delay differs")
 			if tc.expectedPodStatuses != nil {
diff --git a/pkg/kubelet/pleg/evented_test.go b/pkg/kubelet/pleg/evented_test.go
index 10ddb01dd1b..1b0ee548a95 100644
--- a/pkg/kubelet/pleg/evented_test.go
+++ b/pkg/kubelet/pleg/evented_test.go
@@ -58,7 +58,7 @@ func TestHealthyEventedPLEG(t *testing.T) {
 	// test if healthy when event channel has 5 events
 	isHealthy, err := pleg.Healthy()
 	require.NoError(t, err)
-	assert.Equal(t, true, isHealthy)
+	assert.True(t, isHealthy)
 
 	// send remaining 95 events and make channel out of capacity
 	for _, event := range events[5:] {
@@ -67,7 +67,7 @@
 	// pleg is unhealthy when channel is out of capacity
 	isHealthy, err = pleg.Healthy()
 	require.Error(t, err)
-	assert.Equal(t, false, isHealthy)
+	assert.False(t, isHealthy)
 }
 
 func TestUpdateRunningPodMetric(t *testing.T) {
diff --git a/pkg/kubelet/userns/userns_manager_test.go b/pkg/kubelet/userns/userns_manager_test.go
index cd9520b2b7d..6aa497b6c5a 100644
--- a/pkg/kubelet/userns/userns_manager_test.go
+++ b/pkg/kubelet/userns/userns_manager_test.go
@@ -98,7 +98,7 @@ func TestUserNsManagerAllocate(t *testing.T) {
 	allocated, length, err := m.allocateOne("one")
 	assert.NoError(t, err)
 	assert.Equal(t, userNsLength, int(length), "m.isSet(%d).length=%v", allocated, length)
-	assert.Equal(t, true, m.isSet(allocated), "m.isSet(%d)", allocated)
+	assert.True(t, m.isSet(allocated), "m.isSet(%d)", allocated)
 
 	allocated2, length2, err := m.allocateOne("two")
 	assert.NoError(t, err)
@@ -114,8 +114,8 @@ func TestUserNsManagerAllocate(t *testing.T) {
 	m.Release("one")
 	m.Release("two")
-	assert.Equal(t, false, m.isSet(allocated), "m.isSet(%d)", allocated)
-	assert.Equal(t, false, m.isSet(allocated2), "m.nsSet(%d)", allocated2)
+	assert.False(t, m.isSet(allocated), "m.isSet(%d)", allocated)
+	assert.False(t, m.isSet(allocated2), "m.nsSet(%d)", allocated2)
 
 	var allocs []uint32
 	for i := 0; i < 1000; i++ {
@@ -128,14 +128,14 @@ func TestUserNsManagerAllocate(t *testing.T) {
 		allocs = append(allocs, allocated)
 	}
 	for i, v := range allocs {
-		assert.Equal(t, true, m.isSet(v), "m.isSet(%d) should be true", v)
+		assert.True(t, m.isSet(v), "m.isSet(%d) should be true", v)
 		m.Release(types.UID(fmt.Sprintf("%d", i)))
-		assert.Equal(t, false, m.isSet(v), "m.isSet(%d) should be false", v)
+		assert.False(t, m.isSet(v), "m.isSet(%d) should be false", v)
 
 		err = m.record(types.UID(fmt.Sprintf("%d", i)), v, userNsLength)
 		assert.NoError(t, err)
 		m.Release(types.UID(fmt.Sprintf("%d", i)))
-		assert.Equal(t, false, m.isSet(v), "m.isSet(%d) should be false", v)
+		assert.False(t, m.isSet(v), "m.isSet(%d) should be false", v)
 	}
 }
diff --git a/pkg/proxy/ipvs/proxier_test.go b/pkg/proxy/ipvs/proxier_test.go
index ce65a09af5b..30e9427822c 100644
--- a/pkg/proxy/ipvs/proxier_test.go
+++ b/pkg/proxy/ipvs/proxier_test.go
@@ -4408,7 +4408,7 @@ func TestEndpointSliceE2E(t *testing.T) {
 	assert.NotNil(t, fp.ipsetList["KUBE-LOOP-BACK"])
 	activeEntries1 := fp.ipsetList["KUBE-LOOP-BACK"].activeEntries
 	assert.Equal(t, 1, activeEntries1.Len(), "Expected 1 active entry in KUBE-LOOP-BACK")
-	assert.Equal(t, true, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
+	assert.True(t, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
 	virtualServers1, vsErr1 := ipvs.GetVirtualServers()
 	assert.Nil(t, vsErr1, "Expected no error getting virtual servers")
 	assert.Len(t, virtualServers1, 1, "Expected 1 virtual server")
@@ -4465,7 +4465,7 @@ func TestHealthCheckNodePortE2E(t *testing.T) {
 	assert.NotNil(t, fp.ipsetList["KUBE-HEALTH-CHECK-NODE-PORT"])
 	activeEntries1 := fp.ipsetList["KUBE-HEALTH-CHECK-NODE-PORT"].activeEntries
 	assert.Equal(t, 1, activeEntries1.Len(), "Expected 1 active entry in KUBE-HEALTH-CHECK-NODE-PORT")
-	assert.Equal(t, true, activeEntries1.Has("30000"), "Expected activeEntries to reference hc node port in spec")
+	assert.True(t, activeEntries1.Has("30000"), "Expected activeEntries to reference hc node port in spec")
 
 	// Update health check node port in the spec
 	newSvc := svc
@@ -4477,7 +4477,7 @@ func TestHealthCheckNodePortE2E(t *testing.T) {
 	assert.NotNil(t, fp.ipsetList["KUBE-HEALTH-CHECK-NODE-PORT"])
 	activeEntries2 := fp.ipsetList["KUBE-HEALTH-CHECK-NODE-PORT"].activeEntries
 	assert.Equal(t, 1, activeEntries2.Len(), "Expected 1 active entry in KUBE-HEALTH-CHECK-NODE-PORT")
-	assert.Equal(t, true, activeEntries2.Has("30001"), "Expected activeEntries to reference updated hc node port in spec")
+	assert.True(t, activeEntries2.Has("30001"), "Expected activeEntries to reference updated hc node port in spec")
 
 	fp.OnServiceDelete(&svc)
 	fp.syncProxyRules()
@@ -4939,10 +4939,10 @@ func Test_EndpointSliceReadyAndTerminatingCluster(t *testing.T) {
 	assert.NotNil(t, fp.ipsetList["KUBE-LOOP-BACK"])
 	activeEntries1 := fp.ipsetList["KUBE-LOOP-BACK"].activeEntries
 	assert.Equal(t, 4, activeEntries1.Len(), "Expected 4 active entry in KUBE-LOOP-BACK")
-	assert.Equal(t, true, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first pod")
-	assert.Equal(t, true, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second pod")
-	assert.Equal(t, true, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference third pod")
-	assert.Equal(t, true, activeEntries1.Has("10.0.1.4,tcp:80,10.0.1.4"), "Expected activeEntries to reference fourth pod")
+	assert.True(t, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first pod")
+	assert.True(t, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second pod")
+	assert.True(t, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference third pod")
+	assert.True(t, activeEntries1.Has("10.0.1.4,tcp:80,10.0.1.4"), "Expected activeEntries to reference fourth pod")
 
 	virtualServers, vsErr := ipvs.GetVirtualServers()
 	assert.Nil(t, vsErr, "Expected no error getting virtual servers")
@@ -5112,10 +5112,10 @@ func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) {
 	assert.NotNil(t, fp.ipsetList["KUBE-LOOP-BACK"])
 	activeEntries1 := fp.ipsetList["KUBE-LOOP-BACK"].activeEntries
 	assert.Equal(t, 4, activeEntries1.Len(), "Expected 3 active entry in KUBE-LOOP-BACK")
-	assert.Equal(t, true, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
-	assert.Equal(t, true, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second (local) pod")
-	assert.Equal(t, true, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference second (local) pod")
-	assert.Equal(t, true, activeEntries1.Has("10.0.1.4,tcp:80,10.0.1.4"), "Expected activeEntries to reference second (local) pod")
+	assert.True(t, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
+	assert.True(t, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second (local) pod")
+	assert.True(t, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference second (local) pod")
+	assert.True(t, activeEntries1.Has("10.0.1.4,tcp:80,10.0.1.4"), "Expected activeEntries to reference second (local) pod")
 
 	virtualServers, vsErr := ipvs.GetVirtualServers()
 	assert.Nil(t, vsErr, "Expected no error getting virtual servers")
@@ -5284,9 +5284,9 @@ func Test_EndpointSliceOnlyReadyAndTerminatingCluster(t *testing.T) {
 	assert.NotNil(t, fp.ipsetList["KUBE-LOOP-BACK"])
 	activeEntries1 := fp.ipsetList["KUBE-LOOP-BACK"].activeEntries
 	assert.Equal(t, 3, activeEntries1.Len(), "Expected 3 active entry in KUBE-LOOP-BACK")
-	assert.Equal(t, true, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
-	assert.Equal(t, true, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second (local) pod")
-	assert.Equal(t, true, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference second (local) pod")
+	assert.True(t, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
+	assert.True(t, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second (local) pod")
+	assert.True(t, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference second (local) pod")
 
 	virtualServers, vsErr := ipvs.GetVirtualServers()
 	assert.Nil(t, vsErr, "Expected no error getting virtual servers")
@@ -5456,9 +5456,9 @@ func Test_EndpointSliceOnlyReadyAndTerminatingLocal(t *testing.T) {
 	assert.NotNil(t, fp.ipsetList["KUBE-LOOP-BACK"])
 	activeEntries1 := fp.ipsetList["KUBE-LOOP-BACK"].activeEntries
 	assert.Equal(t, 3, activeEntries1.Len(), "Expected 3 active entry in KUBE-LOOP-BACK")
-	assert.Equal(t, true, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
-	assert.Equal(t, true, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second (local) pod")
-	assert.Equal(t, true, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference second (local) pod")
+	assert.True(t, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
+	assert.True(t, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second (local) pod")
+	assert.True(t, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference second (local) pod")
 
 	virtualServers, vsErr := ipvs.GetVirtualServers()
 	assert.Nil(t, vsErr, "Expected no error getting virtual servers")
diff --git a/staging/src/k8s.io/apimachinery/pkg/watch/mux_test.go b/staging/src/k8s.io/apimachinery/pkg/watch/mux_test.go
index aa3f3b7bb6d..9b46ce77e98 100644
--- a/staging/src/k8s.io/apimachinery/pkg/watch/mux_test.go
+++ b/staging/src/k8s.io/apimachinery/pkg/watch/mux_test.go
@@ -242,7 +242,7 @@ func TestBroadcasterSendEventAfterShutdown(t *testing.T) {
 	assert.EqualError(t, err, "broadcaster already stopped", "ActionOrDrop should report error id broadcaster is shutdown")
 
 	sendOnClosed, err := m.ActionOrDrop(event.Type, event.Object)
-	assert.Equal(t, sendOnClosed, false, "ActionOrDrop should return false if broadcaster is already shutdown")
+	assert.False(t, sendOnClosed, "ActionOrDrop should return false if broadcaster is already shutdown")
 	assert.EqualError(t, err, "broadcaster already stopped", "ActionOrDrop should report error id broadcaster is shutdown")
 }
diff --git a/staging/src/k8s.io/cri-client/pkg/logs/logs_test.go b/staging/src/k8s.io/cri-client/pkg/logs/logs_test.go
index cf53d7a5f5f..00262cc3b41 100644
--- a/staging/src/k8s.io/cri-client/pkg/logs/logs_test.go
+++ b/staging/src/k8s.io/cri-client/pkg/logs/logs_test.go
@@ -535,7 +535,7 @@ func TestReadLogsLimitsWithTimestamps(t *testing.T) {
 		// 2. The last item in the log should be 9999
 		_, err = time.Parse(time.RFC3339, string(ts))
 		assert.NoError(t, err, "timestamp not found")
-		assert.Equal(t, true, bytes.HasSuffix(logline, []byte("9999")), "is the complete log found")
+		assert.True(t, bytes.HasSuffix(logline, []byte("9999")), "is the complete log found")
 	}
 
 	assert.Equal(t, 2, lineCount, "should have two lines")
diff --git a/test/integration/client/cert_rotation_test.go b/test/integration/client/cert_rotation_test.go
index 41f9f45a446..4bf2e9b13f8 100644
--- a/test/integration/client/cert_rotation_test.go
+++ b/test/integration/client/cert_rotation_test.go
@@ -84,7 +84,7 @@ func TestCertRotation(t *testing.T) {
 	// Should have had a rotation; connections will have been closed
 	select {
 	case _, ok := <-w.ResultChan():
-		assert.Equal(t, false, ok)
+		assert.False(t, ok)
 	default:
 		t.Fatal("Watch wasn't closed despite rotation")
 	}