fix: enable bool-compare rule from testifylint linter (#125135)

* fix: enable bool-compare rule from testifylint linter

Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>

* Update hack/golangci.yaml.in

Co-authored-by: Patrick Ohly <patrick.ohly@intel.com>

* Update golangci.yaml.in

* Update golangci-strict.yaml

* Update golangci.yaml.in

* Update golangci.yaml.in

* Update golangci.yaml.in

* Update golangci.yaml.in

* Update golangci.yaml

* Update golangci-hints.yaml

* Update golangci-strict.yaml

* Update golangci.yaml.in

* Update golangci.yaml

* Update mux_test.go

---------

Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Co-authored-by: Patrick Ohly <patrick.ohly@intel.com>
Matthieu MOREL 2024-06-28 19:58:05 +02:00 committed by GitHub
parent bcadbfcc55
commit 0cde5f1e28
16 changed files with 78 additions and 41 deletions
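
For context: testifylint's bool-compare rule flags boolean assertions written through assert.Equal / require.Equal and suggests the dedicated True/False helpers, which fail with clearer output. A minimal sketch of the pattern this commit applies throughout (hypothetical test, not taken from this commit):

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestBoolCompareSketch(t *testing.T) {
	ready := true

	// Flagged by bool-compare: a boolean compared via Equal.
	assert.Equal(t, true, ready)

	// Preferred form; on failure testify reports "Should be true"
	// instead of a generic not-equal diff.
	assert.True(t, ready)
}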

View File

@@ -90,6 +90,7 @@ linters:
- revive
- staticcheck
- stylecheck
- testifylint
- unused
linters-settings: # please keep this alphabetized
@@ -179,3 +180,5 @@ linters-settings: # please keep this alphabetized
staticcheck:
checks:
- "all"
testifylint:
enable-all: true

View File

@@ -133,6 +133,7 @@ linters:
- revive
- staticcheck
- stylecheck
- testifylint
- unused
disable:
# https://github.com/kubernetes/kubernetes/issues/117288#issuecomment-1507008359
@@ -219,3 +220,5 @@ linters-settings: # please keep this alphabetized
staticcheck:
checks:
- "all"
testifylint:
enable-all: true

View File

@@ -139,6 +139,7 @@ linters:
- revive
- staticcheck
- stylecheck
- testifylint
- unused
linters-settings: # please keep this alphabetized
@@ -231,3 +232,17 @@ linters-settings: # please keep this alphabetized
stylecheck:
checks:
- "ST1019" # Importing the same package multiple times
testifylint:
enable-all: true
disable: # TODO: remove each disabled rule and fix it
- blank-import
- compares
- empty
- error-is-as
- error-nil
- expected-actual
- float-compare
- go-require
- len
- nil-compare
- require-error
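
The base configuration above enables every testifylint rule via enable-all and then temporarily disables the rules that still have violations in the tree. For orientation, a hypothetical test sketching what a few of the still-disabled rules would report, based on testifylint's documented checks:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestStillDisabledRulesSketch(t *testing.T) {
	items := []string{"a", "b"}

	// expected-actual: the expected value should be the first argument.
	assert.Equal(t, len(items), 2) // flagged: expected and actual reversed

	// len: prefer the dedicated length assertion.
	assert.Len(t, items, 2)

	// error-nil: check errors with the error helpers.
	var err error
	assert.Nil(t, err)     // flagged
	assert.NoError(t, err) // preferred
}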

View File

@@ -148,6 +148,7 @@ linters:
- revive
- staticcheck
- stylecheck
- testifylint
- unused
{{- if .Strict}}
disable:
@@ -206,3 +207,19 @@ linters-settings: # please keep this alphabetized
checks:
- "ST1019" # Importing the same package multiple times
{{- end}}
testifylint:
enable-all: true
{{- if .Base}}
disable: # TODO: remove each disabled rule and fix it
- blank-import
- compares
- empty
- error-is-as
- error-nil
- expected-actual
- float-compare
- go-require
- len
- nil-compare
- require-error
{{- end}}

View File

@@ -1685,7 +1685,7 @@ func TestPodDeleteBatching(t *testing.T) {
old, exists, err := esController.podStore.GetByKey(fmt.Sprintf("%s/%s", ns, update.podName))
assert.Nil(t, err, "error while retrieving old value of %q: %v", update.podName, err)
assert.Equal(t, true, exists, "pod should exist")
assert.True(t, exists, "pod should exist")
esController.podStore.Delete(old)
esController.deletePod(old)
}

View File

@@ -488,7 +488,7 @@ func TestClaimInfoSetPrepared(t *testing.T) {
} {
t.Run(test.description, func(t *testing.T) {
test.claimInfo.setPrepared()
assert.Equal(t, test.claimInfo.isPrepared(), true)
assert.True(t, test.claimInfo.isPrepared())
})
}
}

View File

@@ -680,21 +680,21 @@ func TestShouldRecordEvent(t *testing.T) {
}
_, actual := innerEventRecorder.shouldRecordEvent(nil)
assert.Equal(t, false, actual)
assert.False(t, actual)
var obj = &v1.ObjectReference{Namespace: "claimrefns", Name: "claimrefname"}
_, actual = innerEventRecorder.shouldRecordEvent(obj)
assert.Equal(t, true, actual)
assert.True(t, actual)
obj = &v1.ObjectReference{Namespace: "system", Name: "infra", FieldPath: "implicitly required container "}
_, actual = innerEventRecorder.shouldRecordEvent(obj)
assert.Equal(t, false, actual)
assert.False(t, actual)
var nilObj *v1.ObjectReference = nil
_, actual = innerEventRecorder.shouldRecordEvent(nilObj)
assert.Equal(t, false, actual, "should not panic if the typed nil was used, see https://github.com/kubernetes/kubernetes/issues/95552")
assert.False(t, actual, "should not panic if the typed nil was used, see https://github.com/kubernetes/kubernetes/issues/95552")
}
func TestHasWindowsHostProcessContainer(t *testing.T) {

View File

@@ -121,14 +121,14 @@ func TestHandlerSupportsUserNamespaces(t *testing.T) {
})
got, err := kubelet.HandlerSupportsUserNamespaces("has-support")
assert.Equal(t, true, got)
assert.True(t, got)
assert.NoError(t, err)
got, err = kubelet.HandlerSupportsUserNamespaces("has-no-support")
assert.Equal(t, false, got)
assert.False(t, got)
assert.NoError(t, err)
got, err = kubelet.HandlerSupportsUserNamespaces("unknown")
assert.Equal(t, false, got)
assert.False(t, got)
assert.Error(t, err)
}

View File

@@ -2732,8 +2732,7 @@ func TestRegisterWithApiServerWithTaint(t *testing.T) {
Effect: v1.TaintEffectNoSchedule,
}
require.Equal(t,
true,
require.True(t,
taintutil.TaintExists(got.Spec.Taints, unschedulableTaint),
"test unschedulable taint for TaintNodesByCondition")
}
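
As the require.Equal -> require.True change above shows, bool-compare covers both testify packages. For readers unfamiliar with the split, a minimal illustrative sketch of the behavioral difference (not part of this commit):

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestAssertVsRequireSketch(t *testing.T) {
	ok := false

	// assert marks the test failed but lets it keep running.
	assert.True(t, ok)

	// require marks the test failed and stops it immediately
	// (it calls t.FailNow under the hood).
	require.True(t, ok)
}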

View File

@@ -364,7 +364,7 @@ func TestManager(t *testing.T) {
assert.NoError(t, err, "expected manager.Start() to not return error")
assert.True(t, fakeDbus.didInhibitShutdown, "expected that manager inhibited shutdown")
assert.NoError(t, manager.ShutdownStatus(), "expected that manager does not return error since shutdown is not active")
assert.Equal(t, manager.Admit(nil).Admit, true)
assert.True(t, manager.Admit(nil).Admit)
// Send fake shutdown event
select {
@@ -386,7 +386,7 @@ func TestManager(t *testing.T) {
}
assert.Error(t, manager.ShutdownStatus(), "expected that manager returns error since shutdown is active")
assert.Equal(t, manager.Admit(nil).Admit, false)
assert.False(t, manager.Admit(nil).Admit)
assert.Equal(t, tc.expectedPodToGracePeriodOverride, killedPodsToGracePeriods)
assert.Equal(t, tc.expectedDidOverrideInhibitDelay, fakeDbus.didOverrideInhibitDelay, "override system inhibit delay differs")
if tc.expectedPodStatuses != nil {

View File

@@ -58,7 +58,7 @@ func TestHealthyEventedPLEG(t *testing.T) {
// test if healthy when event channel has 5 events
isHealthy, err := pleg.Healthy()
require.NoError(t, err)
assert.Equal(t, true, isHealthy)
assert.True(t, isHealthy)
// send remaining 95 events and make channel out of capacity
for _, event := range events[5:] {
@@ -67,7 +67,7 @@ func TestHealthyEventedPLEG(t *testing.T) {
// pleg is unhealthy when channel is out of capacity
isHealthy, err = pleg.Healthy()
require.Error(t, err)
assert.Equal(t, false, isHealthy)
assert.False(t, isHealthy)
}
func TestUpdateRunningPodMetric(t *testing.T) {

View File

@@ -98,7 +98,7 @@ func TestUserNsManagerAllocate(t *testing.T) {
allocated, length, err := m.allocateOne("one")
assert.NoError(t, err)
assert.Equal(t, userNsLength, int(length), "m.isSet(%d).length=%v", allocated, length)
assert.Equal(t, true, m.isSet(allocated), "m.isSet(%d)", allocated)
assert.True(t, m.isSet(allocated), "m.isSet(%d)", allocated)
allocated2, length2, err := m.allocateOne("two")
assert.NoError(t, err)
@@ -114,8 +114,8 @@ func TestUserNsManagerAllocate(t *testing.T) {
m.Release("one")
m.Release("two")
assert.Equal(t, false, m.isSet(allocated), "m.isSet(%d)", allocated)
assert.Equal(t, false, m.isSet(allocated2), "m.isSet(%d)", allocated2)
assert.False(t, m.isSet(allocated), "m.isSet(%d)", allocated)
assert.False(t, m.isSet(allocated2), "m.isSet(%d)", allocated2)
var allocs []uint32
for i := 0; i < 1000; i++ {
@@ -128,14 +128,14 @@ func TestUserNsManagerAllocate(t *testing.T) {
allocs = append(allocs, allocated)
}
for i, v := range allocs {
assert.Equal(t, true, m.isSet(v), "m.isSet(%d) should be true", v)
assert.True(t, m.isSet(v), "m.isSet(%d) should be true", v)
m.Release(types.UID(fmt.Sprintf("%d", i)))
assert.Equal(t, false, m.isSet(v), "m.isSet(%d) should be false", v)
assert.False(t, m.isSet(v), "m.isSet(%d) should be false", v)
err = m.record(types.UID(fmt.Sprintf("%d", i)), v, userNsLength)
assert.NoError(t, err)
m.Release(types.UID(fmt.Sprintf("%d", i)))
assert.Equal(t, false, m.isSet(v), "m.isSet(%d) should be false", v)
assert.False(t, m.isSet(v), "m.isSet(%d) should be false", v)
}
}

View File

@@ -4408,7 +4408,7 @@ func TestEndpointSliceE2E(t *testing.T) {
assert.NotNil(t, fp.ipsetList["KUBE-LOOP-BACK"])
activeEntries1 := fp.ipsetList["KUBE-LOOP-BACK"].activeEntries
assert.Equal(t, 1, activeEntries1.Len(), "Expected 1 active entry in KUBE-LOOP-BACK")
assert.Equal(t, true, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
assert.True(t, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
virtualServers1, vsErr1 := ipvs.GetVirtualServers()
assert.Nil(t, vsErr1, "Expected no error getting virtual servers")
assert.Len(t, virtualServers1, 1, "Expected 1 virtual server")
@@ -4465,7 +4465,7 @@ func TestHealthCheckNodePortE2E(t *testing.T) {
assert.NotNil(t, fp.ipsetList["KUBE-HEALTH-CHECK-NODE-PORT"])
activeEntries1 := fp.ipsetList["KUBE-HEALTH-CHECK-NODE-PORT"].activeEntries
assert.Equal(t, 1, activeEntries1.Len(), "Expected 1 active entry in KUBE-HEALTH-CHECK-NODE-PORT")
assert.Equal(t, true, activeEntries1.Has("30000"), "Expected activeEntries to reference hc node port in spec")
assert.True(t, activeEntries1.Has("30000"), "Expected activeEntries to reference hc node port in spec")
// Update health check node port in the spec
newSvc := svc
@@ -4477,7 +4477,7 @@ func TestHealthCheckNodePortE2E(t *testing.T) {
assert.NotNil(t, fp.ipsetList["KUBE-HEALTH-CHECK-NODE-PORT"])
activeEntries2 := fp.ipsetList["KUBE-HEALTH-CHECK-NODE-PORT"].activeEntries
assert.Equal(t, 1, activeEntries2.Len(), "Expected 1 active entry in KUBE-HEALTH-CHECK-NODE-PORT")
assert.Equal(t, true, activeEntries2.Has("30001"), "Expected activeEntries to reference updated hc node port in spec")
assert.True(t, activeEntries2.Has("30001"), "Expected activeEntries to reference updated hc node port in spec")
fp.OnServiceDelete(&svc)
fp.syncProxyRules()
@@ -4939,10 +4939,10 @@ func Test_EndpointSliceReadyAndTerminatingCluster(t *testing.T) {
assert.NotNil(t, fp.ipsetList["KUBE-LOOP-BACK"])
activeEntries1 := fp.ipsetList["KUBE-LOOP-BACK"].activeEntries
assert.Equal(t, 4, activeEntries1.Len(), "Expected 4 active entries in KUBE-LOOP-BACK")
assert.Equal(t, true, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first pod")
assert.Equal(t, true, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second pod")
assert.Equal(t, true, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference third pod")
assert.Equal(t, true, activeEntries1.Has("10.0.1.4,tcp:80,10.0.1.4"), "Expected activeEntries to reference fourth pod")
assert.True(t, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first pod")
assert.True(t, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second pod")
assert.True(t, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference third pod")
assert.True(t, activeEntries1.Has("10.0.1.4,tcp:80,10.0.1.4"), "Expected activeEntries to reference fourth pod")
virtualServers, vsErr := ipvs.GetVirtualServers()
assert.Nil(t, vsErr, "Expected no error getting virtual servers")
@@ -5112,10 +5112,10 @@ func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) {
assert.NotNil(t, fp.ipsetList["KUBE-LOOP-BACK"])
activeEntries1 := fp.ipsetList["KUBE-LOOP-BACK"].activeEntries
assert.Equal(t, 4, activeEntries1.Len(), "Expected 4 active entries in KUBE-LOOP-BACK")
assert.Equal(t, true, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
assert.Equal(t, true, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second (local) pod")
assert.Equal(t, true, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference third (local) pod")
assert.Equal(t, true, activeEntries1.Has("10.0.1.4,tcp:80,10.0.1.4"), "Expected activeEntries to reference fourth (local) pod")
assert.True(t, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
assert.True(t, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second (local) pod")
assert.True(t, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference third (local) pod")
assert.True(t, activeEntries1.Has("10.0.1.4,tcp:80,10.0.1.4"), "Expected activeEntries to reference fourth (local) pod")
virtualServers, vsErr := ipvs.GetVirtualServers()
assert.Nil(t, vsErr, "Expected no error getting virtual servers")
@@ -5284,9 +5284,9 @@ func Test_EndpointSliceOnlyReadyAndTerminatingCluster(t *testing.T) {
assert.NotNil(t, fp.ipsetList["KUBE-LOOP-BACK"])
activeEntries1 := fp.ipsetList["KUBE-LOOP-BACK"].activeEntries
assert.Equal(t, 3, activeEntries1.Len(), "Expected 3 active entries in KUBE-LOOP-BACK")
assert.Equal(t, true, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
assert.Equal(t, true, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second (local) pod")
assert.Equal(t, true, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference third (local) pod")
assert.True(t, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
assert.True(t, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second (local) pod")
assert.True(t, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference third (local) pod")
virtualServers, vsErr := ipvs.GetVirtualServers()
assert.Nil(t, vsErr, "Expected no error getting virtual servers")
@@ -5456,9 +5456,9 @@ func Test_EndpointSliceOnlyReadyAndTerminatingLocal(t *testing.T) {
assert.NotNil(t, fp.ipsetList["KUBE-LOOP-BACK"])
activeEntries1 := fp.ipsetList["KUBE-LOOP-BACK"].activeEntries
assert.Equal(t, 3, activeEntries1.Len(), "Expected 3 active entries in KUBE-LOOP-BACK")
assert.Equal(t, true, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
assert.Equal(t, true, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second (local) pod")
assert.Equal(t, true, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference third (local) pod")
assert.True(t, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
assert.True(t, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second (local) pod")
assert.True(t, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference third (local) pod")
virtualServers, vsErr := ipvs.GetVirtualServers()
assert.Nil(t, vsErr, "Expected no error getting virtual servers")

View File

@@ -242,7 +242,7 @@ func TestBroadcasterSendEventAfterShutdown(t *testing.T) {
assert.EqualError(t, err, "broadcaster already stopped", "ActionOrDrop should report error if broadcaster is shutdown")
sendOnClosed, err := m.ActionOrDrop(event.Type, event.Object)
assert.Equal(t, sendOnClosed, false, "ActionOrDrop should return false if broadcaster is already shutdown")
assert.False(t, sendOnClosed, "ActionOrDrop should return false if broadcaster is already shutdown")
assert.EqualError(t, err, "broadcaster already stopped", "ActionOrDrop should report error if broadcaster is shutdown")
}

View File

@@ -535,7 +535,7 @@ func TestReadLogsLimitsWithTimestamps(t *testing.T) {
// 2. The last item in the log should be 9999
_, err = time.Parse(time.RFC3339, string(ts))
assert.NoError(t, err, "timestamp not found")
assert.Equal(t, true, bytes.HasSuffix(logline, []byte("9999")), "is the complete log found")
assert.True(t, bytes.HasSuffix(logline, []byte("9999")), "is the complete log found")
}
assert.Equal(t, 2, lineCount, "should have two lines")

View File

@@ -84,7 +84,7 @@ func TestCertRotation(t *testing.T) {
// Should have had a rotation; connections will have been closed
select {
case _, ok := <-w.ResultChan():
assert.Equal(t, false, ok)
assert.False(t, ok)
default:
t.Fatal("Watch wasn't closed despite rotation")
}