Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-29 14:37:00 +00:00
fix: enable bool-compare rule from testifylint linter (#125135)
* fix: enable bool-compare rule from testifylint linter

  Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>

* Update hack/golangci.yaml.in

  Co-authored-by: Patrick Ohly <patrick.ohly@intel.com>

* Update golangci.yaml.in
* Update golangci-strict.yaml
* Update golangci.yaml.in
* Update golangci.yaml.in
* Update golangci.yaml.in
* Update golangci.yaml.in
* Update golangci.yaml
* Update golangci-hints.yaml
* Update golangci-strict.yaml
* Update golangci.yaml.in
* Update golangci.yaml
* Update mux_test.go

---------

Signed-off-by: Matthieu MOREL <matthieu.morel35@gmail.com>
Co-authored-by: Patrick Ohly <patrick.ohly@intel.com>
This commit is contained in:
parent bcadbfcc55
commit 0cde5f1e28
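For context on the rule this commit turns on, here is a minimal, hypothetical Go test (not taken from the Kubernetes tree; the names are illustrative only) showing the pattern testifylint's bool-compare check flags and the rewrite applied throughout the diff below: comparing a boolean against a literal with assert.Equal is replaced by the dedicated assert.True / assert.False helpers, which also produce clearer failure messages.

package example

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

// TestBoolCompare is a hypothetical example of the bool-compare pattern.
func TestBoolCompare(t *testing.T) {
    ready := true

    // Flagged by testifylint's bool-compare rule: a boolean compared against a literal.
    assert.Equal(t, true, ready)
    assert.Equal(t, false, !ready)

    // Preferred forms after this change.
    assert.True(t, ready)
    assert.False(t, !ready)
}

The configuration hunks below enable testifylint (with enable-all) across the lint profiles, and the remaining hunks apply the corresponding rewrite to existing tests.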
@@ -90,6 +90,7 @@ linters:
     - revive
     - staticcheck
     - stylecheck
+    - testifylint
     - unused

 linters-settings: # please keep this alphabetized
@@ -179,3 +180,5 @@ linters-settings: # please keep this alphabetized
   staticcheck:
     checks:
       - "all"
+  testifylint:
+    enable-all: true
@@ -133,6 +133,7 @@ linters:
     - revive
     - staticcheck
     - stylecheck
+    - testifylint
     - unused
   disable:
     # https://github.com/kubernetes/kubernetes/issues/117288#issuecomment-1507008359
@@ -219,3 +220,5 @@ linters-settings: # please keep this alphabetized
   staticcheck:
     checks:
       - "all"
+  testifylint:
+    enable-all: true
@@ -139,6 +139,7 @@ linters:
     - revive
     - staticcheck
     - stylecheck
+    - testifylint
     - unused

 linters-settings: # please keep this alphabetized
@@ -231,3 +232,17 @@ linters-settings: # please keep this alphabetized
   stylecheck:
     checks:
       - "ST1019" # Importing the same package multiple times
+  testifylint:
+    enable-all: true
+    disable: # TODO: remove each disabled rule and fix it
+      - blank-import
+      - compares
+      - empty
+      - error-is-as
+      - error-nil
+      - expected-actual
+      - float-compare
+      - go-require
+      - len
+      - nil-compare
+      - require-error
@@ -148,6 +148,7 @@ linters:
     - revive
     - staticcheck
     - stylecheck
+    - testifylint
     - unused
   {{- if .Strict}}
   disable:
@@ -206,3 +207,19 @@ linters-settings: # please keep this alphabetized
     checks:
       - "ST1019" # Importing the same package multiple times
   {{- end}}
+  testifylint:
+    enable-all: true
+    {{- if .Base}}
+    disable: # TODO: remove each disabled rule and fix it
+      - blank-import
+      - compares
+      - empty
+      - error-is-as
+      - error-nil
+      - expected-actual
+      - float-compare
+      - go-require
+      - len
+      - nil-compare
+      - require-error
+    {{- end}}
@@ -1685,7 +1685,7 @@ func TestPodDeleteBatching(t *testing.T) {

 old, exists, err := esController.podStore.GetByKey(fmt.Sprintf("%s/%s", ns, update.podName))
 assert.Nil(t, err, "error while retrieving old value of %q: %v", update.podName, err)
-assert.Equal(t, true, exists, "pod should exist")
+assert.True(t, exists, "pod should exist")
 esController.podStore.Delete(old)
 esController.deletePod(old)
 }
@@ -488,7 +488,7 @@ func TestClaimInfoSetPrepared(t *testing.T) {
 } {
 t.Run(test.description, func(t *testing.T) {
 test.claimInfo.setPrepared()
-assert.Equal(t, test.claimInfo.isPrepared(), true)
+assert.True(t, test.claimInfo.isPrepared())
 })
 }
 }
@@ -680,21 +680,21 @@ func TestShouldRecordEvent(t *testing.T) {
 }

 _, actual := innerEventRecorder.shouldRecordEvent(nil)
-assert.Equal(t, false, actual)
+assert.False(t, actual)

 var obj = &v1.ObjectReference{Namespace: "claimrefns", Name: "claimrefname"}

 _, actual = innerEventRecorder.shouldRecordEvent(obj)
-assert.Equal(t, true, actual)
+assert.True(t, actual)

 obj = &v1.ObjectReference{Namespace: "system", Name: "infra", FieldPath: "implicitly required container "}

 _, actual = innerEventRecorder.shouldRecordEvent(obj)
-assert.Equal(t, false, actual)
+assert.False(t, actual)

 var nilObj *v1.ObjectReference = nil
 _, actual = innerEventRecorder.shouldRecordEvent(nilObj)
-assert.Equal(t, false, actual, "should not panic if the typed nil was used, see https://github.com/kubernetes/kubernetes/issues/95552")
+assert.False(t, actual, "should not panic if the typed nil was used, see https://github.com/kubernetes/kubernetes/issues/95552")
 }

 func TestHasWindowsHostProcessContainer(t *testing.T) {
@@ -121,14 +121,14 @@ func TestHandlerSupportsUserNamespaces(t *testing.T) {
 })

 got, err := kubelet.HandlerSupportsUserNamespaces("has-support")
-assert.Equal(t, true, got)
+assert.True(t, got)
 assert.NoError(t, err)

 got, err = kubelet.HandlerSupportsUserNamespaces("has-no-support")
-assert.Equal(t, false, got)
+assert.False(t, got)
 assert.NoError(t, err)

 got, err = kubelet.HandlerSupportsUserNamespaces("unknown")
-assert.Equal(t, false, got)
+assert.False(t, got)
 assert.Error(t, err)
 }
@@ -2732,8 +2732,7 @@ func TestRegisterWithApiServerWithTaint(t *testing.T) {
 Effect: v1.TaintEffectNoSchedule,
 }

-require.Equal(t,
-true,
+require.True(t,
 taintutil.TaintExists(got.Spec.Taints, unschedulableTaint),
 "test unschedulable taint for TaintNodesByCondition")
 }
@@ -364,7 +364,7 @@ func TestManager(t *testing.T) {
 assert.NoError(t, err, "expected manager.Start() to not return error")
 assert.True(t, fakeDbus.didInhibitShutdown, "expected that manager inhibited shutdown")
 assert.NoError(t, manager.ShutdownStatus(), "expected that manager does not return error since shutdown is not active")
-assert.Equal(t, manager.Admit(nil).Admit, true)
+assert.True(t, manager.Admit(nil).Admit)

 // Send fake shutdown event
 select {
@@ -386,7 +386,7 @@ func TestManager(t *testing.T) {
 }

 assert.Error(t, manager.ShutdownStatus(), "expected that manager returns error since shutdown is active")
-assert.Equal(t, manager.Admit(nil).Admit, false)
+assert.False(t, manager.Admit(nil).Admit)
 assert.Equal(t, tc.expectedPodToGracePeriodOverride, killedPodsToGracePeriods)
 assert.Equal(t, tc.expectedDidOverrideInhibitDelay, fakeDbus.didOverrideInhibitDelay, "override system inhibit delay differs")
 if tc.expectedPodStatuses != nil {
@@ -58,7 +58,7 @@ func TestHealthyEventedPLEG(t *testing.T) {
 // test if healthy when event channel has 5 events
 isHealthy, err := pleg.Healthy()
 require.NoError(t, err)
-assert.Equal(t, true, isHealthy)
+assert.True(t, isHealthy)

 // send remaining 95 events and make channel out of capacity
 for _, event := range events[5:] {
@@ -67,7 +67,7 @@ func TestHealthyEventedPLEG(t *testing.T) {
 // pleg is unhealthy when channel is out of capacity
 isHealthy, err = pleg.Healthy()
 require.Error(t, err)
-assert.Equal(t, false, isHealthy)
+assert.False(t, isHealthy)
 }

 func TestUpdateRunningPodMetric(t *testing.T) {
@@ -98,7 +98,7 @@ func TestUserNsManagerAllocate(t *testing.T) {
 allocated, length, err := m.allocateOne("one")
 assert.NoError(t, err)
 assert.Equal(t, userNsLength, int(length), "m.isSet(%d).length=%v", allocated, length)
-assert.Equal(t, true, m.isSet(allocated), "m.isSet(%d)", allocated)
+assert.True(t, m.isSet(allocated), "m.isSet(%d)", allocated)

 allocated2, length2, err := m.allocateOne("two")
 assert.NoError(t, err)
@@ -114,8 +114,8 @@ func TestUserNsManagerAllocate(t *testing.T) {

 m.Release("one")
 m.Release("two")
-assert.Equal(t, false, m.isSet(allocated), "m.isSet(%d)", allocated)
-assert.Equal(t, false, m.isSet(allocated2), "m.nsSet(%d)", allocated2)
+assert.False(t, m.isSet(allocated), "m.isSet(%d)", allocated)
+assert.False(t, m.isSet(allocated2), "m.nsSet(%d)", allocated2)

 var allocs []uint32
 for i := 0; i < 1000; i++ {
@@ -128,14 +128,14 @@ func TestUserNsManagerAllocate(t *testing.T) {
 allocs = append(allocs, allocated)
 }
 for i, v := range allocs {
-assert.Equal(t, true, m.isSet(v), "m.isSet(%d) should be true", v)
+assert.True(t, m.isSet(v), "m.isSet(%d) should be true", v)
 m.Release(types.UID(fmt.Sprintf("%d", i)))
-assert.Equal(t, false, m.isSet(v), "m.isSet(%d) should be false", v)
+assert.False(t, m.isSet(v), "m.isSet(%d) should be false", v)

 err = m.record(types.UID(fmt.Sprintf("%d", i)), v, userNsLength)
 assert.NoError(t, err)
 m.Release(types.UID(fmt.Sprintf("%d", i)))
-assert.Equal(t, false, m.isSet(v), "m.isSet(%d) should be false", v)
+assert.False(t, m.isSet(v), "m.isSet(%d) should be false", v)
 }
 }
@@ -4408,7 +4408,7 @@ func TestEndpointSliceE2E(t *testing.T) {
 assert.NotNil(t, fp.ipsetList["KUBE-LOOP-BACK"])
 activeEntries1 := fp.ipsetList["KUBE-LOOP-BACK"].activeEntries
 assert.Equal(t, 1, activeEntries1.Len(), "Expected 1 active entry in KUBE-LOOP-BACK")
-assert.Equal(t, true, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
+assert.True(t, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
 virtualServers1, vsErr1 := ipvs.GetVirtualServers()
 assert.Nil(t, vsErr1, "Expected no error getting virtual servers")
 assert.Len(t, virtualServers1, 1, "Expected 1 virtual server")
@@ -4465,7 +4465,7 @@ func TestHealthCheckNodePortE2E(t *testing.T) {
 assert.NotNil(t, fp.ipsetList["KUBE-HEALTH-CHECK-NODE-PORT"])
 activeEntries1 := fp.ipsetList["KUBE-HEALTH-CHECK-NODE-PORT"].activeEntries
 assert.Equal(t, 1, activeEntries1.Len(), "Expected 1 active entry in KUBE-HEALTH-CHECK-NODE-PORT")
-assert.Equal(t, true, activeEntries1.Has("30000"), "Expected activeEntries to reference hc node port in spec")
+assert.True(t, activeEntries1.Has("30000"), "Expected activeEntries to reference hc node port in spec")

 // Update health check node port in the spec
 newSvc := svc
@@ -4477,7 +4477,7 @@ func TestHealthCheckNodePortE2E(t *testing.T) {
 assert.NotNil(t, fp.ipsetList["KUBE-HEALTH-CHECK-NODE-PORT"])
 activeEntries2 := fp.ipsetList["KUBE-HEALTH-CHECK-NODE-PORT"].activeEntries
 assert.Equal(t, 1, activeEntries2.Len(), "Expected 1 active entry in KUBE-HEALTH-CHECK-NODE-PORT")
-assert.Equal(t, true, activeEntries2.Has("30001"), "Expected activeEntries to reference updated hc node port in spec")
+assert.True(t, activeEntries2.Has("30001"), "Expected activeEntries to reference updated hc node port in spec")

 fp.OnServiceDelete(&svc)
 fp.syncProxyRules()
@@ -4939,10 +4939,10 @@ func Test_EndpointSliceReadyAndTerminatingCluster(t *testing.T) {
 assert.NotNil(t, fp.ipsetList["KUBE-LOOP-BACK"])
 activeEntries1 := fp.ipsetList["KUBE-LOOP-BACK"].activeEntries
 assert.Equal(t, 4, activeEntries1.Len(), "Expected 4 active entry in KUBE-LOOP-BACK")
-assert.Equal(t, true, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first pod")
-assert.Equal(t, true, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second pod")
-assert.Equal(t, true, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference third pod")
-assert.Equal(t, true, activeEntries1.Has("10.0.1.4,tcp:80,10.0.1.4"), "Expected activeEntries to reference fourth pod")
+assert.True(t, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first pod")
+assert.True(t, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second pod")
+assert.True(t, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference third pod")
+assert.True(t, activeEntries1.Has("10.0.1.4,tcp:80,10.0.1.4"), "Expected activeEntries to reference fourth pod")

 virtualServers, vsErr := ipvs.GetVirtualServers()
 assert.Nil(t, vsErr, "Expected no error getting virtual servers")
@@ -5112,10 +5112,10 @@ func Test_EndpointSliceReadyAndTerminatingLocal(t *testing.T) {
 assert.NotNil(t, fp.ipsetList["KUBE-LOOP-BACK"])
 activeEntries1 := fp.ipsetList["KUBE-LOOP-BACK"].activeEntries
 assert.Equal(t, 4, activeEntries1.Len(), "Expected 3 active entry in KUBE-LOOP-BACK")
-assert.Equal(t, true, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
-assert.Equal(t, true, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second (local) pod")
-assert.Equal(t, true, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference second (local) pod")
-assert.Equal(t, true, activeEntries1.Has("10.0.1.4,tcp:80,10.0.1.4"), "Expected activeEntries to reference second (local) pod")
+assert.True(t, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
+assert.True(t, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second (local) pod")
+assert.True(t, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference second (local) pod")
+assert.True(t, activeEntries1.Has("10.0.1.4,tcp:80,10.0.1.4"), "Expected activeEntries to reference second (local) pod")

 virtualServers, vsErr := ipvs.GetVirtualServers()
 assert.Nil(t, vsErr, "Expected no error getting virtual servers")
@@ -5284,9 +5284,9 @@ func Test_EndpointSliceOnlyReadyAndTerminatingCluster(t *testing.T) {
 assert.NotNil(t, fp.ipsetList["KUBE-LOOP-BACK"])
 activeEntries1 := fp.ipsetList["KUBE-LOOP-BACK"].activeEntries
 assert.Equal(t, 3, activeEntries1.Len(), "Expected 3 active entry in KUBE-LOOP-BACK")
-assert.Equal(t, true, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
-assert.Equal(t, true, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second (local) pod")
-assert.Equal(t, true, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference second (local) pod")
+assert.True(t, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
+assert.True(t, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second (local) pod")
+assert.True(t, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference second (local) pod")

 virtualServers, vsErr := ipvs.GetVirtualServers()
 assert.Nil(t, vsErr, "Expected no error getting virtual servers")
@@ -5456,9 +5456,9 @@ func Test_EndpointSliceOnlyReadyAndTerminatingLocal(t *testing.T) {
 assert.NotNil(t, fp.ipsetList["KUBE-LOOP-BACK"])
 activeEntries1 := fp.ipsetList["KUBE-LOOP-BACK"].activeEntries
 assert.Equal(t, 3, activeEntries1.Len(), "Expected 3 active entry in KUBE-LOOP-BACK")
-assert.Equal(t, true, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
-assert.Equal(t, true, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second (local) pod")
-assert.Equal(t, true, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference second (local) pod")
+assert.True(t, activeEntries1.Has("10.0.1.1,tcp:80,10.0.1.1"), "Expected activeEntries to reference first (local) pod")
+assert.True(t, activeEntries1.Has("10.0.1.2,tcp:80,10.0.1.2"), "Expected activeEntries to reference second (local) pod")
+assert.True(t, activeEntries1.Has("10.0.1.3,tcp:80,10.0.1.3"), "Expected activeEntries to reference second (local) pod")

 virtualServers, vsErr := ipvs.GetVirtualServers()
 assert.Nil(t, vsErr, "Expected no error getting virtual servers")
@@ -242,7 +242,7 @@ func TestBroadcasterSendEventAfterShutdown(t *testing.T) {
 assert.EqualError(t, err, "broadcaster already stopped", "ActionOrDrop should report error id broadcaster is shutdown")

 sendOnClosed, err := m.ActionOrDrop(event.Type, event.Object)
-assert.Equal(t, sendOnClosed, false, "ActionOrDrop should return false if broadcaster is already shutdown")
+assert.False(t, sendOnClosed, "ActionOrDrop should return false if broadcaster is already shutdown")
 assert.EqualError(t, err, "broadcaster already stopped", "ActionOrDrop should report error id broadcaster is shutdown")
 }
@@ -535,7 +535,7 @@ func TestReadLogsLimitsWithTimestamps(t *testing.T) {
 // 2. The last item in the log should be 9999
 _, err = time.Parse(time.RFC3339, string(ts))
 assert.NoError(t, err, "timestamp not found")
-assert.Equal(t, true, bytes.HasSuffix(logline, []byte("9999")), "is the complete log found")
+assert.True(t, bytes.HasSuffix(logline, []byte("9999")), "is the complete log found")
 }

 assert.Equal(t, 2, lineCount, "should have two lines")
@@ -84,7 +84,7 @@ func TestCertRotation(t *testing.T) {
 // Should have had a rotation; connections will have been closed
 select {
 case _, ok := <-w.ResultChan():
-assert.Equal(t, false, ok)
+assert.False(t, ok)
 default:
 t.Fatal("Watch wasn't closed despite rotation")
 }