From b35357b6c08f21ba0fd312536051394c2567ec79 Mon Sep 17 00:00:00 2001 From: "tao.yang" Date: Mon, 4 Sep 2023 16:59:23 +0800 Subject: [PATCH] cleanup: omit comparison with bool constants Signed-off-by: tao.yang --- pkg/apis/core/validation/validation.go | 2 +- pkg/kubelet/kuberuntime/kuberuntime_manager.go | 2 +- pkg/kubelet/userns/userns_manager.go | 2 +- pkg/probe/exec/exec_test.go | 4 ++-- pkg/proxy/util/utils_test.go | 2 +- pkg/registry/core/service/storage/storage_test.go | 2 +- pkg/scheduler/internal/heap/heap_test.go | 6 +++--- plugin/pkg/admission/gc/gc_admission.go | 2 +- plugin/pkg/admission/imagepolicy/config_test.go | 4 ++-- .../pkg/apis/apiextensions/validation/validation.go | 8 ++++---- .../k8s.io/apimachinery/pkg/util/net/port_range_test.go | 4 ++-- .../apiserver/pkg/server/egressselector/config_test.go | 4 ++-- .../k8s.io/apiserver/pkg/server/options/tracing_test.go | 4 ++-- .../apiserver/pkg/storage/cacher/cache_watcher_test.go | 2 +- staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go | 6 +++--- .../k8s.io/cli-runtime/pkg/resource/crd_finder_test.go | 2 +- staging/src/k8s.io/client-go/tools/cache/heap_test.go | 4 ++-- .../k8s.io/cloud-provider/volume/helpers/zones_test.go | 2 +- .../client-gen/generators/fake/generator_fake_for_type.go | 2 +- .../cmd/register-gen/generators/packages.go | 2 +- .../k8s.io/component-base/tracing/api/v1/config_test.go | 4 ++-- staging/src/k8s.io/kubectl/pkg/generate/generate_test.go | 4 ++-- .../legacy-cloud-providers/azure/azure_loadbalancer.go | 2 +- .../src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go | 6 +++--- .../k8s.io/legacy-cloud-providers/vsphere/nodemanager.go | 4 ++-- test/e2e/node/pods.go | 2 +- test/e2e/storage/vsphere/vsphere_volume_diskformat.go | 6 +++--- .../apiserver/admissionwebhook/match_conditions_test.go | 4 ++-- test/integration/evictions/evictions_test.go | 4 ++-- test/integration/node/lifecycle_test.go | 4 ++-- test/integration/scheduler/preemption/preemption_test.go | 2 +- 
test/integration/scheduler/scheduler_test.go | 2 +- test/integration/tls/ciphers_test.go | 4 ++-- 33 files changed, 57 insertions(+), 57 deletions(-) diff --git a/pkg/apis/core/validation/validation.go b/pkg/apis/core/validation/validation.go index d66823eeb85..2e91edf44e3 100644 --- a/pkg/apis/core/validation/validation.go +++ b/pkg/apis/core/validation/validation.go @@ -3365,7 +3365,7 @@ func validateHostUsers(spec *core.PodSpec, fldPath *field.Path) field.ErrorList // Only make the following checks if hostUsers is false (otherwise, the container uses the // same userns as the host, and so there isn't anything to check). - if spec.SecurityContext == nil || spec.SecurityContext.HostUsers == nil || *spec.SecurityContext.HostUsers == true { + if spec.SecurityContext == nil || spec.SecurityContext.HostUsers == nil || *spec.SecurityContext.HostUsers { return allErrs } diff --git a/pkg/kubelet/kuberuntime/kuberuntime_manager.go b/pkg/kubelet/kuberuntime/kuberuntime_manager.go index f60b8a5ecc4..ec5ac1aefc6 100644 --- a/pkg/kubelet/kuberuntime/kuberuntime_manager.go +++ b/pkg/kubelet/kuberuntime/kuberuntime_manager.go @@ -672,7 +672,7 @@ func (m *kubeGenericRuntimeManager) doPodResizeAction(pod *v1.Pod, podStatus *ku switch rName { case v1.ResourceCPU: podCpuResources := &cm.ResourceConfig{CPUPeriod: podResources.CPUPeriod} - if setLimitValue == true { + if setLimitValue { podCpuResources.CPUQuota = podResources.CPUQuota } else { podCpuResources.CPUShares = podResources.CPUShares diff --git a/pkg/kubelet/userns/userns_manager.go b/pkg/kubelet/userns/userns_manager.go index ffd23630f13..06ebf8d912f 100644 --- a/pkg/kubelet/userns/userns_manager.go +++ b/pkg/kubelet/userns/userns_manager.go @@ -374,7 +374,7 @@ func (m *UsernsManager) GetOrCreateUserNamespaceMappings(pod *v1.Pod) (*runtimea m.lock.Lock() defer m.lock.Unlock() - if pod.Spec.HostUsers == nil || *pod.Spec.HostUsers == true { + if pod.Spec.HostUsers == nil || *pod.Spec.HostUsers { return 
&runtimeapi.UserNamespace{ Mode: runtimeapi.NamespaceMode_NODE, }, nil diff --git a/pkg/probe/exec/exec_test.go b/pkg/probe/exec/exec_test.go index 5724228d2fc..c6ef8598312 100644 --- a/pkg/probe/exec/exec_test.go +++ b/pkg/probe/exec/exec_test.go @@ -143,10 +143,10 @@ func TestExec(t *testing.T) { if status != test.expectedStatus { t.Errorf("[%d] expected %v, got %v", i, test.expectedStatus, status) } - if err != nil && test.expectError == false { + if err != nil && !test.expectError { t.Errorf("[%d] unexpected error: %v", i, err) } - if err == nil && test.expectError == true { + if err == nil && test.expectError { t.Errorf("[%d] unexpected non-error", i) } if test.output != output { diff --git a/pkg/proxy/util/utils_test.go b/pkg/proxy/util/utils_test.go index aa3a32dd2c5..537e77570ae 100644 --- a/pkg/proxy/util/utils_test.go +++ b/pkg/proxy/util/utils_test.go @@ -698,7 +698,7 @@ func TestRevertPorts(t *testing.T) { } } for _, lp := range tc.existingPorts { - if existingPortsMap[lp].(*fakeClosable).closed == true { + if existingPortsMap[lp].(*fakeClosable).closed { t.Errorf("Expect existing localport %v to be false in test case %v", lp, i) } } diff --git a/pkg/registry/core/service/storage/storage_test.go b/pkg/registry/core/service/storage/storage_test.go index 73292a30ba5..a1ec36f630f 100644 --- a/pkg/registry/core/service/storage/storage_test.go +++ b/pkg/registry/core/service/storage/storage_test.go @@ -11671,7 +11671,7 @@ func TestServiceRegistryResourceLocation(t *testing.T) { - if tc.err == false && err != nil { + if !tc.err && err != nil { t.Fatalf("unexpected error: %v", err) } - if tc.err == true && err == nil { + if tc.err && err == nil { t.Fatalf("unexpected success") } if !tc.err { diff --git a/pkg/scheduler/internal/heap/heap_test.go b/pkg/scheduler/internal/heap/heap_test.go index b337e3cc387..7b853efc27d 100644 --- a/pkg/scheduler/internal/heap/heap_test.go +++ b/pkg/scheduler/internal/heap/heap_test.go @@ -209,7 +209,7 @@ func TestHeap_Get(t *testing.T) { } // Get non-existing
object. _, exists, err = h.Get(mkHeapObj("non-existing", 0)) - if err != nil || exists == true { + if err != nil || exists { t.Fatalf("didn't expect to get any object") } } @@ -223,12 +223,12 @@ func TestHeap_GetByKey(t *testing.T) { h.Add(mkHeapObj("baz", 11)) obj, exists, err := h.GetByKey("baz") - if err != nil || exists == false || obj.(testHeapObject).val != 11 { + if err != nil || !exists || obj.(testHeapObject).val != 11 { t.Fatalf("unexpected error in getting element") } // Get non-existing object. _, exists, err = h.GetByKey("non-existing") - if err != nil || exists == true { + if err != nil || exists { t.Fatalf("didn't expect to get any object") } } diff --git a/plugin/pkg/admission/gc/gc_admission.go b/plugin/pkg/admission/gc/gc_admission.go index db0f3c12c10..e69c9a67ea7 100644 --- a/plugin/pkg/admission/gc/gc_admission.go +++ b/plugin/pkg/admission/gc/gc_admission.go @@ -238,7 +238,7 @@ func (a *gcPermissionsEnforcement) ownerRefToDeleteAttributeRecords(ref metav1.O func blockingOwnerRefs(refs []metav1.OwnerReference) []metav1.OwnerReference { var ret []metav1.OwnerReference for _, ref := range refs { - if ref.BlockOwnerDeletion != nil && *ref.BlockOwnerDeletion == true { + if ref.BlockOwnerDeletion != nil && *ref.BlockOwnerDeletion { ret = append(ret, ref) } } diff --git a/plugin/pkg/admission/imagepolicy/config_test.go b/plugin/pkg/admission/imagepolicy/config_test.go index 8567011ce16..c9bd13411e1 100644 --- a/plugin/pkg/admission/imagepolicy/config_test.go +++ b/plugin/pkg/admission/imagepolicy/config_test.go @@ -120,10 +120,10 @@ func TestConfigNormalization(t *testing.T) { } for _, tt := range tests { err := normalizeWebhookConfig(&tt.config) - if err == nil && tt.wantErr == true { + if err == nil && tt.wantErr { t.Errorf("%s: expected error from normalization and didn't have one", tt.test) } - if err != nil && tt.wantErr == false { + if err != nil && !tt.wantErr { t.Errorf("%s: unexpected error from normalization: %v", tt.test, err) } if err == 
nil && !reflect.DeepEqual(tt.config, tt.normalizedConfig) { diff --git a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go index 875b3e6de07..c22a6f1c62b 100644 --- a/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go +++ b/staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/validation/validation.go @@ -325,7 +325,7 @@ func validateCustomResourceDefinitionSpec(ctx context.Context, spec *apiextensio } if opts.allowDefaults && specHasDefaults(spec) { opts.requireStructuralSchema = true - if spec.PreserveUnknownFields == nil || *spec.PreserveUnknownFields == true { + if spec.PreserveUnknownFields == nil || *spec.PreserveUnknownFields { allErrs = append(allErrs, field.Invalid(fldPath.Child("preserveUnknownFields"), true, "must be false in order to use defaults in the schema")) } } @@ -873,7 +873,7 @@ func ValidateCustomResourceDefinitionOpenAPISchema(schema *apiextensions.JSONSch } allErrs.SchemaErrors = append(allErrs.SchemaErrors, ssv.validate(schema, fldPath)...) - if schema.UniqueItems == true { + if schema.UniqueItems { allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Forbidden(fldPath.Child("uniqueItems"), "uniqueItems cannot be set to true since the runtime complexity becomes quadratic")) } @@ -888,7 +888,7 @@ func ValidateCustomResourceDefinitionOpenAPISchema(schema *apiextensions.JSONSch // restricted like additionalProperties. 
if schema.AdditionalProperties != nil { if len(schema.Properties) != 0 { - if schema.AdditionalProperties.Allows == false || schema.AdditionalProperties.Schema != nil { + if !schema.AdditionalProperties.Allows || schema.AdditionalProperties.Schema != nil { allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Forbidden(fldPath.Child("additionalProperties"), "additionalProperties and properties are mutual exclusive")) } } @@ -977,7 +977,7 @@ func ValidateCustomResourceDefinitionOpenAPISchema(schema *apiextensions.JSONSch } } - if schema.XPreserveUnknownFields != nil && *schema.XPreserveUnknownFields == false { + if schema.XPreserveUnknownFields != nil && !*schema.XPreserveUnknownFields { allErrs.SchemaErrors = append(allErrs.SchemaErrors, field.Invalid(fldPath.Child("x-kubernetes-preserve-unknown-fields"), *schema.XPreserveUnknownFields, "must be true or undefined")) } diff --git a/staging/src/k8s.io/apimachinery/pkg/util/net/port_range_test.go b/staging/src/k8s.io/apimachinery/pkg/util/net/port_range_test.go index b4cbe82459c..94a1b7f8819 100644 --- a/staging/src/k8s.io/apimachinery/pkg/util/net/port_range_test.go +++ b/staging/src/k8s.io/apimachinery/pkg/util/net/port_range_test.go @@ -56,10 +56,10 @@ func TestPortRange(t *testing.T) { pr := &PortRange{} var f flag.Value = pr err := f.Set(tc.input) - if err != nil && tc.success == true { + if err != nil && tc.success { t.Errorf("expected success, got %q", err) continue - } else if err == nil && tc.success == false { + } else if err == nil && !tc.success { t.Errorf("expected failure %#v", testCases[i]) continue } else if tc.success { diff --git a/staging/src/k8s.io/apiserver/pkg/server/egressselector/config_test.go b/staging/src/k8s.io/apiserver/pkg/server/egressselector/config_test.go index 2b9861ae9d2..6effe442c4e 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/egressselector/config_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/egressselector/config_test.go @@ -541,9 +541,9 @@ func 
TestValidateEgressSelectorConfiguration(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { errs := ValidateEgressSelectorConfiguration(tc.contents) - if tc.expectError == false && len(errs) != 0 { + if !tc.expectError && len(errs) != 0 { t.Errorf("Calling ValidateEgressSelectorConfiguration expected no error, got %v", errs) - } else if tc.expectError == true && len(errs) == 0 { + } else if tc.expectError && len(errs) == 0 { t.Errorf("Calling ValidateEgressSelectorConfiguration expected error, got no error") } }) diff --git a/staging/src/k8s.io/apiserver/pkg/server/options/tracing_test.go b/staging/src/k8s.io/apiserver/pkg/server/options/tracing_test.go index d05c08e547c..1d29cce5212 100644 --- a/staging/src/k8s.io/apiserver/pkg/server/options/tracing_test.go +++ b/staging/src/k8s.io/apiserver/pkg/server/options/tracing_test.go @@ -67,9 +67,9 @@ func TestValidateTracingOptions(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { errs := tc.contents.Validate() - if tc.expectError == false && len(errs) != 0 { + if !tc.expectError && len(errs) != 0 { t.Errorf("Calling Validate expected no error, got %v", errs) - } else if tc.expectError == true && len(errs) == 0 { + } else if tc.expectError && len(errs) == 0 { t.Errorf("Calling Validate expected error, got no error") } }) diff --git a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cache_watcher_test.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cache_watcher_test.go index b47fe0ed4b9..1a59615ce39 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cache_watcher_test.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cache_watcher_test.go @@ -485,7 +485,7 @@ func TestCacheWatcherDrainingNoBookmarkAfterResourceVersionReceived(t *testing.T forget := func(drainWatcher bool) { lock.Lock() defer lock.Unlock() - if drainWatcher == true { + if drainWatcher { t.Fatalf("didn't expect drainWatcher to be set to true") } count++ diff --git 
a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go index 7248fe71768..c7d9390ae76 100644 --- a/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go +++ b/staging/src/k8s.io/apiserver/pkg/storage/cacher/cacher.go @@ -1252,7 +1252,7 @@ func (c *Cacher) LastSyncResourceVersion() (uint64, error) { // // The returned function must be called under the watchCache lock. func (c *Cacher) getBookmarkAfterResourceVersionLockedFunc(ctx context.Context, parsedResourceVersion uint64, opts storage.ListOptions) (func() uint64, error) { - if opts.SendInitialEvents == nil || *opts.SendInitialEvents == false || !opts.Predicate.AllowWatchBookmarks { + if opts.SendInitialEvents == nil || !*opts.SendInitialEvents || !opts.Predicate.AllowWatchBookmarks { return func() uint64 { return 0 }, nil } return c.getCommonResourceVersionLockedFunc(ctx, parsedResourceVersion, opts) @@ -1267,7 +1267,7 @@ func (c *Cacher) getBookmarkAfterResourceVersionLockedFunc(ctx context.Context, // // The returned function must be called under the watchCache lock. func (c *Cacher) getStartResourceVersionForWatchLockedFunc(ctx context.Context, parsedWatchResourceVersion uint64, opts storage.ListOptions) (func() uint64, error) { - if opts.SendInitialEvents == nil || *opts.SendInitialEvents == true { + if opts.SendInitialEvents == nil || *opts.SendInitialEvents { return func() uint64 { return parsedWatchResourceVersion }, nil } return c.getCommonResourceVersionLockedFunc(ctx, parsedWatchResourceVersion, opts) @@ -1298,7 +1298,7 @@ func (c *Cacher) getCommonResourceVersionLockedFunc(ctx context.Context, parsedW // Additionally, it instructs the caller whether it should ask for // all events from the cache (full state) or not. 
func (c *Cacher) waitUntilWatchCacheFreshAndForceAllEvents(ctx context.Context, requestedWatchRV uint64, opts storage.ListOptions) (bool, error) { - if opts.SendInitialEvents != nil && *opts.SendInitialEvents == true { + if opts.SendInitialEvents != nil && *opts.SendInitialEvents { err := c.watchCache.waitUntilFreshAndBlock(ctx, requestedWatchRV) defer c.watchCache.RUnlock() return err == nil, err diff --git a/staging/src/k8s.io/cli-runtime/pkg/resource/crd_finder_test.go b/staging/src/k8s.io/cli-runtime/pkg/resource/crd_finder_test.go index 77f33007e43..6713f81e83c 100644 --- a/staging/src/k8s.io/cli-runtime/pkg/resource/crd_finder_test.go +++ b/staging/src/k8s.io/cli-runtime/pkg/resource/crd_finder_test.go @@ -56,7 +56,7 @@ func TestCRDFinderErrors(t *testing.T) { } finder := NewCRDFinder(getter) found, err := finder.HasCRD(schema.GroupKind{Group: "", Kind: "Pod"}) - if found == true { + if found { t.Fatalf("Found the CRD with non-working getter function") } if err == nil { diff --git a/staging/src/k8s.io/client-go/tools/cache/heap_test.go b/staging/src/k8s.io/client-go/tools/cache/heap_test.go index c2e476988f7..ed50ee01351 100644 --- a/staging/src/k8s.io/client-go/tools/cache/heap_test.go +++ b/staging/src/k8s.io/client-go/tools/cache/heap_test.go @@ -264,7 +264,7 @@ func TestHeap_Get(t *testing.T) { } // Get non-existing object. _, exists, err = h.Get(mkHeapObj("non-existing", 0)) - if err != nil || exists == true { + if err != nil || exists { t.Fatalf("didn't expect to get any object") } } @@ -283,7 +283,7 @@ func TestHeap_GetByKey(t *testing.T) { } // Get non-existing object. 
_, exists, err = h.GetByKey("non-existing") - if err != nil || exists == true { + if err != nil || exists { t.Fatalf("didn't expect to get any object") } } diff --git a/staging/src/k8s.io/cloud-provider/volume/helpers/zones_test.go b/staging/src/k8s.io/cloud-provider/volume/helpers/zones_test.go index e31a7224b2b..bc1ff0e5d89 100644 --- a/staging/src/k8s.io/cloud-provider/volume/helpers/zones_test.go +++ b/staging/src/k8s.io/cloud-provider/volume/helpers/zones_test.go @@ -784,7 +784,7 @@ func TestSelectZoneForVolume(t *testing.T) { t.Errorf("Unexpected error from SelectZoneForVolume for %s; Error: %v", test.Name, err) } - if test.ExpectSpecificZone == true { + if test.ExpectSpecificZone { if zone != test.ExpectedZone { t.Errorf("Expected zone %v does not match obtained zone %v for %s", test.ExpectedZone, zone, test.Name) } diff --git a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go index dce920ad199..28b829cc139 100644 --- a/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go +++ b/staging/src/k8s.io/code-generator/cmd/client-gen/generators/fake/generator_fake_for_type.go @@ -77,7 +77,7 @@ func genStatus(t *types.Type) bool { // hasObjectMeta returns true if the type has a ObjectMeta field. 
func hasObjectMeta(t *types.Type) bool { for _, m := range t.Members { - if m.Embedded == true && m.Name == "ObjectMeta" { + if m.Embedded && m.Name == "ObjectMeta" { return true } } diff --git a/staging/src/k8s.io/code-generator/cmd/register-gen/generators/packages.go b/staging/src/k8s.io/code-generator/cmd/register-gen/generators/packages.go index 242eb3aa10a..fa8e3f1c356 100644 --- a/staging/src/k8s.io/code-generator/cmd/register-gen/generators/packages.go +++ b/staging/src/k8s.io/code-generator/cmd/register-gen/generators/packages.go @@ -93,7 +93,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat for _, t := range pkg.Types { klog.V(5).Infof("considering type = %s", t.Name.String()) for _, typeMember := range t.Members { - if typeMember.Name == "TypeMeta" && typeMember.Embedded == true { + if typeMember.Name == "TypeMeta" && typeMember.Embedded { typesToRegister = append(typesToRegister, t) } } diff --git a/staging/src/k8s.io/component-base/tracing/api/v1/config_test.go b/staging/src/k8s.io/component-base/tracing/api/v1/config_test.go index 1aa4c0664ea..1371c0cac21 100644 --- a/staging/src/k8s.io/component-base/tracing/api/v1/config_test.go +++ b/staging/src/k8s.io/component-base/tracing/api/v1/config_test.go @@ -97,9 +97,9 @@ func TestValidateTracingConfiguration(t *testing.T) { for _, tc := range testcases { t.Run(tc.name, func(t *testing.T) { errs := ValidateTracingConfiguration(tc.contents, nil, field.NewPath("tracing")) - if tc.expectError == false && len(errs) != 0 { + if !tc.expectError && len(errs) != 0 { t.Errorf("Calling ValidateTracingConfiguration expected no error, got %v", errs) - } else if tc.expectError == true && len(errs) == 0 { + } else if tc.expectError && len(errs) == 0 { t.Errorf("Calling ValidateTracingConfiguration expected error, got no error") } }) diff --git a/staging/src/k8s.io/kubectl/pkg/generate/generate_test.go b/staging/src/k8s.io/kubectl/pkg/generate/generate_test.go index 
3037eb6f5b4..f4d18149906 100644 --- a/staging/src/k8s.io/kubectl/pkg/generate/generate_test.go +++ b/staging/src/k8s.io/kubectl/pkg/generate/generate_test.go @@ -254,10 +254,10 @@ func TestGetBool(t *testing.T) { for _, tt := range testCases { t.Run(tt.name, func(t *testing.T) { got, err := GetBool(tt.parameters, tt.key, tt.defaultValue) - if err != nil && tt.expectError == false { + if err != nil && !tt.expectError { t.Errorf("%s: unexpected error: %v", tt.name, err) } - if err == nil && tt.expectError == true { + if err == nil && tt.expectError { t.Errorf("%s: expect error, got nil", tt.name) } if got != tt.expected { diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go index 5c82ef14c8e..46a9608cea8 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go @@ -2091,7 +2091,7 @@ func deduplicate(collection *[]string) *[]string { result := make([]string, 0, len(*collection)) for _, v := range *collection { - if seen[v] == true { + if seen[v] { // skip this element } else { seen[v] = true diff --git a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go index c9059355f1d..c62a867e632 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go +++ b/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go @@ -904,7 +904,7 @@ func (ss *scaleSet) getPrimaryNetworkInterfaceConfiguration(networkConfiguration for idx := range networkConfigurations { networkConfig := &networkConfigurations[idx] - if networkConfig.Primary != nil && *networkConfig.Primary == true { + if networkConfig.Primary != nil && *networkConfig.Primary { return networkConfig, nil } } @@ -920,7 +920,7 @@ func (ss *scaleSet) getPrimaryNetworkInterfaceConfigurationForScaleSet(networkCo for idx := range 
networkConfigurations { networkConfig := &networkConfigurations[idx] - if networkConfig.Primary != nil && *networkConfig.Primary == true { + if networkConfig.Primary != nil && *networkConfig.Primary { return networkConfig, nil } } @@ -936,7 +936,7 @@ func getPrimaryIPConfigFromVMSSNetworkConfig(config *compute.VirtualMachineScale for idx := range ipConfigurations { ipConfig := &ipConfigurations[idx] - if ipConfig.Primary != nil && *ipConfig.Primary == true { + if ipConfig.Primary != nil && *ipConfig.Primary { return ipConfig, nil } } diff --git a/staging/src/k8s.io/legacy-cloud-providers/vsphere/nodemanager.go b/staging/src/k8s.io/legacy-cloud-providers/vsphere/nodemanager.go index 5bd3eea6e36..582e1291dce 100644 --- a/staging/src/k8s.io/legacy-cloud-providers/vsphere/nodemanager.go +++ b/staging/src/k8s.io/legacy-cloud-providers/vsphere/nodemanager.go @@ -134,7 +134,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { for vc, vsi := range nm.vsphereInstanceMap { found := getVMFound() - if found == true { + if found { break } @@ -175,7 +175,7 @@ func (nm *NodeManager) DiscoverNode(node *v1.Node) error { for _, datacenterObj := range datacenterObjs { found := getVMFound() - if found == true { + if found { break } diff --git a/test/e2e/node/pods.go b/test/e2e/node/pods.go index 3329a78a87e..7bcbe15b2af 100644 --- a/test/e2e/node/pods.go +++ b/test/e2e/node/pods.go @@ -726,7 +726,7 @@ func (v *podStartVerifier) Verify(event watch.Event) error { } if status := e2epod.FindContainerStatusInPod(pod, "blocked"); status != nil { - if (status.Started != nil && *status.Started == true) || status.LastTerminationState.Terminated != nil || status.State.Waiting == nil { + if (status.Started != nil && *status.Started) || status.LastTerminationState.Terminated != nil || status.State.Waiting == nil { return fmt.Errorf("pod %s on node %s should not have started the blocked container: %#v", pod.Name, pod.Spec.NodeName, status) } } diff --git 
a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go index b21f5f03e97..576473fa9ad 100644 --- a/test/e2e/storage/vsphere/vsphere_volume_diskformat.go +++ b/test/e2e/storage/vsphere/vsphere_volume_diskformat.go @@ -196,15 +196,15 @@ func verifyDiskFormat(ctx context.Context, client clientset.Interface, nodeName } isDiskFormatCorrect := false if diskFormat == "eagerzeroedthick" { - if eagerlyScrub == true && thinProvisioned == false { + if eagerlyScrub && !thinProvisioned { isDiskFormatCorrect = true } } else if diskFormat == "zeroedthick" { - if eagerlyScrub == false && thinProvisioned == false { + if !eagerlyScrub && !thinProvisioned { isDiskFormatCorrect = true } } else if diskFormat == "thin" { - if eagerlyScrub == false && thinProvisioned == true { + if !eagerlyScrub && thinProvisioned { isDiskFormatCorrect = true } } diff --git a/test/integration/apiserver/admissionwebhook/match_conditions_test.go b/test/integration/apiserver/admissionwebhook/match_conditions_test.go index 1fd2c2788e5..f05685206b5 100644 --- a/test/integration/apiserver/admissionwebhook/match_conditions_test.go +++ b/test/integration/apiserver/admissionwebhook/match_conditions_test.go @@ -432,9 +432,9 @@ func TestMatchConditions(t *testing.T) { for _, pod := range testcase.pods { _, err := client.CoreV1().Pods(pod.Namespace).Create(context.TODO(), pod, dryRunCreate) - if testcase.expectErrorPod == false && err != nil { + if !testcase.expectErrorPod && err != nil { t.Fatalf("unexpected error creating test pod: %v", err) - } else if testcase.expectErrorPod == true && err == nil { + } else if testcase.expectErrorPod && err == nil { t.Fatal("expected error creating pods") } } diff --git a/test/integration/evictions/evictions_test.go b/test/integration/evictions/evictions_test.go index 32f285651d3..72a0d8ab374 100644 --- a/test/integration/evictions/evictions_test.go +++ b/test/integration/evictions/evictions_test.go @@ -430,9 +430,9 @@ 
func TestEvictionWithFinalizers(t *testing.T) { t.Fatalf("Failed to get the pod %q with error: %q", klog.KObj(pod), e) } _, cond := podutil.GetPodCondition(&updatedPod.Status, v1.PodConditionType(v1.DisruptionTarget)) - if tc.wantDisruptionTargetCond == true && cond == nil { + if tc.wantDisruptionTargetCond && cond == nil { t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(updatedPod), v1.DisruptionTarget) - } else if tc.wantDisruptionTargetCond == false && cond != nil { + } else if !tc.wantDisruptionTargetCond && cond != nil { t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(updatedPod), v1.DisruptionTarget) } }) diff --git a/test/integration/node/lifecycle_test.go b/test/integration/node/lifecycle_test.go index e9451d319c5..f3825d6c1a4 100644 --- a/test/integration/node/lifecycle_test.go +++ b/test/integration/node/lifecycle_test.go @@ -163,9 +163,9 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) { t.Fatalf("Test Failed: error: %q, while getting updated pod", err) } _, cond := podutil.GetPodCondition(&testPod.Status, v1.DisruptionTarget) - if test.enablePodDisruptionConditions == true && cond == nil { + if test.enablePodDisruptionConditions && cond == nil { t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(testPod), v1.DisruptionTarget) - } else if test.enablePodDisruptionConditions == false && cond != nil { + } else if !test.enablePodDisruptionConditions && cond != nil { t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(testPod), v1.DisruptionTarget) } }) diff --git a/test/integration/scheduler/preemption/preemption_test.go b/test/integration/scheduler/preemption/preemption_test.go index 6b47548f686..0a7d58d01ef 100644 --- a/test/integration/scheduler/preemption/preemption_test.go +++ b/test/integration/scheduler/preemption/preemption_test.go @@ -513,7 +513,7 @@ func TestPreemption(t *testing.T) { t.Errorf("Error %v when getting the updated status for pod %v/%v ", err, p.Namespace, 
p.Name) } _, cond := podutil.GetPodCondition(&pod.Status, v1.DisruptionTarget) - if test.enablePodDisruptionConditions == true && cond == nil { + if test.enablePodDisruptionConditions && cond == nil { t.Errorf("Pod %q does not have the expected condition: %q", klog.KObj(pod), v1.DisruptionTarget) - } else if test.enablePodDisruptionConditions == false && cond != nil { + } else if !test.enablePodDisruptionConditions && cond != nil { t.Errorf("Pod %q has an unexpected condition: %q", klog.KObj(pod), v1.DisruptionTarget) } diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go index bb2d3851aa4..ae4bf4eff93 100644 --- a/test/integration/scheduler/scheduler_test.go +++ b/test/integration/scheduler/scheduler_test.go @@ -102,7 +102,7 @@ func TestUnschedulableNodes(t *testing.T) { // Nodes that are unschedulable or that are not ready or // have their disk full (Node.Spec.Conditions) are excluded // based on NodeConditionPredicate, a separate check - return node != nil && node.(*v1.Node).Spec.Unschedulable == true + return node != nil && node.(*v1.Node).Spec.Unschedulable }) if err != nil { t.Fatalf("Failed to observe reflected update for setting unschedulable=true: %v", err) diff --git a/test/integration/tls/ciphers_test.go b/test/integration/tls/ciphers_test.go index cdc5c089b81..038eefedb32 100644 --- a/test/integration/tls/ciphers_test.go +++ b/test/integration/tls/ciphers_test.go @@ -79,9 +79,9 @@ func runTestAPICiphers(t *testing.T, testID int, kubePort int, clientCiphers []u defer resp.Body.Close() } - if expectedError == true && err == nil { + if expectedError && err == nil { t.Fatalf("%d: expecting error for cipher test, client cipher is supported and it should't", testID) - } else if err != nil && expectedError == false { + } else if err != nil && !expectedError { t.Fatalf("%d: not expecting error by client with cipher failed: %+v", testID, err) } }