Mirror of https://github.com/kubernetes/client-go.git (synced 2025-08-15 14:03:09 +00:00)
Merge pull request #116166 from pohly/test-go-vet

fix "go vet" issues, check as part of golangci-lint

Kubernetes-commit: ff735dff85367c2a096c4065b8b3c1fbbeecabc4
Commit: 06ad6b391d
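Most of the keyed-field changes below fix go vet's composites check, which warns when a composite literal of a struct type from another package sets fields positionally rather than by name; positional literals silently break when the struct gains a field. A minimal sketch of the warning and the fix, assuming only the apimachinery metav1 package already used throughout this repo:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Unkeyed: relies on MicroTime having exactly one field (the embedded
	// time.Time). go vet flags this as a struct literal with unkeyed fields.
	bad := metav1.MicroTime{time.Now()}

	// Keyed: the form this commit switches to everywhere.
	good := metav1.MicroTime{Time: time.Now()}

	fmt.Println(bad, good)
}

Running go vet on the first literal reports something like "k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime struct literal uses unkeyed fields"; the keyed form passes.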
go.mod (4 changed lines)
@@ -25,7 +25,7 @@ require (
 	golang.org/x/time v0.0.0-20220210224613-90d013bbcef8
 	google.golang.org/protobuf v1.28.1
 	k8s.io/api v0.0.0-20230303235756-fc1b77c3f4ab
-	k8s.io/apimachinery v0.0.0-20230303235433-5b1cff80ea06
+	k8s.io/apimachinery v0.0.0-20230303235435-f357b1fa74b7
 	k8s.io/klog/v2 v2.90.1
 	k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d
 	k8s.io/utils v0.0.0-20230209194617-a36077c30491
@@ -60,5 +60,5 @@ require (
 
 replace (
 	k8s.io/api => k8s.io/api v0.0.0-20230303235756-fc1b77c3f4ab
-	k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20230303235433-5b1cff80ea06
+	k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20230303235435-f357b1fa74b7
 )
go.sum (4 changed lines)
@@ -475,8 +475,8 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
 k8s.io/api v0.0.0-20230303235756-fc1b77c3f4ab h1:peGI8OxM+LOJxG8FqX/y6BVELxehZ4fDq9cm1a5Qaz0=
 k8s.io/api v0.0.0-20230303235756-fc1b77c3f4ab/go.mod h1:ihVCDKSD6f+H/yGKVpY9HSgpw4StgSvrI3dbK05M9a8=
-k8s.io/apimachinery v0.0.0-20230303235433-5b1cff80ea06 h1:d8NkMUaqPq1ghtNIScF2RIPgGhSu+scUUrxsinWDThg=
-k8s.io/apimachinery v0.0.0-20230303235433-5b1cff80ea06/go.mod h1:jlJwObMa4oKAEOMnAeEaqeiM+Fwd/CbAwNyQ7OaEwS0=
+k8s.io/apimachinery v0.0.0-20230303235435-f357b1fa74b7 h1:YN43Lvs3Pj9iQmuWGojeBiFdz1mkrxe0EZn7Ba3TMpQ=
+k8s.io/apimachinery v0.0.0-20230303235435-f357b1fa74b7/go.mod h1:jlJwObMa4oKAEOMnAeEaqeiM+Fwd/CbAwNyQ7OaEwS0=
 k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
 k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
 k8s.io/kube-openapi v0.0.0-20230303024457-afdc3dddf62d h1:VcFq5n7wCJB2FQMCIHfC+f+jNcGgNMar1uKd6rVlifU=
@@ -1085,7 +1085,7 @@ func TestTLSCredentials(t *testing.T) {
 		Status: &clientauthentication.ExecCredentialStatus{
 			ClientCertificateData: string(cert),
 			ClientKeyData:         string(key),
-			ExpirationTimestamp:   &v1.Time{now.Add(time.Hour)},
+			ExpirationTimestamp:   &v1.Time{Time: now.Add(time.Hour)},
 		},
 	}
 	get(t, "valid TLS cert", false)
@@ -1097,7 +1097,7 @@ func TestTLSCredentials(t *testing.T) {
 		Status: &clientauthentication.ExecCredentialStatus{
 			ClientCertificateData: string(nCert),
 			ClientKeyData:         string(nKey),
-			ExpirationTimestamp:   &v1.Time{now.Add(time.Hour)},
+			ExpirationTimestamp:   &v1.Time{Time: now.Add(time.Hour)},
 		},
 	}
 	get(t, "untrusted TLS cert", true)
@@ -1107,7 +1107,7 @@ func TestTLSCredentials(t *testing.T) {
 		Status: &clientauthentication.ExecCredentialStatus{
 			ClientCertificateData: string(cert),
 			ClientKeyData:         string(key),
-			ExpirationTimestamp:   &v1.Time{now.Add(time.Hour)},
+			ExpirationTimestamp:   &v1.Time{Time: now.Add(time.Hour)},
 		},
 	}
 	get(t, "valid TLS cert again", false)
@@ -35,6 +35,7 @@ func TestClientAuthenticationClusterTypesAreSynced(t *testing.T) {
 		clientauthenticationv1beta1.Cluster{},
 		clientauthenticationv1.Cluster{},
 	} {
+		cluster := cluster
 		t.Run(fmt.Sprintf("%T", cluster), func(t *testing.T) {
 			t.Parallel()
 			testClientAuthenticationClusterTypesAreSynced(t, cluster)
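The single added line, cluster := cluster, is the pre-Go 1.22 per-iteration copy idiom: t.Parallel() defers the closure body until after the range loop has advanced, so without the copy every subtest could observe the final loop value. go vet's loopclosure check flags exactly this combination. A minimal sketch under those assumptions (hypothetical test file; run with go test):

package example

import "testing"

func TestParallelCapture(t *testing.T) {
	for _, name := range []string{"a", "b", "c"} {
		name := name // copy; each parallel subtest closure captures its own variable
		t.Run(name, func(t *testing.T) {
			t.Parallel() // body runs after the loop has moved on
			if name == "" {
				t.Fatal("unexpected empty name")
			}
		})
	}
}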
@@ -41,7 +41,7 @@ type recorderImpl struct {
 }
 
 func (recorder *recorderImpl) Eventf(regarding runtime.Object, related runtime.Object, eventtype, reason, action, note string, args ...interface{}) {
-	timestamp := metav1.MicroTime{time.Now()}
+	timestamp := metav1.MicroTime{Time: time.Now()}
 	message := fmt.Sprintf(note, args...)
 	refRegarding, err := reference.GetReference(recorder.scheme, regarding)
 	if err != nil {
@@ -92,7 +92,7 @@ func TestEventSeriesf(t *testing.T) {
 			Name:      "foo",
 			Namespace: "baz",
 		},
-		EventTime:           metav1.MicroTime{time.Now()},
+		EventTime:           metav1.MicroTime{Time: time.Now()},
 		ReportingController: "eventTest",
 		ReportingInstance:   "eventTest-" + hostname,
 		Action:              "started",
@@ -296,7 +296,7 @@ func TestFinishSeries(t *testing.T) {
 	cache := map[eventKey]*eventsv1.Event{}
 	eventBroadcaster := newBroadcaster(&testEvents, 0, cache).(*eventBroadcasterImpl)
 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, "k8s.io/kube-foo").(*recorderImpl)
-	cachedEvent := recorder.makeEvent(regarding, related, metav1.MicroTime{time.Now()}, v1.EventTypeNormal, "test", "some verbose message: 1", "eventTest", "eventTest-"+hostname, "started")
+	cachedEvent := recorder.makeEvent(regarding, related, metav1.MicroTime{Time: time.Now()}, v1.EventTypeNormal, "test", "some verbose message: 1", "eventTest", "eventTest-"+hostname, "started")
 	nonFinishedEvent := cachedEvent.DeepCopy()
 	nonFinishedEvent.ReportingController = "nonFinished-controller"
 	cachedEvent.Series = &eventsv1.EventSeries{
@@ -382,7 +382,7 @@ func TestRefreshExistingEventSeries(t *testing.T) {
 	cache := map[eventKey]*eventsv1.Event{}
 	eventBroadcaster := newBroadcaster(&testEvents, 0, cache).(*eventBroadcasterImpl)
 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, "k8s.io/kube-foo").(*recorderImpl)
-	cachedEvent := recorder.makeEvent(regarding, related, metav1.MicroTime{time.Now()}, v1.EventTypeNormal, "test", "some verbose message: 1", "eventTest", "eventTest-"+hostname, "started")
+	cachedEvent := recorder.makeEvent(regarding, related, metav1.MicroTime{Time: time.Now()}, v1.EventTypeNormal, "test", "some verbose message: 1", "eventTest", "eventTest-"+hostname, "started")
 	cachedEvent.Series = &eventsv1.EventSeries{
 		Count:            10,
 		LastObservedTime: LastObservedTime,
@@ -20,7 +20,6 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"k8s.io/apimachinery/pkg/util/wait"
 	"sync"
 	"testing"
 	"time"
||||||
@ -32,6 +31,7 @@ import (
|
|||||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||||
"k8s.io/apimachinery/pkg/runtime"
|
"k8s.io/apimachinery/pkg/runtime"
|
||||||
"k8s.io/apimachinery/pkg/util/diff"
|
"k8s.io/apimachinery/pkg/util/diff"
|
||||||
|
"k8s.io/apimachinery/pkg/util/wait"
|
||||||
"k8s.io/client-go/kubernetes/fake"
|
"k8s.io/client-go/kubernetes/fake"
|
||||||
fakeclient "k8s.io/client-go/testing"
|
fakeclient "k8s.io/client-go/testing"
|
||||||
rl "k8s.io/client-go/tools/leaderelection/resourcelock"
|
rl "k8s.io/client-go/tools/leaderelection/resourcelock"
|
||||||
@@ -362,8 +362,8 @@ func TestLeaseSpecToLeaderElectionRecordRoundTrip(t *testing.T) {
 	oldSpec := coordinationv1.LeaseSpec{
 		HolderIdentity:       &holderIdentity,
 		LeaseDurationSeconds: &leaseDurationSeconds,
-		AcquireTime:          &metav1.MicroTime{time.Now()},
-		RenewTime:            &metav1.MicroTime{time.Now()},
+		AcquireTime:          &metav1.MicroTime{Time: time.Now()},
+		RenewTime:            &metav1.MicroTime{Time: time.Now()},
 		LeaseTransitions:     &leaseTransitions,
 	}
 
@@ -117,10 +117,10 @@ func LeaseSpecToLeaderElectionRecord(spec *coordinationv1.LeaseSpec) *LeaderElec
 		r.LeaderTransitions = int(*spec.LeaseTransitions)
 	}
 	if spec.AcquireTime != nil {
-		r.AcquireTime = metav1.Time{spec.AcquireTime.Time}
+		r.AcquireTime = metav1.Time{Time: spec.AcquireTime.Time}
 	}
 	if spec.RenewTime != nil {
-		r.RenewTime = metav1.Time{spec.RenewTime.Time}
+		r.RenewTime = metav1.Time{Time: spec.RenewTime.Time}
 	}
 	return &r
 
@@ -132,8 +132,8 @@ func LeaderElectionRecordToLeaseSpec(ler *LeaderElectionRecord) coordinationv1.L
 	return coordinationv1.LeaseSpec{
 		HolderIdentity:       &ler.HolderIdentity,
 		LeaseDurationSeconds: &leaseDurationSeconds,
-		AcquireTime:          &metav1.MicroTime{ler.AcquireTime.Time},
-		RenewTime:            &metav1.MicroTime{ler.RenewTime.Time},
+		AcquireTime:          &metav1.MicroTime{Time: ler.AcquireTime.Time},
+		RenewTime:            &metav1.MicroTime{Time: ler.RenewTime.Time},
 		LeaseTransitions:     &leaseTransitions,
 	}
 }