mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-08-03 17:30:00 +00:00
Fix staticcheck in apiserver and client-go pkgs
parent 7163f8f810
commit 830a137d2e

@@ -1,9 +1,6 @@
 vendor/k8s.io/apiserver/pkg/server/dynamiccertificates
 vendor/k8s.io/apiserver/pkg/server/filters
 vendor/k8s.io/apiserver/pkg/server/routes
-vendor/k8s.io/apiserver/pkg/storage/cacher
-vendor/k8s.io/apiserver/pkg/storage/tests
 vendor/k8s.io/apiserver/pkg/storage/value/encrypt/envelope
 vendor/k8s.io/apiserver/pkg/util/wsstream
 vendor/k8s.io/client-go/rest
-vendor/k8s.io/client-go/rest/watch
@@ -18,6 +18,7 @@ package cacher

 import (
 	"context"
+	"errors"
 	"fmt"
 	"reflect"
 	goruntime "runtime"
@@ -649,6 +650,7 @@ func TestCacherNoLeakWithMultipleWatchers(t *testing.T) {

 	// run the collision test for 3 seconds to let ~2 buckets expire
 	stopCh := make(chan struct{})
+	var watchErr error
 	time.AfterFunc(3*time.Second, func() { close(stopCh) })

 	wg := &sync.WaitGroup{}
@@ -664,7 +666,8 @@ func TestCacherNoLeakWithMultipleWatchers(t *testing.T) {
 			ctx, _ := context.WithTimeout(context.Background(), 3*time.Second)
 			w, err := cacher.Watch(ctx, "pods/ns", storage.ListOptions{ResourceVersion: "0", Predicate: pred})
 			if err != nil {
-				t.Fatalf("Failed to create watch: %v", err)
+				watchErr = fmt.Errorf("Failed to create watch: %v", err)
+				return
 			}
 			w.Stop()
 		}
@@ -687,6 +690,10 @@ func TestCacherNoLeakWithMultipleWatchers(t *testing.T) {
 	// wait for adding/removing watchers to end
 	wg.Wait()

+	if watchErr != nil {
+		t.Fatal(watchErr)
+	}
+
 	// wait out the expiration period and pop expired watchers
 	time.Sleep(2 * time.Second)
 	cacher.bookmarkWatchers.popExpiredWatchers()
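The hunks above work around staticcheck's complaint about calling t.Fatal/t.Fatalf from a goroutine other than the one running the test: per the testing package docs, FailNow (and therefore Fatal) stops execution via runtime.Goexit and must be called from the test goroutine. The fix records the failure and reports it after wg.Wait(). A minimal sketch of that pattern follows; the names (doWork, workErr) are illustrative, not taken from the commit.

```go
package example

import (
	"fmt"
	"sync"
	"testing"
)

// doWork is a stand-in for whatever the goroutine actually does.
func doWork(i int) error {
	if i < 0 {
		return fmt.Errorf("bad input %d", i)
	}
	return nil
}

func TestWorker(t *testing.T) {
	var (
		wg      sync.WaitGroup
		workErr error // written only by the single worker goroutine below
	)
	wg.Add(1)
	go func() {
		defer wg.Done()
		if err := doWork(1); err != nil {
			// Not t.Fatalf: Fatal/FailNow must run on the test goroutine.
			workErr = fmt.Errorf("worker failed: %v", err)
			return
		}
	}()
	wg.Wait() // happens-before edge, so reading workErr here is race-free
	if workErr != nil {
		t.Fatal(workErr)
	}
}
```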
@@ -742,6 +749,7 @@ func testCacherSendBookmarkEvents(t *testing.T, allowWatchBookmarks, expectedBoo
 	}

 	resourceVersion := uint64(1000)
+	errc := make(chan error, 1)
 	go func() {
 		deadline := time.Now().Add(time.Second)
 		for i := 0; time.Now().Before(deadline); i++ {
@@ -752,7 +760,8 @@ func testCacherSendBookmarkEvents(t *testing.T, allowWatchBookmarks, expectedBoo
 				ResourceVersion: fmt.Sprintf("%v", resourceVersion+uint64(i)),
 			}})
 			if err != nil {
-				t.Fatalf("failed to add a pod: %v", err)
+				errc <- fmt.Errorf("failed to add a pod: %v", err)
+				return
 			}
 			time.Sleep(100 * time.Millisecond)
 		}
@@ -762,6 +771,9 @@ func testCacherSendBookmarkEvents(t *testing.T, allowWatchBookmarks, expectedBoo
 	lastObservedRV := uint64(0)
 	for {
 		select {
+		case err := <-errc:
+			t.Fatal(err)
+			return
 		case event, ok := <-w.ResultChan():
 			if !ok {
 				t.Fatal("Unexpected closed")
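The second pattern in this commit is a buffered error channel: the producer goroutine sends its failure on errc and returns, and the consuming side selects on errc next to the channel it is draining, so Fatal still runs on the test goroutine. A hedged, self-contained sketch of the same shape; the helper and channel names are illustrative, not from the commit.

```go
package example

import (
	"fmt"
	"testing"
	"time"
)

// produce is a stand-in for the work whose failure must reach the test goroutine.
func produce(i int) (int, error) {
	if i < 0 {
		return 0, fmt.Errorf("cannot produce item %d", i)
	}
	return i * i, nil
}

func TestProducer(t *testing.T) {
	results := make(chan int)
	errc := make(chan error, 1) // buffered: the producer never blocks on failure

	go func() {
		defer close(results)
		for i := 0; i < 5; i++ {
			v, err := produce(i)
			if err != nil {
				errc <- err
				return
			}
			results <- v
		}
	}()

	for {
		select {
		case err := <-errc:
			t.Fatal(err)
		case v, ok := <-results:
			if !ok {
				// Producer exited; it may have reported an error just before closing.
				select {
				case err := <-errc:
					t.Fatal(err)
				default:
				}
				return
			}
			t.Logf("got %d", v)
		case <-time.After(time.Second):
			t.Fatal("timed out waiting for the producer")
		}
	}
}
```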
@@ -945,7 +957,6 @@ func TestDispatchingBookmarkEventsWithConcurrentStop(t *testing.T) {

 		select {
 		case <-done:
-			break
 		case <-time.After(time.Second):
 			t.Fatal("receive result timeout")
 		}
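This hunk is a different kind of staticcheck fix: Go select (and switch) cases never fall through, so a bare break as the last statement of a case is a no-op, which staticcheck reports as redundant control flow (likely its "omit redundant control flow" check). A tiny illustration, not from the commit:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	done := make(chan struct{})
	close(done)

	select {
	case <-done:
		fmt.Println("done")
		break // redundant: the case ends here anyway, nothing falls through
	case <-time.After(time.Second):
		fmt.Println("timeout")
	}
}
```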
@@ -994,6 +1005,8 @@ func TestBookmarksOnResourceVersionUpdates(t *testing.T) {

 	expectedRV := 2000

+	var rcErr error
+
 	wg := sync.WaitGroup{}
 	wg.Add(1)
 	go func() {
@@ -1001,7 +1014,8 @@ func TestBookmarksOnResourceVersionUpdates(t *testing.T) {
 		for {
 			event, ok := <-w.ResultChan()
 			if !ok {
-				t.Fatalf("Unexpected closed channel")
+				rcErr = errors.New("Unexpected closed channel")
+				return
 			}
 			rv, err := cacher.versioner.ObjectResourceVersion(event.Object)
 			if err != nil {
@@ -1017,6 +1031,9 @@ func TestBookmarksOnResourceVersionUpdates(t *testing.T) {
 	cacher.watchCache.UpdateResourceVersion(strconv.Itoa(expectedRV))

 	wg.Wait()
+	if rcErr != nil {
+		t.Fatal(rcErr)
+	}
 }

 type fakeTimeBudget struct{}
@@ -149,7 +149,8 @@ func TestCachingObjectRaces(t *testing.T) {
 			}
 			accessor, err := meta.Accessor(object.GetObject())
 			if err != nil {
-				t.Fatalf("failed to get accessor: %v", err)
+				t.Errorf("failed to get accessor: %v", err)
+				return
 			}
 			if selfLink := accessor.GetSelfLink(); selfLink != "selfLink" {
 				t.Errorf("unexpected selfLink: %s", selfLink)
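Here the fix switches to t.Errorf plus an early return rather than routing the error back to the test goroutine: unlike Fatal and FailNow, Error and Errorf only mark the test as failed and are documented as safe to call from other goroutines. A hedged sketch of that shape; fetch and the loop are illustrative, not the commit's code.

```go
package example

import (
	"fmt"
	"sync"
	"testing"
)

// fetch is a stand-in for the call whose error the worker must report.
func fetch(id int) (string, error) {
	if id < 0 {
		return "", fmt.Errorf("invalid id %d", id)
	}
	return fmt.Sprintf("value-%d", id), nil
}

func TestFetchConcurrently(t *testing.T) {
	var wg sync.WaitGroup
	for id := 0; id < 4; id++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			v, err := fetch(id)
			if err != nil {
				t.Errorf("fetch(%d) failed: %v", id, err) // Errorf is goroutine-safe
				return                                    // skip the checks below
			}
			if v == "" {
				t.Errorf("fetch(%d) returned an empty value", id)
			}
		}(id)
	}
	wg.Wait()
}
```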
@@ -887,6 +887,7 @@ func TestWatchBookmarksWithCorrectResourceVersion(t *testing.T) {
 	defer watcher.Stop()

 	done := make(chan struct{})
+	errc := make(chan error, 1)
 	var wg sync.WaitGroup
 	wg.Add(1)
 	defer wg.Wait() // We must wait for the waitgroup to exit before we terminate the cache or the server in prior defers
@@ -901,7 +902,8 @@ func TestWatchBookmarksWithCorrectResourceVersion(t *testing.T) {
 		pod := fmt.Sprintf("foo-%d", i)
 		err := createPod(etcdStorage, makeTestPod(pod))
 		if err != nil {
-			t.Fatalf("failed to create pod %v: %v", pod, err)
+			errc <- fmt.Errorf("failed to create pod %v: %v", pod, err)
+			return
 		}
 		time.Sleep(time.Second / 100)
 	}
@@ -910,27 +912,36 @@ func TestWatchBookmarksWithCorrectResourceVersion(t *testing.T) {

 	bookmarkReceived := false
 	lastObservedResourceVersion := uint64(0)
-	for event := range watcher.ResultChan() {
-		rv, err := v.ObjectResourceVersion(event.Object)
-		if err != nil {
-			t.Fatalf("failed to parse resourceVersion from %#v", event)
-		}
-		if event.Type == watch.Bookmark {
-			bookmarkReceived = true
-			// bookmark event has a RV greater than or equal to the before one
-			if rv < lastObservedResourceVersion {
-				t.Fatalf("Unexpected bookmark resourceVersion %v less than observed %v)", rv, lastObservedResourceVersion)
-			}
-		} else {
-			// non-bookmark event has a RV greater than anything before
-			if rv <= lastObservedResourceVersion {
-				t.Fatalf("Unexpected event resourceVersion %v less than or equal to bookmark %v)", rv, lastObservedResourceVersion)
-			}
-		}
-		lastObservedResourceVersion = rv
-	}
-	// Make sure we have received a bookmark event
-	if !bookmarkReceived {
-		t.Fatalf("Unpexected error, we did not received a bookmark event")
-	}
+	for {
+		select {
+		case err := <-errc:
+			t.Fatal(err)
+		case event, ok := <-watcher.ResultChan():
+			if !ok {
+				// Make sure we have received a bookmark event
+				if !bookmarkReceived {
+					t.Fatalf("Unpexected error, we did not received a bookmark event")
+				}
+				return
+			}
+			rv, err := v.ObjectResourceVersion(event.Object)
+			if err != nil {
+				t.Fatalf("failed to parse resourceVersion from %#v", event)
+			}
+			if event.Type == watch.Bookmark {
+				bookmarkReceived = true
+				// bookmark event has a RV greater than or equal to the before one
+				if rv < lastObservedResourceVersion {
+					t.Fatalf("Unexpected bookmark resourceVersion %v less than observed %v)", rv, lastObservedResourceVersion)
+				}
+			} else {
+				// non-bookmark event has a RV greater than anything before
+				if rv <= lastObservedResourceVersion {
+					t.Fatalf("Unexpected event resourceVersion %v less than or equal to bookmark %v)", rv, lastObservedResourceVersion)
+				}
+			}
+			lastObservedResourceVersion = rv
+		}
+	}
 }
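The larger rewrite above turns a `for event := range watcher.ResultChan()` loop into `for { select { ... } }` so the new errc channel can be observed, and moves the final "did we see a bookmark" assertion into the comma-ok branch that runs when the result channel closes. A simplified sketch of that shape, with illustrative names rather than the commit's code:

```go
package example

import "testing"

func TestConsumeUntilClosed(t *testing.T) {
	events := make(chan int, 3)
	errc := make(chan error, 1)
	sawPositive := false

	go func() {
		for _, e := range []int{1, 2, 3} {
			events <- e
		}
		close(events)
	}()

	for {
		select {
		case err := <-errc:
			t.Fatal(err)
		case e, ok := <-events:
			if !ok {
				// Channel closed: run the end-of-stream assertion here, then return.
				if !sawPositive {
					t.Fatalf("expected at least one positive event")
				}
				return
			}
			if e > 0 {
				sawPositive = true
			}
		}
	}
}
```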
@@ -185,12 +185,13 @@ func TestTimeouts(t *testing.T) {
 func TestIntermittentConnectionLoss(t *testing.T) {
 	t.Parallel()
 	var (
 		wg1        sync.WaitGroup
 		wg2        sync.WaitGroup
 		timeout    = 30 * time.Second
 		blackOut   = 1 * time.Second
 		data       = []byte("test data")
 		endpoint   = newEndpoint()
+		encryptErr error
 	)
 	// Start KMS Plugin
 	f, err := mock.NewBase64Plugin(endpoint.path)
@@ -228,7 +229,7 @@ func TestIntermittentConnectionLoss(t *testing.T) {
 		wg1.Done()
 		_, err := service.Encrypt(data)
 		if err != nil {
-			t.Fatalf("failed when executing encrypt, error: %v", err)
+			encryptErr = fmt.Errorf("failed when executing encrypt, error: %v", err)
 		}
 	}()

@@ -246,6 +247,10 @@ func TestIntermittentConnectionLoss(t *testing.T) {
 	t.Log("Restarted KMS Plugin")

 	wg2.Wait()
+
+	if encryptErr != nil {
+		t.Error(encryptErr)
+	}
 }

 func TestUnsupportedVersion(t *testing.T) {
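In the KMS/envelope test the same record-then-check shape is used with a plain variable (encryptErr) inspected after wg2.Wait(). As an aside, an alternative many Go test suites use for this is golang.org/x/sync/errgroup, where Wait returns the first error from any goroutine. A hedged sketch, assuming that module is available; this is not what the commit does, and encrypt here is a stand-in helper.

```go
package example

import (
	"fmt"
	"testing"

	"golang.org/x/sync/errgroup"
)

// encrypt stands in for the KMS service call made from the goroutine.
func encrypt(data []byte) ([]byte, error) {
	if len(data) == 0 {
		return nil, fmt.Errorf("no data to encrypt")
	}
	return append([]byte("enc:"), data...), nil
}

func TestEncryptDuringRestart(t *testing.T) {
	var g errgroup.Group
	g.Go(func() error {
		if _, err := encrypt([]byte("test data")); err != nil {
			return fmt.Errorf("failed when executing encrypt: %v", err)
		}
		return nil
	})
	// ... the test could restart the plugin here while the call is in flight ...
	if err := g.Wait(); err != nil {
		t.Fatal(err) // back on the test goroutine, so Fatal is fine
	}
}
```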
@@ -18,6 +18,7 @@ package versioned_test

 import (
 	"encoding/json"
+	"fmt"
 	"io"
 	"testing"
 	"time"
@@ -51,10 +52,13 @@ func TestDecoder(t *testing.T) {
 	expect := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}
 	encoder := json.NewEncoder(in)
 	eType := eventType
+	errc := make(chan error)
+
 	go func() {
 		data, err := runtime.Encode(scheme.Codecs.LegacyCodec(v1.SchemeGroupVersion), expect)
 		if err != nil {
-			t.Fatalf("Unexpected error %v", err)
+			errc <- fmt.Errorf("Unexpected error %v", err)
+			return
 		}
 		event := metav1.WatchEvent{
 			Type: string(eType),
@@ -70,7 +74,8 @@ func TestDecoder(t *testing.T) {
 	go func() {
 		action, got, err := decoder.Decode()
 		if err != nil {
-			t.Fatalf("Unexpected error %v", err)
+			errc <- fmt.Errorf("Unexpected error %v", err)
+			return
 		}
 		if e, a := eType, action; e != a {
 			t.Errorf("Expected %v, got %v", e, a)
@@ -81,7 +86,11 @@ func TestDecoder(t *testing.T) {
 		t.Logf("Exited read")
 		close(done)
 	}()
-	<-done
+	select {
+	case err := <-errc:
+		t.Fatal(err)
+	case <-done:
+	}

 	done = make(chan struct{})
 	go func() {
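The decoder test's final change replaces a bare `<-done` with a select over errc and done, so a failure in the reading goroutine surfaces as t.Fatal on the test goroutine instead of being lost. A minimal sketch of that wait, with illustrative names (decodeOnce is a stand-in for decoder.Decode()):

```go
package example

import (
	"fmt"
	"testing"
)

// decodeOnce stands in for the single decode the goroutine performs.
func decodeOnce() error {
	return nil
}

func TestDecodeOnce(t *testing.T) {
	done := make(chan struct{})
	errc := make(chan error, 1)

	go func() {
		if err := decodeOnce(); err != nil {
			// Report and bail out; done stays open so the select sees the error.
			errc <- fmt.Errorf("unexpected decode error: %v", err)
			return
		}
		close(done)
	}()

	select {
	case err := <-errc:
		t.Fatal(err)
	case <-done:
	}
}
```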