fix staticcheck failures for test/e2e and test/integration

Sai Harsha Kottapalli 2020-10-04 03:02:51 +05:30
parent 23a46d8843
commit a2ef69060a
5 changed files with 42 additions and 33 deletions


@@ -1,7 +1,6 @@
cluster/images/etcd/migrate
pkg/controller/replicaset
pkg/kubelet/dockershim
test/e2e/autoscaling
test/integration/garbagecollector
test/integration/scheduler_perf
vendor/k8s.io/apimachinery/pkg/api/apitesting/roundtrip


@@ -54,6 +54,7 @@ go_library(
"//vendor/github.com/onsi/ginkgo:go_default_library",
"//vendor/golang.org/x/oauth2/google:go_default_library",
"//vendor/google.golang.org/api/monitoring/v3:go_default_library",
"//vendor/google.golang.org/api/option:go_default_library",
"//vendor/k8s.io/klog/v2:go_default_library",
],
)


@@ -22,6 +22,7 @@ import (
"time"
gcm "google.golang.org/api/monitoring/v3"
"google.golang.org/api/option"
appsv1 "k8s.io/api/apps/v1"
as "k8s.io/api/autoscaling/v2beta1"
v1 "k8s.io/api/core/v1"
@@ -233,6 +234,9 @@ func (tc *CustomMetricTestCase) Run() {
ctx := context.Background()
client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
if err != nil {
framework.Failf("Failed to initialize gcm default client, %v", err)
}
// Hack for running tests locally, needed to authenticate in Stackdriver
// If this is your use case, create application default credentials:
@@ -247,7 +251,7 @@ func (tc *CustomMetricTestCase) Run() {
client := oauth2.NewClient(oauth2.NoContext, ts)
*/
gcmService, err := gcm.New(client)
gcmService, err := gcm.NewService(ctx, option.WithHTTPClient(client))
if err != nil {
framework.Failf("Failed to create gcm service, %v", err)
}
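
The change above swaps the deprecated gcm.New(client) constructor for gcm.NewService(ctx, option.WithHTTPClient(client)), and the BUILD hunk earlier adds the matching //vendor/google.golang.org/api/option:go_default_library dependency. Below is a minimal, self-contained sketch of that constructor migration, assuming only the public google.golang.org/api/monitoring/v3 and option packages; newMonitoringService and the main wrapper are illustrative, not part of the test.

package main

import (
	"context"
	"log"

	"golang.org/x/oauth2/google"
	gcm "google.golang.org/api/monitoring/v3"
	"google.golang.org/api/option"
)

// newMonitoringService mirrors the updated call sequence: build an HTTP
// client from application default credentials, then hand it to NewService
// through option.WithHTTPClient. The deprecated form was gcm.New(client).
func newMonitoringService(ctx context.Context) (*gcm.Service, error) {
	client, err := google.DefaultClient(ctx, gcm.CloudPlatformScope)
	if err != nil {
		return nil, err
	}
	return gcm.NewService(ctx, option.WithHTTPClient(client))
}

func main() {
	if _, err := newMonitoringService(context.Background()); err != nil {
		log.Fatalf("failed to create gcm service: %v", err)
	}
}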


@@ -29,7 +29,6 @@ import (
// These tests don't seem to be running properly in parallel: issue: #20338.
//
var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: CPU)", func() {
var rc *e2eautoscaling.ResourceConsumer
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5"
@@ -38,20 +37,20 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
SIGDescribe("[Serial] [Slow] Deployment", func() {
// CPU tests via deployments
ginkgo.It(titleUp, func() {
scaleUp("test-deployment", e2eautoscaling.KindDeployment, false, rc, f)
scaleUp("test-deployment", e2eautoscaling.KindDeployment, false, f)
})
ginkgo.It(titleDown, func() {
scaleDown("test-deployment", e2eautoscaling.KindDeployment, false, rc, f)
scaleDown("test-deployment", e2eautoscaling.KindDeployment, false, f)
})
})
SIGDescribe("[Serial] [Slow] ReplicaSet", func() {
// CPU tests via ReplicaSets
ginkgo.It(titleUp, func() {
scaleUp("rs", e2eautoscaling.KindReplicaSet, false, rc, f)
scaleUp("rs", e2eautoscaling.KindReplicaSet, false, f)
})
ginkgo.It(titleDown, func() {
scaleDown("rs", e2eautoscaling.KindReplicaSet, false, rc, f)
scaleDown("rs", e2eautoscaling.KindReplicaSet, false, f)
})
})
@@ -59,10 +58,10 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
SIGDescribe("[Serial] [Slow] ReplicationController", func() {
// CPU tests via replication controllers
ginkgo.It(titleUp+" and verify decision stability", func() {
scaleUp("rc", e2eautoscaling.KindRC, true, rc, f)
scaleUp("rc", e2eautoscaling.KindRC, true, f)
})
ginkgo.It(titleDown+" and verify decision stability", func() {
scaleDown("rc", e2eautoscaling.KindRC, true, rc, f)
scaleDown("rc", e2eautoscaling.KindRC, true, f)
})
})
@@ -77,7 +76,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
maxPods: 2,
firstScale: 2,
}
scaleTest.run("rc-light", e2eautoscaling.KindRC, rc, f)
scaleTest.run("rc-light", e2eautoscaling.KindRC, f)
})
ginkgo.It("Should scale from 2 pods to 1 pod [Slow]", func() {
scaleTest := &HPAScaleTest{
@@ -89,7 +88,7 @@ var _ = SIGDescribe("[Feature:HPA] Horizontal pod autoscaling (scale resource: C
maxPods: 2,
firstScale: 1,
}
scaleTest.run("rc-light", e2eautoscaling.KindRC, rc, f)
scaleTest.run("rc-light", e2eautoscaling.KindRC, f)
})
})
})
@@ -106,7 +105,6 @@ type HPAScaleTest struct {
firstScaleStasis time.Duration
cpuBurst int
secondScale int32
secondScaleStasis time.Duration
}
// run is a method which runs an HPA lifecycle, from a starting state, to an expected
@@ -114,9 +112,9 @@ type HPAScaleTest struct {
// The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts.
// The second state change (optional) is due to the CPU burst parameter, which HPA again responds to.
// TODO The use of 3 states is arbitrary, we could eventually make this test handle "n" states once this test stabilizes.
func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc *e2eautoscaling.ResourceConsumer, f *framework.Framework) {
func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, f *framework.Framework) {
const timeToWait = 15 * time.Minute
rc = e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.ScalesGetter)
rc := e2eautoscaling.NewDynamicResourceConsumer(name, f.Namespace.Name, kind, scaleTest.initPods, scaleTest.totalInitialCPUUsage, 0, 0, scaleTest.perPodCPURequest, 200, f.ClientSet, f.ScalesGetter)
defer rc.CleanUp()
hpa := e2eautoscaling.CreateCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
defer e2eautoscaling.DeleteHorizontalPodAutoscaler(rc, hpa.Name)
@@ -131,7 +129,7 @@ func (scaleTest *HPAScaleTest) run(name string, kind schema.GroupVersionKind, rc
}
}
func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, rc *e2eautoscaling.ResourceConsumer, f *framework.Framework) {
func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, f *framework.Framework) {
stasis := 0 * time.Minute
if checkStability {
stasis = 10 * time.Minute
@@ -148,10 +146,10 @@ func scaleUp(name string, kind schema.GroupVersionKind, checkStability bool, rc
cpuBurst: 700,
secondScale: 5,
}
scaleTest.run(name, kind, rc, f)
scaleTest.run(name, kind, f)
}
func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, rc *e2eautoscaling.ResourceConsumer, f *framework.Framework) {
func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, f *framework.Framework) {
stasis := 0 * time.Minute
if checkStability {
stasis = 10 * time.Minute
@@ -168,5 +166,5 @@ func scaleDown(name string, kind schema.GroupVersionKind, checkStability bool, r
cpuBurst: 10,
secondScale: 1,
}
scaleTest.run(name, kind, rc, f)
scaleTest.run(name, kind, f)
}
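
In the HPA test above, every function that received the rc argument overwrote it before reading it (run immediately assigns the result of NewDynamicResourceConsumer), so the shared variable and the parameter are dropped and rc becomes a local declaration inside run. A minimal sketch of the same fix, assuming this is the "argument overwritten before first use" class of staticcheck finding (SA4009); consumer and newConsumer are illustrative names, not from the test.

package main

type consumer struct{}

func newConsumer() *consumer { return &consumer{} }
func (c *consumer) cleanUp() {}

// before: the incoming rc is never read, it is overwritten on the first line,
// so the argument (and the package-level variable that fed it) is dead weight.
func runBefore(rc *consumer) {
	rc = newConsumer()
	defer rc.cleanUp()
}

// after: drop the parameter and declare rc where it is created, mirroring the
// local NewDynamicResourceConsumer call in the updated test.
func runAfter() {
	rc := newConsumer()
	defer rc.cleanUp()
}

func main() {
	runBefore(nil)
	runAfter()
}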


@@ -366,17 +366,16 @@ func testCrossNamespaceReferences(t *testing.T, watchCache bool) {
}
invalidOwnerReferences = append(invalidOwnerReferences, metav1.OwnerReference{Name: "invalid", UID: parent.UID, APIVersion: "v1", Kind: "Pod", Controller: pointer.BoolPtr(false)})
invalidUIDs := []types.UID{}
for i := 0; i < workers; i++ {
invalidChildType1, err := clientSet.CoreV1().ConfigMaps(namespaceA).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{GenerateName: "invalid-child-", OwnerReferences: invalidOwnerReferences}}, metav1.CreateOptions{})
_, err := clientSet.CoreV1().ConfigMaps(namespaceA).Create(context.TODO(), &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{GenerateName: "invalid-child-", OwnerReferences: invalidOwnerReferences}}, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
invalidChildType2, err := clientSet.CoreV1().Secrets(namespaceA).Create(context.TODO(), &v1.Secret{ObjectMeta: metav1.ObjectMeta{GenerateName: "invalid-child-a-", OwnerReferences: invalidOwnerReferences}}, metav1.CreateOptions{})
_, err = clientSet.CoreV1().Secrets(namespaceA).Create(context.TODO(), &v1.Secret{ObjectMeta: metav1.ObjectMeta{GenerateName: "invalid-child-a-", OwnerReferences: invalidOwnerReferences}}, metav1.CreateOptions{})
if err != nil {
t.Fatal(err)
}
invalidChildType3, err := clientSet.CoreV1().Secrets(namespaceA).Create(context.TODO(), &v1.Secret{
_, err = clientSet.CoreV1().Secrets(namespaceA).Create(context.TODO(), &v1.Secret{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"single-bad-reference": "true"},
GenerateName: "invalid-child-b-",
@@ -386,7 +385,6 @@ func testCrossNamespaceReferences(t *testing.T, watchCache bool) {
if err != nil {
t.Fatal(err)
}
invalidUIDs = append(invalidUIDs, invalidChildType1.UID, invalidChildType2.UID, invalidChildType3.UID)
}
// start GC with existing objects in place to simulate controller-manager restart
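
In the two hunks above, invalidChildType1/2/3 were only collected into invalidUIDs, which itself was never read, so the created objects are now discarded with the blank identifier and only the error is checked. A tiny self-contained sketch of that unused-value fix; create is an illustrative stand-in for the client-go Create calls.

package main

import "log"

// create stands in for the client-go Create calls; once the collected UIDs
// are gone, only the error result matters.
func create(generateName string) (string, error) {
	return generateName + "abc123", nil
}

func main() {
	// before: obj was assigned but never read once the UID slice was removed,
	// which staticcheck reports as an unused value.
	// obj, err := create("invalid-child-")

	// after: discard the object, keep the error check.
	_, err := create("invalid-child-")
	if err != nil {
		log.Fatal(err)
	}
}
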
@@ -572,7 +570,7 @@ func TestCreateWithNonExistentOwner(t *testing.T) {
}
}
func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet clientset.Interface, nameSuffix, namespace string, initialFinalizers []string, options metav1.DeleteOptions, wg *sync.WaitGroup, rcUIDs chan types.UID) {
func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet clientset.Interface, nameSuffix, namespace string, initialFinalizers []string, options metav1.DeleteOptions, wg *sync.WaitGroup, rcUIDs chan types.UID, errs chan string) {
defer wg.Done()
rcClient := clientSet.CoreV1().ReplicationControllers(namespace)
podClient := clientSet.CoreV1().Pods(namespace)
@@ -582,7 +580,7 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet
rc.ObjectMeta.Finalizers = initialFinalizers
rc, err := rcClient.Create(context.TODO(), rc, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Failed to create replication controller: %v", err)
errs <- fmt.Sprintf("Failed to create replication controller: %v", err)
return
}
rcUIDs <- rc.ObjectMeta.UID
// create pods.
@@ -592,7 +591,8 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet
pod := newPod(podName, namespace, []metav1.OwnerReference{{UID: rc.ObjectMeta.UID, Name: rc.ObjectMeta.Name}})
createdPod, err := podClient.Create(context.TODO(), pod, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Failed to create Pod: %v", err)
errs <- fmt.Sprintf("Failed to create Pod: %v", err)
return
}
podUIDs = append(podUIDs, createdPod.ObjectMeta.UID)
}
@@ -624,12 +624,14 @@ func setupRCsPods(t *testing.T, gc *garbagecollector.GarbageCollector, clientSet
return true, nil
})
if err != nil {
t.Fatalf("failed to observe the expected pods in the GC graph for rc %s", rcName)
errs <- fmt.Sprintf("failed to observe the expected pods in the GC graph for rc %s", rcName)
return
}
}
// delete the rc
if err := rcClient.Delete(context.TODO(), rc.ObjectMeta.Name, options); err != nil {
t.Fatalf("failed to delete replication controller: %v", err)
errs <- fmt.Sprintf("failed to delete replication controller: %v", err)
return
}
}
@@ -672,19 +674,24 @@ func TestStressingCascadingDeletion(t *testing.T) {
var wg sync.WaitGroup
wg.Add(collections * 5)
rcUIDs := make(chan types.UID, collections*5)
errs := make(chan string, 5)
for i := 0; i < collections; i++ {
// rc is created with empty finalizers, deleted with nil delete options, pods will remain.
go setupRCsPods(t, gc, clientSet, "collection1-"+strconv.Itoa(i), ns.Name, []string{}, metav1.DeleteOptions{}, &wg, rcUIDs)
go setupRCsPods(t, gc, clientSet, "collection1-"+strconv.Itoa(i), ns.Name, []string{}, metav1.DeleteOptions{}, &wg, rcUIDs, errs)
// rc is created with the orphan finalizer, deleted with nil options, pods will remain.
go setupRCsPods(t, gc, clientSet, "collection2-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, metav1.DeleteOptions{}, &wg, rcUIDs)
go setupRCsPods(t, gc, clientSet, "collection2-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, metav1.DeleteOptions{}, &wg, rcUIDs, errs)
// rc is created with the orphan finalizer, deleted with DeleteOptions.OrphanDependents=false, pods will be deleted.
go setupRCsPods(t, gc, clientSet, "collection3-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, getNonOrphanOptions(), &wg, rcUIDs)
go setupRCsPods(t, gc, clientSet, "collection3-"+strconv.Itoa(i), ns.Name, []string{metav1.FinalizerOrphanDependents}, getNonOrphanOptions(), &wg, rcUIDs, errs)
// rc is created with empty finalizers, deleted with DeleteOptions.OrphanDependents=true, pods will remain.
go setupRCsPods(t, gc, clientSet, "collection4-"+strconv.Itoa(i), ns.Name, []string{}, getOrphanOptions(), &wg, rcUIDs)
go setupRCsPods(t, gc, clientSet, "collection4-"+strconv.Itoa(i), ns.Name, []string{}, getOrphanOptions(), &wg, rcUIDs, errs)
// rc is created with empty finalizers, deleted with DeleteOptions.PropagationPolicy=Orphan, pods will remain.
go setupRCsPods(t, gc, clientSet, "collection5-"+strconv.Itoa(i), ns.Name, []string{}, getPropagateOrphanOptions(), &wg, rcUIDs)
go setupRCsPods(t, gc, clientSet, "collection5-"+strconv.Itoa(i), ns.Name, []string{}, getPropagateOrphanOptions(), &wg, rcUIDs, errs)
}
wg.Wait()
close(errs)
for errString := range errs {
t.Fatalf(errString)
}
t.Logf("all pods are created, all replications controllers are created then deleted")
// wait for the RCs and Pods to reach the expected numbers.
if err := wait.Poll(1*time.Second, 300*time.Second, func() (bool, error) {
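
The remaining hunks rework setupRCsPods so that worker goroutines report failures over the buffered errs channel instead of calling t.Fatalf, and TestStressingCascadingDeletion drains the channel after wg.Wait and fails on the main test goroutine, since FailNow (which t.Fatal/t.Fatalf call) must not run outside the test's own goroutine (staticcheck SA2002). A condensed, self-contained sketch of that pattern in a _test.go file; runWorkers, doWork, and TestWorkers are illustrative names, not the test's own.

package example

import (
	"fmt"
	"sync"
	"testing"
)

// doWork stands in for one setupRCsPods call.
func doWork(id int) error {
	if id < 0 {
		return fmt.Errorf("bad id %d", id)
	}
	return nil
}

// runWorkers fans out n workers. Each worker sends a failure message on errs
// instead of calling t.Fatalf itself; the channel is buffered so a failing
// worker never blocks after the test has moved on.
func runWorkers(t *testing.T, n int) {
	var wg sync.WaitGroup
	errs := make(chan string, n)

	wg.Add(n)
	for i := 0; i < n; i++ {
		go func(id int) {
			defer wg.Done()
			if err := doWork(id); err != nil {
				errs <- fmt.Sprintf("worker %d: %v", id, err)
			}
		}(i)
	}
	wg.Wait()
	close(errs)

	// Back on the test goroutine, where t.Fatal is allowed.
	for msg := range errs {
		t.Fatal(msg)
	}
}

func TestWorkers(t *testing.T) { runWorkers(t, 5) }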