From 345e528c863d5281a42ed09c21c43c4409357671 Mon Sep 17 00:00:00 2001
From: Mark Janssen
Date: Wed, 21 Aug 2019 22:42:36 +0200
Subject: [PATCH 1/2] Fix staticcheck failures for pkg/scheduler/...

---
 hack/.staticcheck_failures                            |  5 -----
 pkg/scheduler/algorithm/predicates/metadata_test.go   | 11 +----------
 .../algorithm/priorities/even_pods_spread_test.go     |  5 +----
 pkg/scheduler/api/v1/types.go                         |  4 ++--
 pkg/scheduler/internal/queue/scheduling_queue_test.go |  2 +-
 pkg/scheduler/scheduler_test.go                       |  6 +++++-
 6 files changed, 10 insertions(+), 23 deletions(-)

diff --git a/hack/.staticcheck_failures b/hack/.staticcheck_failures
index fc1300206df..14b41f0861a 100644
--- a/hack/.staticcheck_failures
+++ b/hack/.staticcheck_failures
@@ -47,11 +47,6 @@ pkg/registry/core/service/ipallocator
 pkg/registry/core/service/portallocator
 pkg/registry/core/service/storage
 pkg/registry/extensions/controller/storage
-pkg/scheduler
-pkg/scheduler/algorithm/predicates
-pkg/scheduler/algorithm/priorities
-pkg/scheduler/api/v1
-pkg/scheduler/internal/queue
 pkg/util/coverage
 pkg/util/ebtables
 pkg/util/ipconfig
diff --git a/pkg/scheduler/algorithm/predicates/metadata_test.go b/pkg/scheduler/algorithm/predicates/metadata_test.go
index b31dcec0a4a..51ff3d7d7a2 100644
--- a/pkg/scheduler/algorithm/predicates/metadata_test.go
+++ b/pkg/scheduler/algorithm/predicates/metadata_test.go
@@ -62,7 +62,7 @@ func predicateMetadataEquivalent(meta1, meta2 *predicateMetadata) error {
 	if meta1.podBestEffort != meta2.podBestEffort {
 		return fmt.Errorf("podBestEfforts are not equal")
 	}
-	if meta1.serviceAffinityInUse != meta1.serviceAffinityInUse {
+	if meta1.serviceAffinityInUse != meta2.serviceAffinityInUse {
 		return fmt.Errorf("serviceAffinityInUses are not equal")
 	}
 	if len(meta1.podPorts) != len(meta2.podPorts) {
@@ -1697,15 +1697,6 @@ var (
 	softSpread = v1.ScheduleAnyway
 )
 
-func newPairSet(kv ...string) topologyPairSet {
-	result := make(topologyPairSet)
-	for i := 0; i < len(kv); i += 2 {
-		pair := topologyPair{key: kv[i], value: kv[i+1]}
-		result[pair] = struct{}{}
-	}
-	return result
-}
-
 // sortCriticalPaths is only served for testing purpose.
 func (c *podSpreadCache) sortCriticalPaths() {
 	for _, paths := range c.tpKeyToCriticalPaths {
diff --git a/pkg/scheduler/algorithm/priorities/even_pods_spread_test.go b/pkg/scheduler/algorithm/priorities/even_pods_spread_test.go
index f330e341353..216f354f6db 100644
--- a/pkg/scheduler/algorithm/priorities/even_pods_spread_test.go
+++ b/pkg/scheduler/algorithm/priorities/even_pods_spread_test.go
@@ -493,7 +493,4 @@ func BenchmarkTestCalculateEvenPodsSpreadPriority(b *testing.B) {
 	}
 }
 
-var (
-	hardSpread = v1.DoNotSchedule
-	softSpread = v1.ScheduleAnyway
-)
+var softSpread = v1.ScheduleAnyway
diff --git a/pkg/scheduler/api/v1/types.go b/pkg/scheduler/api/v1/types.go
index 85c88bc3101..cf2e46b1a52 100644
--- a/pkg/scheduler/api/v1/types.go
+++ b/pkg/scheduler/api/v1/types.go
@@ -145,7 +145,7 @@ type UtilizationShapePoint struct {
 // ResourceSpec represents single resource and weight for bin packing of priority RequestedToCapacityRatioArguments.
 type ResourceSpec struct {
 	// Name of the resource to be managed by RequestedToCapacityRatio function.
-	Name apiv1.ResourceName `json:"name,casttype=ResourceName"`
+	Name apiv1.ResourceName `json:"name"`
 	// Weight of the resource.
 	Weight int64 `json:"weight,omitempty"`
 }
@@ -154,7 +154,7 @@ type ResourceSpec struct {
 // managed by an extender.
 type ExtenderManagedResource struct {
 	// Name is the extended resource name.
-	Name apiv1.ResourceName `json:"name,casttype=ResourceName"`
+	Name apiv1.ResourceName `json:"name"`
 	// IgnoredByScheduler indicates whether kube-scheduler should ignore this
 	// resource when applying predicates.
 	IgnoredByScheduler bool `json:"ignoredByScheduler,omitempty"`
diff --git a/pkg/scheduler/internal/queue/scheduling_queue_test.go b/pkg/scheduler/internal/queue/scheduling_queue_test.go
index 4365346cb76..c1a79712a76 100644
--- a/pkg/scheduler/internal/queue/scheduling_queue_test.go
+++ b/pkg/scheduler/internal/queue/scheduling_queue_test.go
@@ -35,7 +35,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/util"
 )
 
-var negPriority, lowPriority, midPriority, highPriority, veryHighPriority = int32(-100), int32(0), int32(100), int32(1000), int32(10000)
+var lowPriority, midPriority, highPriority = int32(0), int32(100), int32(1000)
 var mediumPriority = (lowPriority + highPriority) / 2
 var highPriorityPod, highPriNominatedPod, medPriorityPod, unschedulablePod = v1.Pod{
 	ObjectMeta: metav1.ObjectMeta{
diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go
index b02f8acee86..2fb3b1e4ff0 100644
--- a/pkg/scheduler/scheduler_test.go
+++ b/pkg/scheduler/scheduler_test.go
@@ -352,6 +352,7 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
 
 	waitPodExpireChan := make(chan struct{})
 	timeout := make(chan struct{})
+	errChan := make(chan error)
 	go func() {
 		for {
 			select {
@@ -361,7 +362,8 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
 			}
 			pods, err := scache.List(labels.Everything())
 			if err != nil {
-				t.Fatalf("cache.List failed: %v", err)
+				errChan <- fmt.Errorf("cache.List failed: %v", err)
+				return
 			}
 			if len(pods) == 0 {
 				close(waitPodExpireChan)
@@ -372,6 +374,8 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
 	}()
 	// waiting for the assumed pod to expire
 	select {
+	case err := <-errChan:
+		t.Fatal(err)
 	case <-waitPodExpireChan:
 	case <-time.After(wait.ForeverTestTimeout):
 		close(timeout)

From 1a1b7001d6e5dbc8f2f363a39e684fb58da35ddb Mon Sep 17 00:00:00 2001
From: Mark Janssen
Date: Fri, 23 Aug 2019 18:06:47 +0200
Subject: [PATCH 2/2] Fix staticcheck failures for scheduler packages

Errors from staticcheck:

cmd/kube-scheduler/app/server.go:297:27: prometheus.Handler is deprecated: Please note the issues described in the doc comment of InstrumentHandler. You might want to consider using promhttp.Handler instead. (SA1019)
pkg/apis/scheduling/v1alpha1/defaults.go:27:6: func addDefaultingFuncs is unused (U1000)
pkg/apis/scheduling/v1beta1/defaults.go:27:6: func addDefaultingFuncs is unused (U1000)
test/e2e/scheduling/predicates.go:757:6: func verifyReplicasResult is unused (U1000)
test/e2e/scheduling/predicates.go:765:6: func getPodsByLabels is unused (U1000)
test/e2e/scheduling/predicates.go:772:6: func runAndKeepPodWithLabelAndGetNodeName is unused (U1000)
test/e2e/scheduling/limit_range.go:172:3: this value of pod is never used (SA4006)
test/e2e/scheduling/limit_range.go:177:3: this value of pod is never used (SA4006)
test/e2e/scheduling/limit_range.go:196:3: this value of pod is never used (SA4006)
test/e2e/scheduling/limit_range.go:201:3: this value of pod is never used (SA4006)
test/e2e/scheduling/limit_range.go:240:3: this value of pod is never used (SA4006)
test/e2e/scheduling/taints.go:428:13: this value of err is never used (SA4006)
test/e2e/scheduling/ubernetes_lite.go:219:2: this value of pods is never used (SA4006)
test/integration/scheduler/extender_test.go:78:4: this value of resp is never used (SA4006)
test/integration/volumescheduling/volume_binding_test.go:529:15: this result of append is never used, except maybe in other appends (SA4010)
test/integration/volumescheduling/volume_binding_test.go:538:15: this result of append is never used, except maybe in other appends (SA4010)
---
 cmd/kube-scheduler/app/server.go            |  1 +
 hack/.staticcheck_failures                  |  6 ----
 pkg/apis/scheduling/v1alpha1/defaults.go    |  5 ----
 pkg/apis/scheduling/v1beta1/defaults.go     |  5 ----
 test/e2e/scheduling/limit_range.go          | 10 +++----
 test/e2e/scheduling/predicates.go           | 30 -------------------
 test/e2e/scheduling/taints.go               |  1 +
 test/e2e/scheduling/ubernetes_lite.go       |  4 +--
 test/integration/scheduler/extender_test.go |  1 -
 .../volumescheduling/volume_binding_test.go |  3 --
 10 files changed, 9 insertions(+), 57 deletions(-)

diff --git a/cmd/kube-scheduler/app/server.go b/cmd/kube-scheduler/app/server.go
index 2b5be697607..885c94484f4 100644
--- a/cmd/kube-scheduler/app/server.go
+++ b/cmd/kube-scheduler/app/server.go
@@ -293,6 +293,7 @@ func buildHandlerChain(handler http.Handler, authn authenticator.Request, authz
 
 func installMetricHandler(pathRecorderMux *mux.PathRecorderMux) {
 	configz.InstallHandler(pathRecorderMux)
+	//lint:ignore SA1019 See the Metrics Stability Migration KEP
 	defaultMetricsHandler := legacyregistry.Handler().ServeHTTP
 	pathRecorderMux.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
 		if req.Method == "DELETE" {
diff --git a/hack/.staticcheck_failures b/hack/.staticcheck_failures
index 14b41f0861a..af2a52c4960 100644
--- a/hack/.staticcheck_failures
+++ b/hack/.staticcheck_failures
@@ -2,12 +2,9 @@ cluster/images/etcd-version-monitor
 cluster/images/etcd/migrate
 cmd/kube-controller-manager/app
 cmd/kube-proxy/app
-cmd/kube-scheduler/app
 cmd/linkcheck
 cmd/preferredimports
 hack/make-rules/helpers/go2make/testdata/dir-with-gofiles
-pkg/apis/scheduling/v1alpha1
-pkg/apis/scheduling/v1beta1
 pkg/client/tests
 pkg/controller/daemon
 pkg/controller/deployment
@@ -93,7 +90,6 @@ test/e2e/manifest
 test/e2e/network
 test/e2e/node
 test/e2e/scalability
-test/e2e/scheduling
 test/e2e/storage
 test/e2e/storage/drivers
 test/e2e/storage/testsuites
@@ -124,13 +120,11 @@ test/integration/kubelet
 test/integration/master
 test/integration/replicationcontroller
 test/integration/scale
-test/integration/scheduler
 test/integration/scheduler_perf
 test/integration/serviceaccount
 test/integration/serving
 test/integration/ttlcontroller
 test/integration/volume
-test/integration/volumescheduling
 test/utils
 vendor/k8s.io/apiextensions-apiserver/pkg/apiserver
 vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion
diff --git a/pkg/apis/scheduling/v1alpha1/defaults.go b/pkg/apis/scheduling/v1alpha1/defaults.go
index 41327f5fdcf..5c04a24c6a9 100644
--- a/pkg/apis/scheduling/v1alpha1/defaults.go
+++ b/pkg/apis/scheduling/v1alpha1/defaults.go
@@ -19,15 +19,10 @@ package v1alpha1
 import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/api/scheduling/v1alpha1"
-	"k8s.io/apimachinery/pkg/runtime"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/features"
 )
 
-func addDefaultingFuncs(scheme *runtime.Scheme) error {
-	return RegisterDefaults(scheme)
-}
-
 // SetDefaults_PriorityClass sets additional defaults compared to its counterpart
 // in extensions.
 func SetDefaults_PriorityClass(obj *v1alpha1.PriorityClass) {
diff --git a/pkg/apis/scheduling/v1beta1/defaults.go b/pkg/apis/scheduling/v1beta1/defaults.go
index c35594e4c11..bbf217261fc 100644
--- a/pkg/apis/scheduling/v1beta1/defaults.go
+++ b/pkg/apis/scheduling/v1beta1/defaults.go
@@ -19,15 +19,10 @@ package v1beta1
 import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/api/scheduling/v1beta1"
-	"k8s.io/apimachinery/pkg/runtime"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/features"
 )
 
-func addDefaultingFuncs(scheme *runtime.Scheme) error {
-	return RegisterDefaults(scheme)
-}
-
 // SetDefaults_PriorityClass sets additional defaults compared to its counterpart
 // in extensions.
 func SetDefaults_PriorityClass(obj *v1beta1.PriorityClass) {
diff --git a/test/e2e/scheduling/limit_range.go b/test/e2e/scheduling/limit_range.go
index 4495d993624..e8eccf5b737 100644
--- a/test/e2e/scheduling/limit_range.go
+++ b/test/e2e/scheduling/limit_range.go
@@ -168,12 +168,12 @@ var _ = SIGDescribe("LimitRange", func() {
 
 		ginkgo.By("Failing to create a Pod with less than min resources")
 		pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectError(err)
 
 		ginkgo.By("Failing to create a Pod with more than max resources")
 		pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectError(err)
 
 		ginkgo.By("Updating a LimitRange")
@@ -192,12 +192,12 @@ var _ = SIGDescribe("LimitRange", func() {
 
 		ginkgo.By("Creating a Pod with less than former min resources")
 		pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Failing to create a Pod with more than max resources")
 		pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectError(err)
 
 		ginkgo.By("Deleting a LimitRange")
@@ -236,7 +236,7 @@ var _ = SIGDescribe("LimitRange", func() {
 
 		ginkgo.By("Creating a Pod with more than former max resources")
 		pod = f.NewTestPod(podName+"2", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectNoError(err)
 	})
 
diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go
index 58c37f53127..5973db504c5 100644
--- a/test/e2e/scheduling/predicates.go
+++ b/test/e2e/scheduling/predicates.go
@@ -23,7 +23,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	utilversion "k8s.io/apimachinery/pkg/util/version"
@@ -759,35 +758,6 @@ func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotSched
 	framework.ExpectEqual(len(scheduledPods), expectedScheduled, fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))
 }
 
-// verifyReplicasResult is wrapper of verifyResult for a group pods with same "name: labelName" label, which means they belong to same RC
-func verifyReplicasResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string, labelName string) {
-	allPods := getPodsByLabels(c, ns, map[string]string{"name": labelName})
-	scheduledPods, notScheduledPods := e2epod.GetPodsScheduled(masterNodes, allPods)
-
-	framework.ExpectEqual(len(notScheduledPods), expectedNotScheduled, fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))
-	framework.ExpectEqual(len(scheduledPods), expectedScheduled, fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))
-}
-
-func getPodsByLabels(c clientset.Interface, ns string, labelsMap map[string]string) *v1.PodList {
-	selector := labels.SelectorFromSet(labels.Set(labelsMap))
-	allPods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()})
-	framework.ExpectNoError(err)
-	return allPods
-}
-
-func runAndKeepPodWithLabelAndGetNodeName(f *framework.Framework) (string, string) {
-	// launch a pod to find a node which can launch a pod. We intentionally do
-	// not just take the node list and choose the first of them. Depending on the
-	// cluster and the scheduler it might be that a "normal" pod cannot be
-	// scheduled onto it.
-	ginkgo.By("Trying to launch a pod with a label to get a node which can launch it.")
-	pod := runPausePod(f, pausePodConfig{
-		Name:   "with-label-" + string(uuid.NewUUID()),
-		Labels: map[string]string{"security": "S1"},
-	})
-	return pod.Spec.NodeName, pod.Name
-}
-
 // GetNodeThatCanRunPod trying to launch a pod without a label to get a node which can launch it
 func GetNodeThatCanRunPod(f *framework.Framework) string {
 	ginkgo.By("Trying to launch a pod without a label to get a node which can launch it.")
diff --git a/test/e2e/scheduling/taints.go b/test/e2e/scheduling/taints.go
index 0b00f3952cb..93e8f93ec5e 100644
--- a/test/e2e/scheduling/taints.go
+++ b/test/e2e/scheduling/taints.go
@@ -425,6 +425,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
 
 		ginkgo.By("Starting pods...")
 		nodeName, err := testutils.RunPodAndGetNodeName(cs, pod1, 2*time.Minute)
+		framework.ExpectNoError(err)
 		node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
 		framework.ExpectNoError(err)
 		nodeHostNameLabel, ok := node.GetObjectMeta().GetLabels()["kubernetes.io/hostname"]
diff --git a/test/e2e/scheduling/ubernetes_lite.go b/test/e2e/scheduling/ubernetes_lite.go
index 08b3a7dc463..9b4e8abf7a4 100644
--- a/test/e2e/scheduling/ubernetes_lite.go
+++ b/test/e2e/scheduling/ubernetes_lite.go
@@ -215,12 +215,12 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string, ar
 	}()
 	// List the pods, making sure we observe all the replicas.
 	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-	pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount)
+	_, err = e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount)
 	framework.ExpectNoError(err)
 
 	// Wait for all of them to be scheduled
 	ginkgo.By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled. Selector: %v", replicaCount, name, selector))
-	pods, err = e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
+	pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
 	framework.ExpectNoError(err)
 
 	// Now make sure they're spread across zones
diff --git a/test/integration/scheduler/extender_test.go b/test/integration/scheduler/extender_test.go
index 3ed2a24a15c..4898cd7e424 100644
--- a/test/integration/scheduler/extender_test.go
+++ b/test/integration/scheduler/extender_test.go
@@ -75,7 +75,6 @@ func (e *Extender) serveHTTP(t *testing.T, w http.ResponseWriter, req *http.Requ
 	}
 
 	if strings.Contains(req.URL.Path, filter) {
-		resp := &schedulerapi.ExtenderFilterResult{}
 		resp, err := e.Filter(&args)
 		if err != nil {
 			resp.Error = err.Error()
diff --git a/test/integration/volumescheduling/volume_binding_test.go b/test/integration/volumescheduling/volume_binding_test.go
index c1e8602f5f1..b4a0cd18a57 100644
--- a/test/integration/volumescheduling/volume_binding_test.go
+++ b/test/integration/volumescheduling/volume_binding_test.go
@@ -511,7 +511,6 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n
 
 	pods := []*v1.Pod{}
 	pvcs := []*v1.PersistentVolumeClaim{}
-	pvs := []*v1.PersistentVolume{}
 
 	// Create PVs for the first node
 	for i := 0; i < numPVsFirstNode; i++ {
@@ -519,7 +518,6 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n
 		if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
 			t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
 		}
-		pvs = append(pvs, pv)
 	}
 
 	// Create 1 PV per Node for the remaining nodes
@@ -528,7 +526,6 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n
 		if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
 			t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
 		}
-		pvs = append(pvs, pv)
 	}
 
 	// Create pods