Fix staticcheck failures for scheduler packages

Errors from staticcheck:
cmd/kube-scheduler/app/server.go:297:27: prometheus.Handler is deprecated: Please note the issues described in the doc comment of InstrumentHandler. You might want to consider using promhttp.Handler instead.  (SA1019)
pkg/apis/scheduling/v1alpha1/defaults.go:27:6: func addDefaultingFuncs is unused (U1000)
pkg/apis/scheduling/v1beta1/defaults.go:27:6: func addDefaultingFuncs is unused (U1000)
test/e2e/scheduling/predicates.go:757:6: func verifyReplicasResult is unused (U1000)
test/e2e/scheduling/predicates.go:765:6: func getPodsByLabels is unused (U1000)
test/e2e/scheduling/predicates.go:772:6: func runAndKeepPodWithLabelAndGetNodeName is unused (U1000)
test/e2e/scheduling/limit_range.go:172:3: this value of pod is never used (SA4006)
test/e2e/scheduling/limit_range.go:177:3: this value of pod is never used (SA4006)
test/e2e/scheduling/limit_range.go:196:3: this value of pod is never used (SA4006)
test/e2e/scheduling/limit_range.go:201:3: this value of pod is never used (SA4006)
test/e2e/scheduling/limit_range.go:240:3: this value of pod is never used (SA4006)
test/e2e/scheduling/taints.go:428:13: this value of err is never used (SA4006)
test/e2e/scheduling/ubernetes_lite.go:219:2: this value of pods is never used (SA4006)
test/integration/scheduler/extender_test.go:78:4: this value of resp is never used (SA4006)
test/integration/volumescheduling/volume_binding_test.go:529:15: this result of append is never used, except maybe in other appends (SA4010)
test/integration/volumescheduling/volume_binding_test.go:538:15: this result of append is never used, except maybe in other appends (SA4010)
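
Most of these findings fall into two patterns: SA4006/SA4010 flag values (or append results) that are assigned but never read, and U1000 flags unexported functions that are never called; SA1019 flags use of a deprecated API. As a rough, self-contained sketch of the SA4006 fix applied throughout the test files below (hypothetical names, not code from this repository):

package main

import "fmt"

// create stands in for calls such as Pods(ns).Create(pod) in the e2e tests.
func create() (string, error) { return "pod-1", nil }

func main() {
	// staticcheck SA4006 fires on `pod, err := create()` when pod is never read.
	// The fix used in this commit: discard the unused value with the blank identifier.
	_, err := create()
	if err != nil {
		fmt.Println("unexpected error:", err)
	}
}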
Mark Janssen 2019-08-23 18:06:47 +02:00
parent 345e528c86
commit 1a1b7001d6
10 changed files with 9 additions and 57 deletions

cmd/kube-scheduler/app/server.go

@@ -293,6 +293,7 @@ func buildHandlerChain(handler http.Handler, authn authenticator.Request, authz
 func installMetricHandler(pathRecorderMux *mux.PathRecorderMux) {
 	configz.InstallHandler(pathRecorderMux)
+	//lint:ignore SA1019 See the Metrics Stability Migration KEP
 	defaultMetricsHandler := legacyregistry.Handler().ServeHTTP
 	pathRecorderMux.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
 		if req.Method == "DELETE" {

hack/.staticcheck_failures

@@ -2,12 +2,9 @@ cluster/images/etcd-version-monitor
 cluster/images/etcd/migrate
 cmd/kube-controller-manager/app
 cmd/kube-proxy/app
-cmd/kube-scheduler/app
 cmd/linkcheck
 cmd/preferredimports
 hack/make-rules/helpers/go2make/testdata/dir-with-gofiles
-pkg/apis/scheduling/v1alpha1
-pkg/apis/scheduling/v1beta1
 pkg/client/tests
 pkg/controller/daemon
 pkg/controller/deployment
@@ -93,7 +90,6 @@ test/e2e/manifest
 test/e2e/network
 test/e2e/node
 test/e2e/scalability
-test/e2e/scheduling
 test/e2e/storage
 test/e2e/storage/drivers
 test/e2e/storage/testsuites
@@ -124,13 +120,11 @@ test/integration/kubelet
 test/integration/master
 test/integration/replicationcontroller
 test/integration/scale
-test/integration/scheduler
 test/integration/scheduler_perf
 test/integration/serviceaccount
 test/integration/serving
 test/integration/ttlcontroller
 test/integration/volume
-test/integration/volumescheduling
 test/utils
 vendor/k8s.io/apiextensions-apiserver/pkg/apiserver
 vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion

pkg/apis/scheduling/v1alpha1/defaults.go

@@ -19,15 +19,10 @@ package v1alpha1
 import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/api/scheduling/v1alpha1"
-	"k8s.io/apimachinery/pkg/runtime"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/features"
 )
-func addDefaultingFuncs(scheme *runtime.Scheme) error {
-	return RegisterDefaults(scheme)
-}
 // SetDefaults_PriorityClass sets additional defaults compared to its counterpart
 // in extensions.
 func SetDefaults_PriorityClass(obj *v1alpha1.PriorityClass) {

pkg/apis/scheduling/v1beta1/defaults.go

@@ -19,15 +19,10 @@ package v1beta1
 import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/api/scheduling/v1beta1"
-	"k8s.io/apimachinery/pkg/runtime"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/features"
 )
-func addDefaultingFuncs(scheme *runtime.Scheme) error {
-	return RegisterDefaults(scheme)
-}
 // SetDefaults_PriorityClass sets additional defaults compared to its counterpart
 // in extensions.
 func SetDefaults_PriorityClass(obj *v1beta1.PriorityClass) {

test/e2e/scheduling/limit_range.go

@@ -168,12 +168,12 @@ var _ = SIGDescribe("LimitRange", func() {
 		ginkgo.By("Failing to create a Pod with less than min resources")
 		pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectError(err)
 		ginkgo.By("Failing to create a Pod with more than max resources")
 		pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectError(err)
 		ginkgo.By("Updating a LimitRange")
@@ -192,12 +192,12 @@ var _ = SIGDescribe("LimitRange", func() {
 		ginkgo.By("Creating a Pod with less than former min resources")
 		pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectNoError(err)
 		ginkgo.By("Failing to create a Pod with more than max resources")
 		pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectError(err)
 		ginkgo.By("Deleting a LimitRange")
@@ -236,7 +236,7 @@ var _ = SIGDescribe("LimitRange", func() {
 		ginkgo.By("Creating a Pod with more than former max resources")
 		pod = f.NewTestPod(podName+"2", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectNoError(err)
 	})

test/e2e/scheduling/predicates.go

@@ -23,7 +23,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	utilversion "k8s.io/apimachinery/pkg/util/version"
@@ -759,35 +758,6 @@ func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotSched
 	framework.ExpectEqual(len(scheduledPods), expectedScheduled, fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))
 }
-// verifyReplicasResult is wrapper of verifyResult for a group pods with same "name: labelName" label, which means they belong to same RC
-func verifyReplicasResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string, labelName string) {
-	allPods := getPodsByLabels(c, ns, map[string]string{"name": labelName})
-	scheduledPods, notScheduledPods := e2epod.GetPodsScheduled(masterNodes, allPods)
-	framework.ExpectEqual(len(notScheduledPods), expectedNotScheduled, fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))
-	framework.ExpectEqual(len(scheduledPods), expectedScheduled, fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))
-}
-func getPodsByLabels(c clientset.Interface, ns string, labelsMap map[string]string) *v1.PodList {
-	selector := labels.SelectorFromSet(labels.Set(labelsMap))
-	allPods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()})
-	framework.ExpectNoError(err)
-	return allPods
-}
-func runAndKeepPodWithLabelAndGetNodeName(f *framework.Framework) (string, string) {
-	// launch a pod to find a node which can launch a pod. We intentionally do
-	// not just take the node list and choose the first of them. Depending on the
-	// cluster and the scheduler it might be that a "normal" pod cannot be
-	// scheduled onto it.
-	ginkgo.By("Trying to launch a pod with a label to get a node which can launch it.")
-	pod := runPausePod(f, pausePodConfig{
-		Name: "with-label-" + string(uuid.NewUUID()),
-		Labels: map[string]string{"security": "S1"},
-	})
-	return pod.Spec.NodeName, pod.Name
-}
 // GetNodeThatCanRunPod trying to launch a pod without a label to get a node which can launch it
 func GetNodeThatCanRunPod(f *framework.Framework) string {
 	ginkgo.By("Trying to launch a pod without a label to get a node which can launch it.")

test/e2e/scheduling/taints.go

@@ -425,6 +425,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
 		ginkgo.By("Starting pods...")
 		nodeName, err := testutils.RunPodAndGetNodeName(cs, pod1, 2*time.Minute)
+		framework.ExpectNoError(err)
 		node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
 		framework.ExpectNoError(err)
 		nodeHostNameLabel, ok := node.GetObjectMeta().GetLabels()["kubernetes.io/hostname"]

test/e2e/scheduling/ubernetes_lite.go

@@ -215,12 +215,12 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string, ar
 	}()
 	// List the pods, making sure we observe all the replicas.
 	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-	pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount)
+	_, err = e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount)
 	framework.ExpectNoError(err)
 	// Wait for all of them to be scheduled
 	ginkgo.By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled. Selector: %v", replicaCount, name, selector))
-	pods, err = e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
+	pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
 	framework.ExpectNoError(err)
 	// Now make sure they're spread across zones

test/integration/scheduler/extender_test.go

@@ -75,7 +75,6 @@ func (e *Extender) serveHTTP(t *testing.T, w http.ResponseWriter, req *http.Requ
 	}
 	if strings.Contains(req.URL.Path, filter) {
-		resp := &schedulerapi.ExtenderFilterResult{}
 		resp, err := e.Filter(&args)
 		if err != nil {
 			resp.Error = err.Error()

test/integration/volumescheduling/volume_binding_test.go

@@ -511,7 +511,6 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n
 	pods := []*v1.Pod{}
 	pvcs := []*v1.PersistentVolumeClaim{}
-	pvs := []*v1.PersistentVolume{}
 	// Create PVs for the first node
 	for i := 0; i < numPVsFirstNode; i++ {
@@ -519,7 +518,6 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n
 		if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
 			t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
 		}
-		pvs = append(pvs, pv)
 	}
 	// Create 1 PV per Node for the remaining nodes
@@ -528,7 +526,6 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n
 		if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
 			t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
 		}
-		pvs = append(pvs, pv)
 	}
 	// Create pods