diff --git a/cmd/kube-scheduler/app/server.go b/cmd/kube-scheduler/app/server.go
index 2b5be697607..885c94484f4 100644
--- a/cmd/kube-scheduler/app/server.go
+++ b/cmd/kube-scheduler/app/server.go
@@ -293,6 +293,7 @@ func buildHandlerChain(handler http.Handler, authn authenticator.Request, authz
 
 func installMetricHandler(pathRecorderMux *mux.PathRecorderMux) {
 	configz.InstallHandler(pathRecorderMux)
+	//lint:ignore SA1019 See the Metrics Stability Migration KEP
 	defaultMetricsHandler := legacyregistry.Handler().ServeHTTP
 	pathRecorderMux.HandleFunc("/metrics", func(w http.ResponseWriter, req *http.Request) {
 		if req.Method == "DELETE" {
diff --git a/hack/.staticcheck_failures b/hack/.staticcheck_failures
index 14b41f0861a..af2a52c4960 100644
--- a/hack/.staticcheck_failures
+++ b/hack/.staticcheck_failures
@@ -2,12 +2,9 @@ cluster/images/etcd-version-monitor
 cluster/images/etcd/migrate
 cmd/kube-controller-manager/app
 cmd/kube-proxy/app
-cmd/kube-scheduler/app
 cmd/linkcheck
 cmd/preferredimports
 hack/make-rules/helpers/go2make/testdata/dir-with-gofiles
-pkg/apis/scheduling/v1alpha1
-pkg/apis/scheduling/v1beta1
 pkg/client/tests
 pkg/controller/daemon
 pkg/controller/deployment
@@ -93,7 +90,6 @@ test/e2e/manifest
 test/e2e/network
 test/e2e/node
 test/e2e/scalability
-test/e2e/scheduling
 test/e2e/storage
 test/e2e/storage/drivers
 test/e2e/storage/testsuites
@@ -124,13 +120,11 @@ test/integration/kubelet
 test/integration/master
 test/integration/replicationcontroller
 test/integration/scale
-test/integration/scheduler
 test/integration/scheduler_perf
 test/integration/serviceaccount
 test/integration/serving
 test/integration/ttlcontroller
 test/integration/volume
-test/integration/volumescheduling
 test/utils
 vendor/k8s.io/apiextensions-apiserver/pkg/apiserver
 vendor/k8s.io/apiextensions-apiserver/pkg/apiserver/conversion
diff --git a/pkg/apis/scheduling/v1alpha1/defaults.go b/pkg/apis/scheduling/v1alpha1/defaults.go
index 41327f5fdcf..5c04a24c6a9 100644
--- a/pkg/apis/scheduling/v1alpha1/defaults.go
+++ b/pkg/apis/scheduling/v1alpha1/defaults.go
@@ -19,15 +19,10 @@ package v1alpha1
 import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/api/scheduling/v1alpha1"
-	"k8s.io/apimachinery/pkg/runtime"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/features"
 )
 
-func addDefaultingFuncs(scheme *runtime.Scheme) error {
-	return RegisterDefaults(scheme)
-}
-
 // SetDefaults_PriorityClass sets additional defaults compared to its counterpart
 // in extensions.
 func SetDefaults_PriorityClass(obj *v1alpha1.PriorityClass) {
diff --git a/pkg/apis/scheduling/v1beta1/defaults.go b/pkg/apis/scheduling/v1beta1/defaults.go
index c35594e4c11..bbf217261fc 100644
--- a/pkg/apis/scheduling/v1beta1/defaults.go
+++ b/pkg/apis/scheduling/v1beta1/defaults.go
@@ -19,15 +19,10 @@ package v1beta1
 import (
 	apiv1 "k8s.io/api/core/v1"
 	"k8s.io/api/scheduling/v1beta1"
-	"k8s.io/apimachinery/pkg/runtime"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/features"
 )
 
-func addDefaultingFuncs(scheme *runtime.Scheme) error {
-	return RegisterDefaults(scheme)
-}
-
 // SetDefaults_PriorityClass sets additional defaults compared to its counterpart
 // in extensions.
 func SetDefaults_PriorityClass(obj *v1beta1.PriorityClass) {
diff --git a/test/e2e/scheduling/limit_range.go b/test/e2e/scheduling/limit_range.go
index 4495d993624..e8eccf5b737 100644
--- a/test/e2e/scheduling/limit_range.go
+++ b/test/e2e/scheduling/limit_range.go
@@ -168,12 +168,12 @@ var _ = SIGDescribe("LimitRange", func() {
 
 		ginkgo.By("Failing to create a Pod with less than min resources")
 		pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectError(err)
 
 		ginkgo.By("Failing to create a Pod with more than max resources")
 		pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectError(err)
 
 		ginkgo.By("Updating a LimitRange")
@@ -192,12 +192,12 @@ var _ = SIGDescribe("LimitRange", func() {
 
 		ginkgo.By("Creating a Pod with less than former min resources")
 		pod = f.NewTestPod(podName, getResourceList("10m", "50Mi", "50Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectNoError(err)
 
 		ginkgo.By("Failing to create a Pod with more than max resources")
 		pod = f.NewTestPod(podName, getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectError(err)
 
 		ginkgo.By("Deleting a LimitRange")
@@ -236,7 +236,7 @@ var _ = SIGDescribe("LimitRange", func() {
 
 		ginkgo.By("Creating a Pod with more than former max resources")
 		pod = f.NewTestPod(podName+"2", getResourceList("600m", "600Mi", "600Gi"), v1.ResourceList{})
-		pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+		_, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
 		framework.ExpectNoError(err)
 	})
 
diff --git a/test/e2e/scheduling/predicates.go b/test/e2e/scheduling/predicates.go
index 58c37f53127..5973db504c5 100644
--- a/test/e2e/scheduling/predicates.go
+++ b/test/e2e/scheduling/predicates.go
@@ -23,7 +23,6 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	utilversion "k8s.io/apimachinery/pkg/util/version"
@@ -759,35 +758,6 @@ func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotSched
 	framework.ExpectEqual(len(scheduledPods), expectedScheduled, fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))
 }
 
-// verifyReplicasResult is wrapper of verifyResult for a group pods with same "name: labelName" label, which means they belong to same RC
-func verifyReplicasResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string, labelName string) {
-	allPods := getPodsByLabels(c, ns, map[string]string{"name": labelName})
-	scheduledPods, notScheduledPods := e2epod.GetPodsScheduled(masterNodes, allPods)
-
-	framework.ExpectEqual(len(notScheduledPods), expectedNotScheduled, fmt.Sprintf("Not scheduled Pods: %#v", notScheduledPods))
-	framework.ExpectEqual(len(scheduledPods), expectedScheduled, fmt.Sprintf("Scheduled Pods: %#v", scheduledPods))
-}
-
-func getPodsByLabels(c clientset.Interface, ns string, labelsMap map[string]string) *v1.PodList {
-	selector := labels.SelectorFromSet(labels.Set(labelsMap))
-	allPods, err := c.CoreV1().Pods(ns).List(metav1.ListOptions{LabelSelector: selector.String()})
-	framework.ExpectNoError(err)
-	return allPods
-}
-
-func runAndKeepPodWithLabelAndGetNodeName(f *framework.Framework) (string, string) {
-	// launch a pod to find a node which can launch a pod. We intentionally do
-	// not just take the node list and choose the first of them. Depending on the
-	// cluster and the scheduler it might be that a "normal" pod cannot be
-	// scheduled onto it.
-	ginkgo.By("Trying to launch a pod with a label to get a node which can launch it.")
-	pod := runPausePod(f, pausePodConfig{
-		Name:   "with-label-" + string(uuid.NewUUID()),
-		Labels: map[string]string{"security": "S1"},
-	})
-	return pod.Spec.NodeName, pod.Name
-}
-
 // GetNodeThatCanRunPod trying to launch a pod without a label to get a node which can launch it
 func GetNodeThatCanRunPod(f *framework.Framework) string {
 	ginkgo.By("Trying to launch a pod without a label to get a node which can launch it.")
diff --git a/test/e2e/scheduling/taints.go b/test/e2e/scheduling/taints.go
index 0b00f3952cb..93e8f93ec5e 100644
--- a/test/e2e/scheduling/taints.go
+++ b/test/e2e/scheduling/taints.go
@@ -425,6 +425,7 @@ var _ = SIGDescribe("NoExecuteTaintManager Multiple Pods [Serial]", func() {
 
 		ginkgo.By("Starting pods...")
 		nodeName, err := testutils.RunPodAndGetNodeName(cs, pod1, 2*time.Minute)
+		framework.ExpectNoError(err)
 		node, err := cs.CoreV1().Nodes().Get(nodeName, metav1.GetOptions{})
 		framework.ExpectNoError(err)
 		nodeHostNameLabel, ok := node.GetObjectMeta().GetLabels()["kubernetes.io/hostname"]
diff --git a/test/e2e/scheduling/ubernetes_lite.go b/test/e2e/scheduling/ubernetes_lite.go
index 08b3a7dc463..9b4e8abf7a4 100644
--- a/test/e2e/scheduling/ubernetes_lite.go
+++ b/test/e2e/scheduling/ubernetes_lite.go
@@ -215,12 +215,12 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string, ar
 	}()
 	// List the pods, making sure we observe all the replicas.
 	selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
-	pods, err := e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount)
+	_, err = e2epod.PodsCreated(f.ClientSet, f.Namespace.Name, name, replicaCount)
 	framework.ExpectNoError(err)
 
 	// Wait for all of them to be scheduled
 	ginkgo.By(fmt.Sprintf("Waiting for %d replicas of %s to be scheduled.  Selector: %v", replicaCount, name, selector))
-	pods, err = e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
+	pods, err := e2epod.WaitForPodsWithLabelScheduled(f.ClientSet, f.Namespace.Name, selector)
 	framework.ExpectNoError(err)
 
 	// Now make sure they're spread across zones
diff --git a/test/integration/scheduler/extender_test.go b/test/integration/scheduler/extender_test.go
index 3ed2a24a15c..4898cd7e424 100644
--- a/test/integration/scheduler/extender_test.go
+++ b/test/integration/scheduler/extender_test.go
@@ -75,7 +75,6 @@ func (e *Extender) serveHTTP(t *testing.T, w http.ResponseWriter, req *http.Requ
 	}
 
 	if strings.Contains(req.URL.Path, filter) {
-		resp := &schedulerapi.ExtenderFilterResult{}
 		resp, err := e.Filter(&args)
 		if err != nil {
 			resp.Error = err.Error()
diff --git a/test/integration/volumescheduling/volume_binding_test.go b/test/integration/volumescheduling/volume_binding_test.go
index c1e8602f5f1..b4a0cd18a57 100644
--- a/test/integration/volumescheduling/volume_binding_test.go
+++ b/test/integration/volumescheduling/volume_binding_test.go
@@ -511,7 +511,6 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n
 
 	pods := []*v1.Pod{}
 	pvcs := []*v1.PersistentVolumeClaim{}
-	pvs := []*v1.PersistentVolume{}
 
 	// Create PVs for the first node
 	for i := 0; i < numPVsFirstNode; i++ {
@@ -519,7 +518,6 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n
 		if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
 			t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
 		}
-		pvs = append(pvs, pv)
 	}
 
 	// Create 1 PV per Node for the remaining nodes
@@ -528,7 +526,6 @@ func testVolumeBindingWithAffinity(t *testing.T, anti bool, numNodes, numPods, n
 		if pv, err := config.client.CoreV1().PersistentVolumes().Create(pv); err != nil {
 			t.Fatalf("Failed to create PersistentVolume %q: %v", pv.Name, err)
 		}
-		pvs = append(pvs, pv)
 	}
 
 	// Create pods