From 4ef6f1c9dd4d8e2380a45fc70ffac31a87756fc4 Mon Sep 17 00:00:00 2001
From: gmarek
Date: Tue, 1 Mar 2016 11:49:51 +0100
Subject: [PATCH 1/2] Move isMasterNode function to pkg/util/system

---
 pkg/metrics/metrics_grabber.go  |  9 ++-------
 pkg/util/system/system_utils.go | 28 ++++++++++++++++++++++++++++
 2 files changed, 30 insertions(+), 7 deletions(-)
 create mode 100644 pkg/util/system/system_utils.go

diff --git a/pkg/metrics/metrics_grabber.go b/pkg/metrics/metrics_grabber.go
index 9fefbb3304a..6058d71b95d 100644
--- a/pkg/metrics/metrics_grabber.go
+++ b/pkg/metrics/metrics_grabber.go
@@ -18,7 +18,6 @@ package metrics
 
 import (
 	"fmt"
-	"strings"
 	"time"
 
 	"k8s.io/kubernetes/pkg/api"
@@ -26,6 +25,7 @@ import (
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/master/ports"
 	"k8s.io/kubernetes/pkg/util/sets"
+	"k8s.io/kubernetes/pkg/util/system"
 
 	"github.com/golang/glog"
 )
@@ -51,11 +51,6 @@ type MetricsGrabber struct {
 	registeredMaster bool
 }
 
-// TODO: find a better way of figuring out if given node is a registered master.
-func isMasterNode(node *api.Node) bool {
-	return strings.HasSuffix(node.Name, "master")
-}
-
 func NewMetricsGrabber(c *client.Client, kubelets bool, scheduler bool, controllers bool, apiServer bool) (*MetricsGrabber, error) {
 	registeredMaster := false
 	masterName := ""
@@ -67,7 +62,7 @@ func NewMetricsGrabber(c *client.Client, kubelets bool, scheduler bool, controll
 		glog.Warning("Can't find any Nodes in the API server to grab metrics from")
 	}
 	for _, node := range nodeList.Items {
-		if isMasterNode(&node) {
+		if system.IsMasterNode(&node) {
 			registeredMaster = true
 			masterName = node.Name
 			break
diff --git a/pkg/util/system/system_utils.go b/pkg/util/system/system_utils.go
new file mode 100644
index 00000000000..57576abebde
--- /dev/null
+++ b/pkg/util/system/system_utils.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2016 The Kubernetes Authors All rights reserved.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package system + +import ( + "strings" + + "k8s.io/kubernetes/pkg/api" +) + +// TODO: find a better way of figuring out if given node is a registered master. +func IsMasterNode(node *api.Node) bool { + return strings.HasSuffix(node.Name, "master") +} From 496fc3c7eda5c21d67c17fc2dcf9d3ec847f3c26 Mon Sep 17 00:00:00 2001 From: gmarek Date: Tue, 1 Mar 2016 15:19:17 +0100 Subject: [PATCH 2/2] Make scheduler predicates test work with registered master --- test/e2e/scheduler_predicates.go | 40 ++++++++++++++++++++++++-------- test/e2e/util.go | 1 + 2 files changed, 31 insertions(+), 10 deletions(-) diff --git a/test/e2e/scheduler_predicates.go b/test/e2e/scheduler_predicates.go index a42377ee1f9..2f99977f43c 100644 --- a/test/e2e/scheduler_predicates.go +++ b/test/e2e/scheduler_predicates.go @@ -28,19 +28,26 @@ import ( client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/util" + "k8s.io/kubernetes/pkg/util/sets" + "k8s.io/kubernetes/pkg/util/system" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" _ "github.com/stretchr/testify/assert" ) +// variable set in BeforeEach, never modified afterwards +var masterNodes sets.String + // Returns a number of currently scheduled and not scheduled Pods. 
func getPodsScheduled(pods *api.PodList) (scheduledPods, notScheduledPods []api.Pod) { for _, pod := range pods.Items { - if pod.Spec.NodeName != "" { - scheduledPods = append(scheduledPods, pod) - } else { - notScheduledPods = append(notScheduledPods, pod) + if !masterNodes.Has(pod.Spec.NodeName) { + if pod.Spec.NodeName != "" { + scheduledPods = append(scheduledPods, pod) + } else { + notScheduledPods = append(notScheduledPods, pod) + } } } return @@ -155,9 +162,18 @@ var _ = Describe("SchedulerPredicates [Serial]", func() { BeforeEach(func() { c = framework.Client ns = framework.Namespace.Name - nodeList = ListSchedulableNodesOrDie(c) + nodeList = &api.NodeList{} + nodes, err := c.Nodes().List(api.ListOptions{}) + masterNodes = sets.NewString() + for _, node := range nodes.Items { + if system.IsMasterNode(&node) { + masterNodes.Insert(node.Name) + } else { + nodeList.Items = append(nodeList.Items, node) + } + } - err := checkTestingNSDeletedExcept(c, ns) + err = checkTestingNSDeletedExcept(c, ns) expectNoError(err) // Every test case in this suite assumes that cluster add-on pods stay stable and @@ -165,7 +181,12 @@ var _ = Describe("SchedulerPredicates [Serial]", func() { // It is so because we need to have precise control on what's running in the cluster. 
systemPods, err := c.Pods(api.NamespaceSystem).List(api.ListOptions{}) Expect(err).NotTo(HaveOccurred()) - systemPodsNo = len(systemPods.Items) + systemPodsNo = 0 + for _, pod := range systemPods.Items { + if !masterNodes.Has(pod.Spec.NodeName) && pod.DeletionTimestamp == nil { + systemPodsNo++ + } + } err = waitForPodsRunningReady(api.NamespaceSystem, systemPodsNo, podReadyBeforeTimeout) Expect(err).NotTo(HaveOccurred()) @@ -180,10 +201,10 @@ var _ = Describe("SchedulerPredicates [Serial]", func() { totalPodCapacity = 0 for _, node := range nodeList.Items { + Logf("Node: %v", node) podCapacity, found := node.Status.Capacity["pods"] Expect(found).To(Equal(true)) totalPodCapacity += podCapacity.Value() - Logf("Node: %v", node) } currentlyScheduledPods := waitForStableCluster(c) @@ -253,8 +274,7 @@ var _ = Describe("SchedulerPredicates [Serial]", func() { expectNoError(err) for _, pod := range pods.Items { _, found := nodeToCapacityMap[pod.Spec.NodeName] - Expect(found).To(Equal(true)) - if pod.Status.Phase == api.PodRunning { + if found && pod.Status.Phase == api.PodRunning { Logf("Pod %v requesting resource %v on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName) nodeToCapacityMap[pod.Spec.NodeName] -= getRequestedCPU(pod) } diff --git a/test/e2e/util.go b/test/e2e/util.go index f74a61c900d..8906495cc8d 100644 --- a/test/e2e/util.go +++ b/test/e2e/util.go @@ -1950,6 +1950,7 @@ func startPods(c *client.Client, replicas int, namespace string, podNamePrefix s _, err := c.Pods(namespace).Create(&pod) expectNoError(err) } + Logf("Waiting for running...") if waitForRunning { label := labels.SelectorFromSet(labels.Set(map[string]string{"startPodsID": startPodsID})) err := waitForPodsWithLabelRunning(c, namespace, label)