Merge pull request #13785 from wojtek-t/minion_to_node_2

Auto commit by PR queue bot
k8s-merge-robot 2015-09-14 23:23:06 -07:00
commit bf641078eb
36 changed files with 339 additions and 339 deletions

View File

@@ -246,7 +246,7 @@ func recoverAssignedSlave(pod *api.Pod) string {
// Schedule implements the Scheduler interface of Kubernetes.
// It returns the selectedMachine's name and error (if there's any).
-func (k *kubeScheduler) Schedule(pod *api.Pod, unused algorithm.MinionLister) (string, error) {
+func (k *kubeScheduler) Schedule(pod *api.Pod, unused algorithm.NodeLister) (string, error) {
log.Infof("Try to schedule pod %v\n", pod.Name)
ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
@@ -684,7 +684,7 @@ func (k *KubernetesScheduler) NewPluginConfig(terminate <-chan struct{}, mux *ht
})
return &PluginConfig{
Config: &plugin.Config{
-MinionLister: nil,
+NodeLister: nil,
Algorithm: &kubeScheduler{
api: kapi,
podUpdates: podUpdates,
@@ -741,7 +741,7 @@ func (s *schedulingPlugin) Run(done <-chan struct{}) {
func (s *schedulingPlugin) scheduleOne() {
pod := s.config.NextPod()
log.V(3).Infof("Attempting to schedule: %+v", pod)
-dest, err := s.config.Algorithm.Schedule(pod, s.config.MinionLister) // call kubeScheduler.Schedule
+dest, err := s.config.Algorithm.Schedule(pod, s.config.NodeLister) // call kubeScheduler.Schedule
if err != nil {
log.V(1).Infof("Failed to schedule: %+v", pod)
s.config.Recorder.Eventf(pod, "FailedScheduling", "Error scheduling: %v", err)

View File

@@ -25,7 +25,7 @@ import (
// ValidateEvent makes sure that the event makes sense.
func ValidateEvent(event *api.Event) errs.ValidationErrorList {
allErrs := errs.ValidationErrorList{}
-// TODO: There is no namespace required for minion
+// TODO: There is no namespace required for node.
if event.InvolvedObject.Kind != "Node" &&
event.Namespace != event.InvolvedObject.Namespace {
allErrs = append(allErrs, errs.NewFieldInvalid("involvedObject.namespace", event.InvolvedObject.Namespace, "namespace does not match involvedObject"))

View File

@@ -44,7 +44,7 @@ type policy struct {
// providers are in use. Either add "Realm", or assume "user@example.com"
// format.
-// TODO: Make the "cluster" Kinds be one API group (minions, bindings,
+// TODO: Make the "cluster" Kinds be one API group (nodes, bindings,
// events, endpoints). The "user" Kinds are another (pods, services,
// replicationControllers, operations) Make a "plugin", e.g. build
// controller, be another group. That way when we add a new object to a

View File

@@ -18,7 +18,7 @@ limitations under the License.
// reducing the number of server calls you'd otherwise need to make.
// Reflector watches a server and updates a Store. Two stores are provided;
// one that simply caches objects (for example, to allow a scheduler to
-// list currently available minions), and one that additionally acts as
+// list currently available nodes), and one that additionally acts as
// a FIFO queue (for example, to allow a scheduler to process incoming
// pods).
package cache

View File

@@ -158,19 +158,19 @@ func (s storeToNodeConditionLister) List() (nodes api.NodeList, err error) {
// TODO Move this back to scheduler as a helper function that takes a Store,
// rather than a method of StoreToNodeLister.
-// GetNodeInfo returns cached data for the minion 'id'.
+// GetNodeInfo returns cached data for the node 'id'.
func (s *StoreToNodeLister) GetNodeInfo(id string) (*api.Node, error) {
-minion, exists, err := s.Get(&api.Node{ObjectMeta: api.ObjectMeta{Name: id}})
+node, exists, err := s.Get(&api.Node{ObjectMeta: api.ObjectMeta{Name: id}})
if err != nil {
-return nil, fmt.Errorf("error retrieving minion '%v' from cache: %v", id, err)
+return nil, fmt.Errorf("error retrieving node '%v' from cache: %v", id, err)
}
if !exists {
-return nil, fmt.Errorf("minion '%v' is not in cache", id)
+return nil, fmt.Errorf("node '%v' is not in cache", id)
}
-return minion.(*api.Node), nil
+return node.(*api.Node), nil
}
// StoreToReplicationControllerLister gives a store List and Exists methods. The store must contain only ReplicationControllers.

View File

@@ -25,7 +25,7 @@ import (
"k8s.io/kubernetes/pkg/util/sets"
)
-func TestStoreToMinionLister(t *testing.T) {
+func TestStoreToNodeLister(t *testing.T) {
store := NewStore(MetaNamespaceKeyFunc)
ids := sets.NewString("foo", "bar", "baz")
for id := range ids {

View File

@@ -61,10 +61,10 @@ func TestListWatchesCanList(t *testing.T) {
namespace string
fieldSelector fields.Selector
}{
-// Minion
+// Node
{
-location: testapi.Default.ResourcePath("minions", api.NamespaceAll, ""),
+location: testapi.Default.ResourcePath("nodes", api.NamespaceAll, ""),
-resource: "minions",
+resource: "nodes",
namespace: api.NamespaceAll,
fieldSelector: parseSelectorOrDie(""),
},
@@ -112,22 +112,22 @@ func TestListWatchesCanWatch(t *testing.T) {
namespace string
fieldSelector fields.Selector
}{
-// Minion
+// Node
{
location: buildLocation(
-testapi.Default.ResourcePathWithPrefix("watch", "minions", api.NamespaceAll, ""),
+testapi.Default.ResourcePathWithPrefix("watch", "nodes", api.NamespaceAll, ""),
buildQueryValues(url.Values{"resourceVersion": []string{""}})),
rv: "",
-resource: "minions",
+resource: "nodes",
namespace: api.NamespaceAll,
fieldSelector: parseSelectorOrDie(""),
},
{
location: buildLocation(
-testapi.Default.ResourcePathWithPrefix("watch", "minions", api.NamespaceAll, ""),
+testapi.Default.ResourcePathWithPrefix("watch", "nodes", api.NamespaceAll, ""),
buildQueryValues(url.Values{"resourceVersion": []string{"42"}})),
rv: "42",
-resource: "minions",
+resource: "nodes",
namespace: api.NamespaceAll,
fieldSelector: parseSelectorOrDie(""),
},

View File

@@ -17,7 +17,7 @@ limitations under the License.
/*
Package client contains the implementation of the client side communication with the
Kubernetes master. The Client class provides methods for reading, creating, updating,
-and deleting pods, replication controllers, daemons, services, and minions.
+and deleting pods, replication controllers, daemons, services, and nodes.
Most consumers should use the Config object to create a Client:

View File

@@ -31,7 +31,7 @@ func getNodesResourceName() string {
return "nodes"
}
-func TestListMinions(t *testing.T) {
+func TestListNodes(t *testing.T) {
c := &testClient{
Request: testRequest{
Method: "GET",
@@ -43,7 +43,7 @@ func TestListMinions(t *testing.T) {
c.Validate(t, response, err)
}
-func TestListMinionsLabels(t *testing.T) {
+func TestListNodesLabels(t *testing.T) {
labelSelectorQueryParamName := api.LabelSelectorQueryParam(testapi.Default.Version())
c := &testClient{
Request: testRequest{
@@ -73,19 +73,19 @@ func TestListMinionsLabels(t *testing.T) {
c.Validate(t, receivedNodeList, err)
}
-func TestGetMinion(t *testing.T) {
+func TestGetNode(t *testing.T) {
c := &testClient{
Request: testRequest{
Method: "GET",
Path: testapi.Default.ResourcePath(getNodesResourceName(), "", "1"),
},
-Response: Response{StatusCode: 200, Body: &api.Node{ObjectMeta: api.ObjectMeta{Name: "minion-1"}}},
+Response: Response{StatusCode: 200, Body: &api.Node{ObjectMeta: api.ObjectMeta{Name: "node-1"}}},
}
response, err := c.Setup(t).Nodes().Get("1")
c.Validate(t, response, err)
}
-func TestGetMinionWithNoName(t *testing.T) {
+func TestGetNodeWithNoName(t *testing.T) {
c := &testClient{Error: true}
receivedNode, err := c.Setup(t).Nodes().Get("")
if (err != nil) && (err.Error() != nameRequiredError) {
@@ -95,10 +95,10 @@ func TestGetMinionWithNoName(t *testing.T) {
c.Validate(t, receivedNode, err)
}
-func TestCreateMinion(t *testing.T) {
+func TestCreateNode(t *testing.T) {
-requestMinion := &api.Node{
+requestNode := &api.Node{
ObjectMeta: api.ObjectMeta{
-Name: "minion-1",
+Name: "node-1",
},
Status: api.NodeStatus{
Capacity: api.ResourceList{
@@ -114,17 +114,17 @@ func TestCreateMinion(t *testing.T) {
Request: testRequest{
Method: "POST",
Path: testapi.Default.ResourcePath(getNodesResourceName(), "", ""),
-Body: requestMinion},
+Body: requestNode},
Response: Response{
StatusCode: 200,
-Body: requestMinion,
+Body: requestNode,
},
}
-receivedMinion, err := c.Setup(t).Nodes().Create(requestMinion)
+receivedNode, err := c.Setup(t).Nodes().Create(requestNode)
-c.Validate(t, receivedMinion, err)
+c.Validate(t, receivedNode, err)
}
-func TestDeleteMinion(t *testing.T) {
+func TestDeleteNode(t *testing.T) {
c := &testClient{
Request: testRequest{
Method: "DELETE",
@@ -136,8 +136,8 @@ func TestDeleteMinion(t *testing.T) {
c.Validate(t, nil, err)
}
-func TestUpdateMinion(t *testing.T) {
+func TestUpdateNode(t *testing.T) {
-requestMinion := &api.Node{
+requestNode := &api.Node{
ObjectMeta: api.ObjectMeta{
Name: "foo",
ResourceVersion: "1",
@@ -157,8 +157,8 @@ func TestUpdateMinion(t *testing.T) {
Method: "PUT",
Path: testapi.Default.ResourcePath(getNodesResourceName(), "", "foo"),
},
-Response: Response{StatusCode: 200, Body: requestMinion},
+Response: Response{StatusCode: 200, Body: requestNode},
}
-response, err := c.Setup(t).Nodes().Update(requestMinion)
+response, err := c.Setup(t).Nodes().Update(requestNode)
c.Validate(t, response, err)
}

View File

@@ -23,7 +23,7 @@ import (
"k8s.io/kubernetes/pkg/watch"
)
-// FakeNodes implements MinionInterface. Meant to be embedded into a struct to get a default
+// FakeNodes implements NodeInterface. Meant to be embedded into a struct to get a default
// implementation. This makes faking out just the method you want to test easier.
type FakeNodes struct {
Fake *Fake
@@ -47,8 +47,8 @@ func (c *FakeNodes) List(label labels.Selector, field fields.Selector) (*api.Nod
return obj.(*api.NodeList), err
}
-func (c *FakeNodes) Create(minion *api.Node) (*api.Node, error) {
+func (c *FakeNodes) Create(node *api.Node) (*api.Node, error) {
-obj, err := c.Fake.Invokes(NewRootCreateAction("nodes", minion), minion)
+obj, err := c.Fake.Invokes(NewRootCreateAction("nodes", node), node)
if obj == nil {
return nil, err
}
@@ -56,8 +56,8 @@ func (c *FakeNodes) Create(minion *api.Node) (*api.Node, error) {
return obj.(*api.Node), err
}
-func (c *FakeNodes) Update(minion *api.Node) (*api.Node, error) {
+func (c *FakeNodes) Update(node *api.Node) (*api.Node, error) {
-obj, err := c.Fake.Invokes(NewRootUpdateAction("nodes", minion), minion)
+obj, err := c.Fake.Invokes(NewRootUpdateAction("nodes", node), node)
if obj == nil {
return nil, err
}
@@ -74,14 +74,14 @@ func (c *FakeNodes) Watch(label labels.Selector, field fields.Selector, resource
return c.Fake.InvokesWatch(NewRootWatchAction("nodes", label, field, resourceVersion))
}
-func (c *FakeNodes) UpdateStatus(minion *api.Node) (*api.Node, error) {
+func (c *FakeNodes) UpdateStatus(node *api.Node) (*api.Node, error) {
action := CreateActionImpl{}
action.Verb = "update"
action.Resource = "nodes"
action.Subresource = "status"
-action.Object = minion
+action.Object = node
-obj, err := c.Fake.Invokes(action, minion)
+obj, err := c.Fake.Invokes(action, node)
if obj == nil {
return nil, err
}

View File

@@ -23,16 +23,16 @@ import (
"k8s.io/kubernetes/pkg/labels"
)
-// MinionLister interface represents anything that can list minions for a scheduler.
+// NodeLister interface represents anything that can list nodes for a scheduler.
-type MinionLister interface {
+type NodeLister interface {
List() (list api.NodeList, err error)
}
-// FakeMinionLister implements MinionLister on a []string for test purposes.
+// FakeNodeLister implements NodeLister on a []string for test purposes.
-type FakeMinionLister api.NodeList
+type FakeNodeLister api.NodeList
-// List returns minions as a []string.
+// List returns nodes as a []string.
-func (f FakeMinionLister) List() (api.NodeList, error) {
+func (f FakeNodeLister) List() (api.NodeList, error) {
return api.NodeList(f), nil
}
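
For reference, a minimal sketch of how the renamed lister is consumed. The FakeNodeLister conversion and the api.NodeList/api.Node types come from the hunks above; the import paths are assumed from the k8s.io/kubernetes tree of this era, and the program itself is purely illustrative rather than part of the change:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
)

func main() {
	// FakeNodeLister is just an api.NodeList that satisfies NodeLister,
	// so a test can hand the scheduler a fixed set of nodes.
	var lister algorithm.NodeLister = algorithm.FakeNodeLister(api.NodeList{
		Items: []api.Node{
			{ObjectMeta: api.ObjectMeta{Name: "node-1"}},
			{ObjectMeta: api.ObjectMeta{Name: "node-2"}},
		},
	})
	nodes, err := lister.List()
	if err != nil {
		fmt.Println(err)
		return
	}
	for _, node := range nodes.Items {
		fmt.Println(node.Name) // prints node-1, then node-2
	}
}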

View File

@@ -203,12 +203,12 @@ type NodeSelector struct {
info NodeInfo
}
-func (n *NodeSelector) PodSelectorMatches(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
+func (n *NodeSelector) PodSelectorMatches(pod *api.Pod, existingPods []*api.Pod, nodeID string) (bool, error) {
-minion, err := n.info.GetNodeInfo(node)
+node, err := n.info.GetNodeInfo(nodeID)
if err != nil {
return false, err
}
-return PodMatchesNodeLabels(pod, minion), nil
+return PodMatchesNodeLabels(pod, node), nil
}
func PodFitsHost(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
@@ -233,27 +233,27 @@ func NewNodeLabelPredicate(info NodeInfo, labels []string, presence bool) algori
return labelChecker.CheckNodeLabelPresence
}
-// CheckNodeLabelPresence checks whether all of the specified labels exists on a minion or not, regardless of their value
+// CheckNodeLabelPresence checks whether all of the specified labels exists on a node or not, regardless of their value
-// If "presence" is false, then returns false if any of the requested labels matches any of the minion's labels,
+// If "presence" is false, then returns false if any of the requested labels matches any of the node's labels,
// otherwise returns true.
-// If "presence" is true, then returns false if any of the requested labels does not match any of the minion's labels,
+// If "presence" is true, then returns false if any of the requested labels does not match any of the node's labels,
// otherwise returns true.
//
-// Consider the cases where the minions are placed in regions/zones/racks and these are identified by labels
+// Consider the cases where the nodes are placed in regions/zones/racks and these are identified by labels
-// In some cases, it is required that only minions that are part of ANY of the defined regions/zones/racks be selected
+// In some cases, it is required that only nodes that are part of ANY of the defined regions/zones/racks be selected
//
-// Alternately, eliminating minions that have a certain label, regardless of value, is also useful
+// Alternately, eliminating nodes that have a certain label, regardless of value, is also useful
-// A minion may have a label with "retiring" as key and the date as the value
+// A node may have a label with "retiring" as key and the date as the value
-// and it may be desirable to avoid scheduling new pods on this minion
+// and it may be desirable to avoid scheduling new pods on this node
-func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
+func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *api.Pod, existingPods []*api.Pod, nodeID string) (bool, error) {
var exists bool
-minion, err := n.info.GetNodeInfo(node)
+node, err := n.info.GetNodeInfo(nodeID)
if err != nil {
return false, err
}
-minionLabels := labels.Set(minion.Labels)
+nodeLabels := labels.Set(node.Labels)
for _, label := range n.labels {
-exists = minionLabels.Has(label)
+exists = nodeLabels.Has(label)
if (exists && !n.presence) || (!exists && n.presence) {
return false, nil
}
@@ -278,16 +278,16 @@ func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister al
return affinity.CheckServiceAffinity
}
-// CheckServiceAffinity ensures that only the minions that match the specified labels are considered for scheduling.
+// CheckServiceAffinity ensures that only the nodes that match the specified labels are considered for scheduling.
// The set of labels to be considered are provided to the struct (ServiceAffinity).
-// The pod is checked for the labels and any missing labels are then checked in the minion
+// The pod is checked for the labels and any missing labels are then checked in the node
// that hosts the service pods (peers) for the given pod.
//
// We add an implicit selector requiring some particular value V for label L to a pod, if:
// - L is listed in the ServiceAffinity object that is passed into the function
// - the pod does not have any NodeSelector for L
-// - some other pod from the same service is already scheduled onto a minion that has value V for label L
+// - some other pod from the same service is already scheduled onto a node that has value V for label L
-func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
+func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, existingPods []*api.Pod, nodeID string) (bool, error) {
var affinitySelector labels.Selector
// check if the pod being scheduled has the affinity labels specified in its NodeSelector
@@ -322,8 +322,8 @@ func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, existingPods []*api
}
}
if len(nsServicePods) > 0 {
-// consider any service pod and fetch the minion its hosted on
+// consider any service pod and fetch the node its hosted on
-otherMinion, err := s.nodeInfo.GetNodeInfo(nsServicePods[0].Spec.NodeName)
+otherNode, err := s.nodeInfo.GetNodeInfo(nsServicePods[0].Spec.NodeName)
if err != nil {
return false, err
}
@@ -332,28 +332,28 @@ func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, existingPods []*api
if _, exists := affinityLabels[l]; exists {
continue
}
-if labels.Set(otherMinion.Labels).Has(l) {
+if labels.Set(otherNode.Labels).Has(l) {
-affinityLabels[l] = labels.Set(otherMinion.Labels).Get(l)
+affinityLabels[l] = labels.Set(otherNode.Labels).Get(l)
}
}
}
}
}
-// if there are no existing pods in the service, consider all minions
+// if there are no existing pods in the service, consider all nodes
if len(affinityLabels) == 0 {
affinitySelector = labels.Everything()
} else {
affinitySelector = labels.Set(affinityLabels).AsSelector()
}
-minion, err := s.nodeInfo.GetNodeInfo(node)
+node, err := s.nodeInfo.GetNodeInfo(nodeID)
if err != nil {
return false, err
}
-// check if the minion matches the selector
+// check if the node matches the selector
-return affinitySelector.Matches(labels.Set(minion.Labels)), nil
+return affinitySelector.Matches(labels.Set(node.Labels)), nil
}
func PodFitsPorts(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
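
The CheckServiceAffinity comment above describes how an implicit selector is accumulated from the labels of a node that already hosts a peer pod of the same service. A small, self-contained sketch of just that selector construction, using the labels package imported in this file; the label keys and values are made up for illustration:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/labels"
)

func main() {
	affinityLabels := map[string]string{}
	// Hypothetical labels of the node already running a pod of the same service.
	peerNode := labels.Set{"region": "r1", "zone": "z2"}
	// Hypothetical label list configured on the ServiceAffinity predicate.
	for _, l := range []string{"region"} {
		if peerNode.Has(l) {
			affinityLabels[l] = peerNode.Get(l)
		}
	}
	affinitySelector := labels.Set(affinityLabels).AsSelector()
	// A candidate node in the same region but a different zone still matches.
	candidate := labels.Set{"region": "r1", "zone": "z9"}
	fmt.Println(affinitySelector.Matches(candidate)) // true
}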

View File

@@ -637,7 +637,7 @@ func TestServiceAffinity(t *testing.T) {
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: true,
labels: []string{"region"},
-test: "service pod on same minion",
+test: "service pod on same node",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
@@ -646,7 +646,7 @@ func TestServiceAffinity(t *testing.T) {
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: true,
labels: []string{"region"},
-test: "service pod on different minion, region match",
+test: "service pod on different node, region match",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
@@ -655,7 +655,7 @@ func TestServiceAffinity(t *testing.T) {
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: false,
labels: []string{"region"},
-test: "service pod on different minion, region mismatch",
+test: "service pod on different node, region mismatch",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}},
@@ -691,7 +691,7 @@ func TestServiceAffinity(t *testing.T) {
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: false,
labels: []string{"region", "zone"},
-test: "service pod on different minion, multiple labels, not all match",
+test: "service pod on different node, multiple labels, not all match",
},
{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
@@ -700,7 +700,7 @@ func TestServiceAffinity(t *testing.T) {
services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
fits: true,
labels: []string{"region", "zone"},
-test: "service pod on different minion, multiple labels, all match",
+test: "service pod on different node, multiple labels, all match",
},
}

View File

@@ -87,7 +87,7 @@ func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) al
}
}
// Add the resources requested by the current pod being scheduled.
-// This also helps differentiate between differently sized, but empty, minions.
+// This also helps differentiate between differently sized, but empty, nodes.
for _, container := range pod.Spec.Containers {
cpu, memory := getNonzeroRequests(&container.Resources.Requests)
totalMilliCPU += cpu
@@ -114,8 +114,8 @@ func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) al
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
// based on the minimum of the average of the fraction of requested to capacity.
// Details: cpu((capacity - sum(requested)) * 10 / capacity) + memory((capacity - sum(requested)) * 10 / capacity) / 2
-func LeastRequestedPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
+func LeastRequestedPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
-nodes, err := minionLister.List()
+nodes, err := nodeLister.List()
if err != nil {
return algorithm.HostPriorityList{}, err
}
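
The per-resource rule quoted in the comment above, (capacity - sum(requested)) * 10 / capacity, averaged over CPU and memory, is easy to check by hand. A small illustrative sketch, separate from the scheduler code, that reproduces the arithmetic used in the test tables further down:

package main

import "fmt"

// leastRequestedScore mirrors the rule described above: a node scores
// ((capacity - requested) * 10) / capacity per resource, and 0 when it has
// no capacity or the requests already exceed it, as the test cases expect.
func leastRequestedScore(requested, capacity float64) float64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return (capacity - requested) * 10 / capacity
}

func main() {
	// Numbers from the "differently sized machines" case in TestLeastRequested:
	// machine1 has 4000 milli-CPU and 10000 memory; 3000 and 5000 are requested.
	cpu := leastRequestedScore(3000, 4000)     // 2.5
	memory := leastRequestedScore(5000, 10000) // 5
	fmt.Println(int((cpu + memory) / 2))       // 3, the expected host priority
}
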
@@ -141,32 +141,32 @@ func NewNodeLabelPriority(label string, presence bool) algorithm.PriorityFunctio
return labelPrioritizer.CalculateNodeLabelPriority
}
-// CalculateNodeLabelPriority checks whether a particular label exists on a minion or not, regardless of its value.
+// CalculateNodeLabelPriority checks whether a particular label exists on a node or not, regardless of its value.
-// If presence is true, prioritizes minions that have the specified label, regardless of value.
+// If presence is true, prioritizes nodes that have the specified label, regardless of value.
-// If presence is false, prioritizes minions that do not have the specified label.
+// If presence is false, prioritizes nodes that do not have the specified label.
-func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
+func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
var score int
-minions, err := minionLister.List()
+nodes, err := nodeLister.List()
if err != nil {
return nil, err
}
-labeledMinions := map[string]bool{}
+labeledNodes := map[string]bool{}
-for _, minion := range minions.Items {
+for _, node := range nodes.Items {
-exists := labels.Set(minion.Labels).Has(n.label)
+exists := labels.Set(node.Labels).Has(n.label)
-labeledMinions[minion.Name] = (exists && n.presence) || (!exists && !n.presence)
+labeledNodes[node.Name] = (exists && n.presence) || (!exists && !n.presence)
}
result := []algorithm.HostPriority{}
//score int - scale of 0-10
// 0 being the lowest priority and 10 being the highest
-for minionName, success := range labeledMinions {
+for nodeName, success := range labeledNodes {
if success {
score = 10
} else {
score = 0
}
-result = append(result, algorithm.HostPriority{Host: minionName, Score: score})
+result = append(result, algorithm.HostPriority{Host: nodeName, Score: score})
}
return result, nil
}
@@ -177,8 +177,8 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podListe
// close the two metrics are to each other.
// Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by:
// "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization"
-func BalancedResourceAllocation(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
+func BalancedResourceAllocation(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
-nodes, err := minionLister.List()
+nodes, err := nodeLister.List()
if err != nil {
return algorithm.HostPriorityList{}, err
}
@@ -203,7 +203,7 @@ func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*ap
}
}
// Add the resources requested by the current pod being scheduled.
-// This also helps differentiate between differently sized, but empty, minions.
+// This also helps differentiate between differently sized, but empty, nodes.
for _, container := range pod.Spec.Containers {
cpu, memory := getNonzeroRequests(&container.Resources.Requests)
totalMilliCPU += cpu

View File

@@ -28,7 +28,7 @@ import (
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
)
-func makeMinion(node string, milliCPU, memory int64) api.Node {
+func makeNode(node string, milliCPU, memory int64) api.Node {
return api.Node{
ObjectMeta: api.ObjectMeta{Name: node},
Status: api.NodeStatus{
@@ -96,7 +96,7 @@ func TestZeroRequest(t *testing.T) {
// and when the zero-request pod is the one being scheduled.
{
pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryRequest*10), makeMinion("machine2", 1000, defaultMemoryRequest*10)},
+nodes: []api.Node{makeNode("machine1", 1000, defaultMemoryRequest*10), makeNode("machine2", 1000, defaultMemoryRequest*10)},
test: "test priority of zero-request pod with machine with zero-request pod",
pods: []*api.Pod{
{Spec: large1}, {Spec: noResources1},
@@ -105,7 +105,7 @@ func TestZeroRequest(t *testing.T) {
},
{
pod: &api.Pod{Spec: small},
-nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryRequest*10), makeMinion("machine2", 1000, defaultMemoryRequest*10)},
+nodes: []api.Node{makeNode("machine1", 1000, defaultMemoryRequest*10), makeNode("machine2", 1000, defaultMemoryRequest*10)},
test: "test priority of nonzero-request pod with machine with zero-request pod",
pods: []*api.Pod{
{Spec: large1}, {Spec: noResources1},
@@ -115,7 +115,7 @@ func TestZeroRequest(t *testing.T) {
// The point of this test is to verify that we're not just getting the same score no matter what we schedule.
{
pod: &api.Pod{Spec: large},
-nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryRequest*10), makeMinion("machine2", 1000, defaultMemoryRequest*10)},
+nodes: []api.Node{makeNode("machine1", 1000, defaultMemoryRequest*10), makeNode("machine2", 1000, defaultMemoryRequest*10)},
test: "test priority of larger pod with machine with zero-request pod",
pods: []*api.Pod{
{Spec: large1}, {Spec: noResources1},
@@ -133,7 +133,7 @@ func TestZeroRequest(t *testing.T) {
// plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go if you want
// to test what's actually in production.
[]algorithm.PriorityConfig{{Function: LeastRequestedPriority, Weight: 1}, {Function: BalancedResourceAllocation, Weight: 1}, {Function: NewSelectorSpreadPriority(algorithm.FakeServiceLister([]api.Service{}), algorithm.FakeControllerLister([]api.ReplicationController{})), Weight: 1}},
-algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
+algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -222,52 +222,52 @@ func TestLeastRequested(t *testing.T) {
}{
{
/*
-Minion1 scores (remaining resources) on 0-10 scale
+Node1 scores (remaining resources) on 0-10 scale
CPU Score: ((4000 - 0) *10) / 4000 = 10
Memory Score: ((10000 - 0) *10) / 10000 = 10
-Minion1 Score: (10 + 10) / 2 = 10
+Node1 Score: (10 + 10) / 2 = 10
-Minion2 scores (remaining resources) on 0-10 scale
+Node2 scores (remaining resources) on 0-10 scale
CPU Score: ((4000 - 0) *10) / 4000 = 10
Memory Score: ((10000 - 0) *10) / 10000 = 10
-Minion2 Score: (10 + 10) / 2 = 10
+Node2 Score: (10 + 10) / 2 = 10
*/
pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
+nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
test: "nothing scheduled, nothing requested",
},
{
/*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
CPU Score: ((4000 - 3000) *10) / 4000 = 2.5
Memory Score: ((10000 - 5000) *10) / 10000 = 5
-Minion1 Score: (2.5 + 5) / 2 = 3
+Node1 Score: (2.5 + 5) / 2 = 3
-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
CPU Score: ((6000 - 3000) *10) / 6000 = 5
Memory Score: ((10000 - 5000) *10) / 10000 = 5
-Minion2 Score: (5 + 5) / 2 = 5
+Node2 Score: (5 + 5) / 2 = 5
*/
pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 6000, 10000)},
+nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
expectedList: []algorithm.HostPriority{{"machine1", 3}, {"machine2", 5}},
test: "nothing scheduled, resources requested, differently sized machines",
},
{
/*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
CPU Score: ((4000 - 0) *10) / 4000 = 10
Memory Score: ((10000 - 0) *10) / 10000 = 10
-Minion1 Score: (10 + 10) / 2 = 10
+Node1 Score: (10 + 10) / 2 = 10
-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
CPU Score: ((4000 - 0) *10) / 4000 = 10
Memory Score: ((10000 - 0) *10) / 10000 = 10
-Minion2 Score: (10 + 10) / 2 = 10
+Node2 Score: (10 + 10) / 2 = 10
*/
pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
+nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
test: "no resources requested, pods scheduled",
pods: []*api.Pod{
@@ -279,18 +279,18 @@ func TestLeastRequested(t *testing.T) {
},
{
/*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
CPU Score: ((10000 - 6000) *10) / 10000 = 4
Memory Score: ((20000 - 0) *10) / 20000 = 10
-Minion1 Score: (4 + 10) / 2 = 7
+Node1 Score: (4 + 10) / 2 = 7
-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
CPU Score: ((10000 - 6000) *10) / 10000 = 4
Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
-Minion2 Score: (4 + 7.5) / 2 = 5
+Node2 Score: (4 + 7.5) / 2 = 5
*/
pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
+nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []algorithm.HostPriority{{"machine1", 7}, {"machine2", 5}},
test: "no resources requested, pods scheduled with resources",
pods: []*api.Pod{
@@ -302,18 +302,18 @@ func TestLeastRequested(t *testing.T) {
},
{
/*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
CPU Score: ((10000 - 6000) *10) / 10000 = 4
Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
-Minion1 Score: (4 + 7.5) / 2 = 5
+Node1 Score: (4 + 7.5) / 2 = 5
-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
CPU Score: ((10000 - 6000) *10) / 10000 = 4
Memory Score: ((20000 - 10000) *10) / 20000 = 5
-Minion2 Score: (4 + 5) / 2 = 4
+Node2 Score: (4 + 5) / 2 = 4
*/
pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
+nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 4}},
test: "resources requested, pods scheduled with resources",
pods: []*api.Pod{
@@ -323,18 +323,18 @@ func TestLeastRequested(t *testing.T) {
},
{
/*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
CPU Score: ((10000 - 6000) *10) / 10000 = 4
Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
-Minion1 Score: (4 + 7.5) / 2 = 5
+Node1 Score: (4 + 7.5) / 2 = 5
-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
CPU Score: ((10000 - 6000) *10) / 10000 = 4
Memory Score: ((50000 - 10000) *10) / 50000 = 8
-Minion2 Score: (4 + 8) / 2 = 6
+Node2 Score: (4 + 8) / 2 = 6
*/
pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 50000)},
+nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 6}},
test: "resources requested, pods scheduled with resources, differently sized machines",
pods: []*api.Pod{
@@ -344,20 +344,20 @@ func TestLeastRequested(t *testing.T) {
},
{
/*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
CPU Score: ((4000 - 6000) *10) / 4000 = 0
Memory Score: ((10000 - 0) *10) / 10000 = 10
-Minion1 Score: (0 + 10) / 2 = 5
+Node1 Score: (0 + 10) / 2 = 5
-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
CPU Score: ((4000 - 6000) *10) / 4000 = 0
Memory Score: ((10000 - 5000) *10) / 10000 = 5
-Minion2 Score: (0 + 5) / 2 = 2
+Node2 Score: (0 + 5) / 2 = 2
*/
pod: &api.Pod{Spec: cpuOnly},
-nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
+nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 2}},
-test: "requested resources exceed minion capacity",
+test: "requested resources exceed node capacity",
pods: []*api.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
@@ -365,9 +365,9 @@ func TestLeastRequested(t *testing.T) {
},
{
pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeMinion("machine1", 0, 0), makeMinion("machine2", 0, 0)},
+nodes: []api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
-test: "zero minion resources, pods scheduled with resources",
+test: "zero node resources, pods scheduled with resources",
pods: []*api.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
@@ -376,7 +376,7 @@ func TestLeastRequested(t *testing.T) {
}
for _, test := range tests {
-list, err := LeastRequestedPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
+list, err := LeastRequestedPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -470,7 +470,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
label: test.label,
presence: test.presence,
}
-list, err := prioritizer.CalculateNodeLabelPriority(nil, nil, algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
+list, err := prioritizer.CalculateNodeLabelPriority(nil, nil, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -554,52 +554,52 @@ func TestBalancedResourceAllocation(t *testing.T) {
}{
{
/*
-Minion1 scores (remaining resources) on 0-10 scale
+Node1 scores (remaining resources) on 0-10 scale
CPU Fraction: 0 / 4000 = 0%
Memory Fraction: 0 / 10000 = 0%
-Minion1 Score: 10 - (0-0)*10 = 10
+Node1 Score: 10 - (0-0)*10 = 10
-Minion2 scores (remaining resources) on 0-10 scale
+Node2 scores (remaining resources) on 0-10 scale
CPU Fraction: 0 / 4000 = 0 %
Memory Fraction: 0 / 10000 = 0%
-Minion2 Score: 10 - (0-0)*10 = 10
+Node2 Score: 10 - (0-0)*10 = 10
*/
pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
+nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
test: "nothing scheduled, nothing requested",
},
{
/*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
CPU Fraction: 3000 / 4000= 75%
Memory Fraction: 5000 / 10000 = 50%
-Minion1 Score: 10 - (0.75-0.5)*10 = 7
+Node1 Score: 10 - (0.75-0.5)*10 = 7
-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
CPU Fraction: 3000 / 6000= 50%
Memory Fraction: 5000/10000 = 50%
-Minion2 Score: 10 - (0.5-0.5)*10 = 10
+Node2 Score: 10 - (0.5-0.5)*10 = 10
*/
pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 6000, 10000)},
+nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
expectedList: []algorithm.HostPriority{{"machine1", 7}, {"machine2", 10}},
test: "nothing scheduled, resources requested, differently sized machines",
},
{
/*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
CPU Fraction: 0 / 4000= 0%
Memory Fraction: 0 / 10000 = 0%
-Minion1 Score: 10 - (0-0)*10 = 10
+Node1 Score: 10 - (0-0)*10 = 10
-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
CPU Fraction: 0 / 4000= 0%
Memory Fraction: 0 / 10000 = 0%
-Minion2 Score: 10 - (0-0)*10 = 10
+Node2 Score: 10 - (0-0)*10 = 10
*/
pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
+nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
test: "no resources requested, pods scheduled",
pods: []*api.Pod{
@@ -611,18 +611,18 @@ func TestBalancedResourceAllocation(t *testing.T) {
},
{
/*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
CPU Fraction: 6000 / 10000 = 60%
Memory Fraction: 0 / 20000 = 0%
-Minion1 Score: 10 - (0.6-0)*10 = 4
+Node1 Score: 10 - (0.6-0)*10 = 4
-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
CPU Fraction: 6000 / 10000 = 60%
Memory Fraction: 5000 / 20000 = 25%
-Minion2 Score: 10 - (0.6-0.25)*10 = 6
+Node2 Score: 10 - (0.6-0.25)*10 = 6
*/
pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
+nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []algorithm.HostPriority{{"machine1", 4}, {"machine2", 6}},
test: "no resources requested, pods scheduled with resources",
pods: []*api.Pod{
@@ -634,18 +634,18 @@ func TestBalancedResourceAllocation(t *testing.T) {
},
{
/*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
CPU Fraction: 6000 / 10000 = 60%
Memory Fraction: 5000 / 20000 = 25%
-Minion1 Score: 10 - (0.6-0.25)*10 = 6
+Node1 Score: 10 - (0.6-0.25)*10 = 6
-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
CPU Fraction: 6000 / 10000 = 60%
Memory Fraction: 10000 / 20000 = 50%
-Minion2 Score: 10 - (0.6-0.5)*10 = 9
+Node2 Score: 10 - (0.6-0.5)*10 = 9
*/
pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
+nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []algorithm.HostPriority{{"machine1", 6}, {"machine2", 9}},
test: "resources requested, pods scheduled with resources",
pods: []*api.Pod{
@@ -655,18 +655,18 @@ func TestBalancedResourceAllocation(t *testing.T) {
},
{
/*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
CPU Fraction: 6000 / 10000 = 60%
Memory Fraction: 5000 / 20000 = 25%
-Minion1 Score: 10 - (0.6-0.25)*10 = 6
+Node1 Score: 10 - (0.6-0.25)*10 = 6
-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
CPU Fraction: 6000 / 10000 = 60%
Memory Fraction: 10000 / 50000 = 20%
-Minion2 Score: 10 - (0.6-0.2)*10 = 6
+Node2 Score: 10 - (0.6-0.2)*10 = 6
*/
pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 50000)},
+nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
expectedList: []algorithm.HostPriority{{"machine1", 6}, {"machine2", 6}},
test: "resources requested, pods scheduled with resources, differently sized machines",
pods: []*api.Pod{
@@ -676,20 +676,20 @@ func TestBalancedResourceAllocation(t *testing.T) {
},
{
/*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
CPU Fraction: 6000 / 4000 > 100% ==> Score := 0
Memory Fraction: 0 / 10000 = 0
-Minion1 Score: 0
+Node1 Score: 0
-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
CPU Fraction: 6000 / 4000 > 100% ==> Score := 0
Memory Fraction 5000 / 10000 = 50%
-Minion2 Score: 0
+Node2 Score: 0
*/
pod: &api.Pod{Spec: cpuOnly},
-nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
+nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
-test: "requested resources exceed minion capacity",
+test: "requested resources exceed node capacity",
pods: []*api.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
@@ -697,9 +697,9 @@ func TestBalancedResourceAllocation(t *testing.T) {
},
{
pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeMinion("machine1", 0, 0), makeMinion("machine2", 0, 0)},
+nodes: []api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
-test: "zero minion resources, pods scheduled with resources",
+test: "zero node resources, pods scheduled with resources",
pods: []*api.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
@@ -708,7 +708,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
}
for _, test := range tests {
-list, err := BalancedResourceAllocation(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
+list, err := BalancedResourceAllocation(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
if err != nil {
t.Errorf("unexpected error: %v", err)
}

View File

@@ -39,7 +39,7 @@ func NewSelectorSpreadPriority(serviceLister algorithm.ServiceLister, controller
// CalculateSpreadPriority spreads pods by minimizing the number of pods belonging to the same service or replication controller. It counts number of pods that run under
// Services or RCs as the pod being scheduled and tries to minimize the number of conflicts. I.e. pushes scheduler towards a Node where there's a smallest number of
// pods which match the same selectors of Services and RCs as current pod.
-func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
+func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
var maxCount int
var nsPods []*api.Pod
@@ -70,7 +70,7 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorit
}
}
-minions, err := minionLister.List()
+nodes, err := nodeLister.List()
if err != nil {
return nil, err
}
@@ -87,7 +87,7 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorit
}
if matches {
counts[pod.Spec.NodeName]++
-// Compute the maximum number of pods hosted on any minion
+// Compute the maximum number of pods hosted on any node
if counts[pod.Spec.NodeName] > maxCount {
maxCount = counts[pod.Spec.NodeName]
}
@@ -98,15 +98,15 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorit
result := []algorithm.HostPriority{}
//score int - scale of 0-10
// 0 being the lowest priority and 10 being the highest
-for _, minion := range minions.Items {
+for _, node := range nodes.Items {
-// initializing to the default/max minion score of 10
+// initializing to the default/max node score of 10
fScore := float32(10)
if maxCount > 0 {
-fScore = 10 * (float32(maxCount-counts[minion.Name]) / float32(maxCount))
+fScore = 10 * (float32(maxCount-counts[node.Name]) / float32(maxCount))
}
-result = append(result, algorithm.HostPriority{Host: minion.Name, Score: int(fScore)})
+result = append(result, algorithm.HostPriority{Host: node.Name, Score: int(fScore)})
glog.V(10).Infof(
-"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, minion.Name, int(fScore),
+"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, node.Name, int(fScore),
)
}
return result, nil
@@ -128,7 +128,7 @@ func NewServiceAntiAffinityPriority(serviceLister algorithm.ServiceLister, label
// CalculateAntiAffinityPriority spreads pods by minimizing the number of pods belonging to the same service
// on machines with the same value for a particular label.
// The label to be considered is provided to the struct (ServiceAntiAffinity).
-func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
+func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
var nsServicePods []*api.Pod
services, err := s.serviceLister.GetPodServices(pod)
@ -148,26 +148,26 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLis
} }
} }
minions, err := minionLister.List() nodes, err := nodeLister.List()
if err != nil { if err != nil {
return nil, err return nil, err
} }
// separate out the minions that have the label from the ones that don't // separate out the nodes that have the label from the ones that don't
otherMinions := []string{} otherNodes := []string{}
labeledMinions := map[string]string{} labeledNodes := map[string]string{}
for _, minion := range minions.Items { for _, node := range nodes.Items {
if labels.Set(minion.Labels).Has(s.label) { if labels.Set(node.Labels).Has(s.label) {
label := labels.Set(minion.Labels).Get(s.label) label := labels.Set(node.Labels).Get(s.label)
labeledMinions[minion.Name] = label labeledNodes[node.Name] = label
} else { } else {
otherMinions = append(otherMinions, minion.Name) otherNodes = append(otherNodes, node.Name)
} }
} }
podCounts := map[string]int{} podCounts := map[string]int{}
for _, pod := range nsServicePods { for _, pod := range nsServicePods {
label, exists := labeledMinions[pod.Spec.NodeName] label, exists := labeledNodes[pod.Spec.NodeName]
if !exists { if !exists {
continue continue
} }
@ -178,17 +178,17 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLis
result := []algorithm.HostPriority{} result := []algorithm.HostPriority{}
//score int - scale of 0-10 //score int - scale of 0-10
// 0 being the lowest priority and 10 being the highest // 0 being the lowest priority and 10 being the highest
for minion := range labeledMinions { for node := range labeledNodes {
// initializing to the default/max minion score of 10 // initializing to the default/max node score of 10
fScore := float32(10) fScore := float32(10)
if numServicePods > 0 { if numServicePods > 0 {
fScore = 10 * (float32(numServicePods-podCounts[labeledMinions[minion]]) / float32(numServicePods)) fScore = 10 * (float32(numServicePods-podCounts[labeledNodes[node]]) / float32(numServicePods))
} }
result = append(result, algorithm.HostPriority{Host: minion, Score: int(fScore)}) result = append(result, algorithm.HostPriority{Host: node, Score: int(fScore)})
} }
// add the open minions with a score of 0 // add the open nodes with a score of 0
for _, minion := range otherMinions { for _, node := range otherNodes {
result = append(result, algorithm.HostPriority{Host: minion, Score: 0}) result = append(result, algorithm.HostPriority{Host: node, Score: 0})
} }
return result, nil return result, nil
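To make the 0-10 scaling concrete, here is a small, self-contained sketch (illustrative names, not the scheduler's own code) of the formula used in the CalculateSpreadPriority loop above; CalculateAntiAffinityPriority applies the same idea per label group, with the total number of service pods as the denominator.

package main

import "fmt"

// spreadScores applies the same linear scaling as the loops above:
// fScore = 10 * (maxCount - count) / maxCount, defaulting to 10 when
// no pod of the service/RC is running anywhere.
func spreadScores(counts map[string]int) map[string]int {
	maxCount := 0
	for _, c := range counts {
		if c > maxCount {
			maxCount = c
		}
	}
	scores := map[string]int{}
	for name, c := range counts {
		fScore := float32(10)
		if maxCount > 0 {
			fScore = 10 * (float32(maxCount-c) / float32(maxCount))
		}
		scores[name] = int(fScore)
	}
	return scores
}

func main() {
	// Three nodes already running 4, 2 and 0 pods of the same service.
	fmt.Println(spreadScores(map[string]int{"node-a": 4, "node-b": 2, "node-c": 0}))
	// node-a scores 0, node-b scores 5, node-c scores 10.
}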


@ -217,7 +217,7 @@ func TestSelectorSpreadPriority(t *testing.T) {
for _, test := range tests { for _, test := range tests {
selectorSpread := SelectorSpread{serviceLister: algorithm.FakeServiceLister(test.services), controllerLister: algorithm.FakeControllerLister(test.rcs)} selectorSpread := SelectorSpread{serviceLister: algorithm.FakeServiceLister(test.services), controllerLister: algorithm.FakeControllerLister(test.rcs)}
list, err := selectorSpread.CalculateSpreadPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(makeNodeList(test.nodes))) list, err := selectorSpread.CalculateSpreadPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeNodeLister(makeNodeList(test.nodes)))
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -379,13 +379,13 @@ func TestZoneSpreadPriority(t *testing.T) {
expectedList: []algorithm.HostPriority{{"machine11", 7}, {"machine12", 7}, expectedList: []algorithm.HostPriority{{"machine11", 7}, {"machine12", 7},
{"machine21", 5}, {"machine22", 5}, {"machine21", 5}, {"machine22", 5},
{"machine01", 0}, {"machine02", 0}}, {"machine01", 0}, {"machine02", 0}},
test: "service pod on non-zoned minion", test: "service pod on non-zoned node",
}, },
} }
for _, test := range tests { for _, test := range tests {
zoneSpread := ServiceAntiAffinity{serviceLister: algorithm.FakeServiceLister(test.services), label: "zone"} zoneSpread := ServiceAntiAffinity{serviceLister: algorithm.FakeServiceLister(test.services), label: "zone"}
list, err := zoneSpread.CalculateAntiAffinityPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(makeLabeledMinionList(test.nodes))) list, err := zoneSpread.CalculateAntiAffinityPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeNodeLister(makeLabeledNodeList(test.nodes)))
if err != nil { if err != nil {
t.Errorf("unexpected error: %v", err) t.Errorf("unexpected error: %v", err)
} }
@ -398,7 +398,7 @@ func TestZoneSpreadPriority(t *testing.T) {
} }
} }
func makeLabeledMinionList(nodeMap map[string]map[string]string) (result api.NodeList) { func makeLabeledNodeList(nodeMap map[string]map[string]string) (result api.NodeList) {
nodes := []api.Node{} nodes := []api.Node{}
for nodeName, labels := range nodeMap { for nodeName, labels := range nodeMap {
nodes = append(nodes, api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName, Labels: labels}}) nodes = append(nodes, api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName, Labels: labels}})


@@ -23,5 +23,5 @@ import (
// Scheduler is an interface implemented by things that know how to schedule pods
// onto machines.
type ScheduleAlgorithm interface {
-Schedule(*api.Pod, MinionLister) (selectedMachine string, err error)
+Schedule(*api.Pod, NodeLister) (selectedMachine string, err error)
}
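For illustration only, a minimal implementation of this interface could look like the sketch below. It is not part of this commit; it assumes the package layout in this tree (algorithm.NodeLister exposing List(), pkg/api for api.Pod) and simply returns the first listed node.

// Hypothetical example: a ScheduleAlgorithm that binds every pod to the first
// node returned by the NodeLister. Import paths assume this source tree.
package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
)

type firstFit struct{}

func (firstFit) Schedule(pod *api.Pod, nodeLister algorithm.NodeLister) (string, error) {
	nodes, err := nodeLister.List()
	if err != nil {
		return "", err
	}
	if len(nodes.Items) == 0 {
		return "", fmt.Errorf("no nodes available to schedule pod %s", pod.Name)
	}
	return nodes.Items[0].Name, nil
}

// The compiler enforces that firstFit satisfies the interface above.
var _ algorithm.ScheduleAlgorithm = firstFit{}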


@ -25,14 +25,14 @@ import (
// Some functions used by multiple scheduler tests. // Some functions used by multiple scheduler tests.
type schedulerTester struct { type schedulerTester struct {
t *testing.T t *testing.T
scheduler ScheduleAlgorithm scheduler ScheduleAlgorithm
minionLister MinionLister nodeLister NodeLister
} }
// Call if you know exactly where pod should get scheduled. // Call if you know exactly where pod should get scheduled.
func (st *schedulerTester) expectSchedule(pod *api.Pod, expected string) { func (st *schedulerTester) expectSchedule(pod *api.Pod, expected string) {
actual, err := st.scheduler.Schedule(pod, st.minionLister) actual, err := st.scheduler.Schedule(pod, st.nodeLister)
if err != nil { if err != nil {
st.t.Errorf("Unexpected error %v\nTried to scheduler: %#v", err, pod) st.t.Errorf("Unexpected error %v\nTried to scheduler: %#v", err, pod)
return return
@ -44,7 +44,7 @@ func (st *schedulerTester) expectSchedule(pod *api.Pod, expected string) {
// Call if you can't predict where pod will be scheduled. // Call if you can't predict where pod will be scheduled.
func (st *schedulerTester) expectSuccess(pod *api.Pod) { func (st *schedulerTester) expectSuccess(pod *api.Pod) {
_, err := st.scheduler.Schedule(pod, st.minionLister) _, err := st.scheduler.Schedule(pod, st.nodeLister)
if err != nil { if err != nil {
st.t.Errorf("Unexpected error %v\nTried to scheduler: %#v", err, pod) st.t.Errorf("Unexpected error %v\nTried to scheduler: %#v", err, pod)
return return
@ -53,7 +53,7 @@ func (st *schedulerTester) expectSuccess(pod *api.Pod) {
// Call if pod should *not* schedule. // Call if pod should *not* schedule.
func (st *schedulerTester) expectFailure(pod *api.Pod) { func (st *schedulerTester) expectFailure(pod *api.Pod) {
_, err := st.scheduler.Schedule(pod, st.minionLister) _, err := st.scheduler.Schedule(pod, st.nodeLister)
if err == nil { if err == nil {
st.t.Error("Unexpected non-error") st.t.Error("Unexpected non-error")
} }


@@ -46,7 +46,7 @@ func (h HostPriorityList) Swap(i, j int) {
h[i], h[j] = h[j], h[i]
}
-type PriorityFunction func(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error)
+type PriorityFunction func(pod *api.Pod, podLister PodLister, nodeLister NodeLister) (HostPriorityList, error)
type PriorityConfig struct {
Function PriorityFunction


@@ -28,7 +28,7 @@ import (
func init() {
factory.RegisterAlgorithmProvider(factory.DefaultProvider, defaultPredicates(), defaultPriorities())
-// EqualPriority is a prioritizer function that gives an equal weight of one to all minions
+// EqualPriority is a prioritizer function that gives an equal weight of one to all nodes
// Register the priority function so that it's available
// but do not include it as part of the default priorities
factory.RegisterPriorityFunction("EqualPriority", scheduler.EqualPriority, 1)


@ -42,7 +42,7 @@ type PriorityPolicy struct {
// For a custom priority, the name can be user-defined // For a custom priority, the name can be user-defined
// For the Kubernetes provided priority functions, the name is the identifier of the pre-defined priority function // For the Kubernetes provided priority functions, the name is the identifier of the pre-defined priority function
Name string `json:"name"` Name string `json:"name"`
// The numeric multiplier for the minion scores that the priority function generates // The numeric multiplier for the node scores that the priority function generates
// The weight should be non-zero and can be a positive or a negative integer // The weight should be non-zero and can be a positive or a negative integer
Weight int `json:"weight"` Weight int `json:"weight"`
// Holds the parameters to configure the given priority function // Holds the parameters to configure the given priority function
@ -53,9 +53,9 @@ type PriorityPolicy struct {
// Only one of its members may be specified // Only one of its members may be specified
type PredicateArgument struct { type PredicateArgument struct {
// The predicate that provides affinity for pods belonging to a service // The predicate that provides affinity for pods belonging to a service
// It uses a label to identify minions that belong to the same "group" // It uses a label to identify nodes that belong to the same "group"
ServiceAffinity *ServiceAffinity `json:"serviceAffinity"` ServiceAffinity *ServiceAffinity `json:"serviceAffinity"`
// The predicate that checks whether a particular minion has a certain label // The predicate that checks whether a particular node has a certain label
// defined or not, regardless of value // defined or not, regardless of value
LabelsPresence *LabelsPresence `json:"labelsPresence"` LabelsPresence *LabelsPresence `json:"labelsPresence"`
} }
@ -64,41 +64,41 @@ type PredicateArgument struct {
// Only one of its members may be specified // Only one of its members may be specified
type PriorityArgument struct { type PriorityArgument struct {
// The priority function that ensures a good spread (anti-affinity) for pods belonging to a service // The priority function that ensures a good spread (anti-affinity) for pods belonging to a service
// It uses a label to identify minions that belong to the same "group" // It uses a label to identify nodes that belong to the same "group"
ServiceAntiAffinity *ServiceAntiAffinity `json:"serviceAntiAffinity"` ServiceAntiAffinity *ServiceAntiAffinity `json:"serviceAntiAffinity"`
// The priority function that checks whether a particular minion has a certain label // The priority function that checks whether a particular node has a certain label
// defined or not, regardless of value // defined or not, regardless of value
LabelPreference *LabelPreference `json:"labelPreference"` LabelPreference *LabelPreference `json:"labelPreference"`
} }
// Holds the parameters that are used to configure the corresponding predicate // Holds the parameters that are used to configure the corresponding predicate
type ServiceAffinity struct { type ServiceAffinity struct {
// The list of labels that identify minion "groups" // The list of labels that identify node "groups"
// All of the labels should match for the minion to be considered a fit for hosting the pod // All of the labels should match for the node to be considered a fit for hosting the pod
Labels []string `json:"labels"` Labels []string `json:"labels"`
} }
// Holds the parameters that are used to configure the corresponding predicate // Holds the parameters that are used to configure the corresponding predicate
type LabelsPresence struct { type LabelsPresence struct {
// The list of labels that identify minion "groups" // The list of labels that identify node "groups"
// All of the labels should be either present (or absent) for the minion to be considered a fit for hosting the pod // All of the labels should be either present (or absent) for the node to be considered a fit for hosting the pod
Labels []string `json:"labels"` Labels []string `json:"labels"`
// The boolean flag that indicates whether the labels should be present or absent from the minion // The boolean flag that indicates whether the labels should be present or absent from the node
Presence bool `json:"presence"` Presence bool `json:"presence"`
} }
// Holds the parameters that are used to configure the corresponding priority function // Holds the parameters that are used to configure the corresponding priority function
type ServiceAntiAffinity struct { type ServiceAntiAffinity struct {
// Used to identify minion "groups" // Used to identify node "groups"
Label string `json:"label"` Label string `json:"label"`
} }
// Holds the parameters that are used to configure the corresponding priority function // Holds the parameters that are used to configure the corresponding priority function
type LabelPreference struct { type LabelPreference struct {
// Used to identify minion "groups" // Used to identify node "groups"
Label string `json:"label"` Label string `json:"label"`
// This is a boolean flag // This is a boolean flag
// If true, higher priority is given to minions that have the label // If true, higher priority is given to nodes that have the label
// If false, higher priority is given to minions that do not have the label // If false, higher priority is given to nodes that do not have the label
Presence bool `json:"presence"` Presence bool `json:"presence"`
} }
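As a hedged illustration of how these knobs are typically expressed in a scheduler policy file, the sketch below unmarshals a hand-written priority entry into local stand-in structs that mirror the json tags shown above; the "argument" field name and the surrounding policy file format are assumptions for this example, not taken from this hunk.

package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-ins mirroring the json tags above; the real types live in the
// scheduler's api package.
type serviceAntiAffinity struct {
	Label string `json:"label"`
}

type labelPreference struct {
	Label    string `json:"label"`
	Presence bool   `json:"presence"`
}

type priorityArgument struct {
	ServiceAntiAffinity *serviceAntiAffinity `json:"serviceAntiAffinity"`
	LabelPreference     *labelPreference     `json:"labelPreference"`
}

type priorityPolicy struct {
	Name     string            `json:"name"`
	Weight   int               `json:"weight"`
	Argument *priorityArgument `json:"argument"` // field name assumed for this sketch
}

func main() {
	// Spread a service's pods across zones, weighting the rule at 2.
	raw := `{"name": "ZoneSpread", "weight": 2,
	         "argument": {"serviceAntiAffinity": {"label": "zone"}}}`
	var p priorityPolicy
	if err := json.Unmarshal([]byte(raw), &p); err != nil {
		panic(err)
	}
	fmt.Printf("%s (weight %d) groups nodes by label %q\n",
		p.Name, p.Weight, p.Argument.ServiceAntiAffinity.Label)
}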


@ -42,7 +42,7 @@ type PriorityPolicy struct {
// For a custom priority, the name can be user-defined // For a custom priority, the name can be user-defined
// For the Kubernetes provided priority functions, the name is the identifier of the pre-defined priority function // For the Kubernetes provided priority functions, the name is the identifier of the pre-defined priority function
Name string `json:"name"` Name string `json:"name"`
// The numeric multiplier for the minion scores that the priority function generates // The numeric multiplier for the node scores that the priority function generates
// The weight should be non-zero and can be a positive or a negative integer // The weight should be non-zero and can be a positive or a negative integer
Weight int `json:"weight"` Weight int `json:"weight"`
// Holds the parameters to configure the given priority function // Holds the parameters to configure the given priority function
@ -53,9 +53,9 @@ type PriorityPolicy struct {
// Only one of its members may be specified // Only one of its members may be specified
type PredicateArgument struct { type PredicateArgument struct {
// The predicate that provides affinity for pods belonging to a service // The predicate that provides affinity for pods belonging to a service
// It uses a label to identify minions that belong to the same "group" // It uses a label to identify nodes that belong to the same "group"
ServiceAffinity *ServiceAffinity `json:"serviceAffinity"` ServiceAffinity *ServiceAffinity `json:"serviceAffinity"`
// The predicate that checks whether a particular minion has a certain label // The predicate that checks whether a particular node has a certain label
// defined or not, regardless of value // defined or not, regardless of value
LabelsPresence *LabelsPresence `json:"labelsPresence"` LabelsPresence *LabelsPresence `json:"labelsPresence"`
} }
@ -64,41 +64,41 @@ type PredicateArgument struct {
// Only one of its members may be specified // Only one of its members may be specified
type PriorityArgument struct { type PriorityArgument struct {
// The priority function that ensures a good spread (anti-affinity) for pods belonging to a service // The priority function that ensures a good spread (anti-affinity) for pods belonging to a service
// It uses a label to identify minions that belong to the same "group" // It uses a label to identify nodes that belong to the same "group"
ServiceAntiAffinity *ServiceAntiAffinity `json:"serviceAntiAffinity"` ServiceAntiAffinity *ServiceAntiAffinity `json:"serviceAntiAffinity"`
// The priority function that checks whether a particular minion has a certain label // The priority function that checks whether a particular node has a certain label
// defined or not, regardless of value // defined or not, regardless of value
LabelPreference *LabelPreference `json:"labelPreference"` LabelPreference *LabelPreference `json:"labelPreference"`
} }
// Holds the parameters that are used to configure the corresponding predicate // Holds the parameters that are used to configure the corresponding predicate
type ServiceAffinity struct { type ServiceAffinity struct {
// The list of labels that identify minion "groups" // The list of labels that identify node "groups"
// All of the labels should match for the minion to be considered a fit for hosting the pod // All of the labels should match for the node to be considered a fit for hosting the pod
Labels []string `json:"labels"` Labels []string `json:"labels"`
} }
// Holds the parameters that are used to configure the corresponding predicate // Holds the parameters that are used to configure the corresponding predicate
type LabelsPresence struct { type LabelsPresence struct {
// The list of labels that identify minion "groups" // The list of labels that identify node "groups"
// All of the labels should be either present (or absent) for the minion to be considered a fit for hosting the pod // All of the labels should be either present (or absent) for the node to be considered a fit for hosting the pod
Labels []string `json:"labels"` Labels []string `json:"labels"`
// The boolean flag that indicates whether the labels should be present or absent from the minion // The boolean flag that indicates whether the labels should be present or absent from the node
Presence bool `json:"presence"` Presence bool `json:"presence"`
} }
// Holds the parameters that are used to configure the corresponding priority function // Holds the parameters that are used to configure the corresponding priority function
type ServiceAntiAffinity struct { type ServiceAntiAffinity struct {
// Used to identify minion "groups" // Used to identify node "groups"
Label string `json:"label"` Label string `json:"label"`
} }
// Holds the parameters that are used to configure the corresponding priority function // Holds the parameters that are used to configure the corresponding priority function
type LabelPreference struct { type LabelPreference struct {
// Used to identify minion "groups" // Used to identify node "groups"
Label string `json:"label"` Label string `json:"label"`
// This is a boolean flag // This is a boolean flag
// If true, higher priority is given to minions that have the label // If true, higher priority is given to nodes that have the label
// If false, higher priority is given to minions that do not have the label // If false, higher priority is given to nodes that do not have the label
Presence bool `json:"presence"` Presence bool `json:"presence"`
} }


@@ -48,7 +48,7 @@ type ConfigFactory struct {
ScheduledPodLister *cache.StoreToPodLister
// a means to list all known scheduled pods and pods assumed to have been scheduled.
PodLister algorithm.PodLister
-// a means to list all minions
+// a means to list all nodes
NodeLister *cache.StoreToNodeLister
// a means to list all services
ServiceLister *cache.StoreToServiceLister
@@ -180,9 +180,9 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String)
// Begin populating scheduled pods.
go f.scheduledPodPopulator.Run(f.StopEverything)
-// Watch minions.
-// Minions may be listed frequently, so provide a local up-to-date cache.
-cache.NewReflector(f.createMinionLW(), &api.Node{}, f.NodeLister.Store, 0).RunUntil(f.StopEverything)
+// Watch nodes.
+// Nodes may be listed frequently, so provide a local up-to-date cache.
+cache.NewReflector(f.createNodeLW(), &api.Node{}, f.NodeLister.Store, 0).RunUntil(f.StopEverything)
// Watch and cache all service objects. Scheduler needs to find all pods // Watch and cache all service objects. Scheduler needs to find all pods
// created by the same services or ReplicationControllers, so that it can spread them correctly. // created by the same services or ReplicationControllers, so that it can spread them correctly.
@ -209,9 +209,9 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String)
return &scheduler.Config{ return &scheduler.Config{
Modeler: f.modeler, Modeler: f.modeler,
// The scheduler only needs to consider schedulable nodes. // The scheduler only needs to consider schedulable nodes.
MinionLister: f.NodeLister.NodeCondition(api.NodeReady, api.ConditionTrue), NodeLister: f.NodeLister.NodeCondition(api.NodeReady, api.ConditionTrue),
Algorithm: algo, Algorithm: algo,
Binder: &binder{f.Client}, Binder: &binder{f.Client},
NextPod: func() *api.Pod { NextPod: func() *api.Pod {
pod := f.PodQueue.Pop().(*api.Pod) pod := f.PodQueue.Pop().(*api.Pod)
glog.V(2).Infof("About to try and schedule pod %v", pod.Name) glog.V(2).Infof("About to try and schedule pod %v", pod.Name)
@ -245,8 +245,8 @@ func (factory *ConfigFactory) createAssignedPodLW() *cache.ListWatch {
parseSelectorOrDie(client.PodHost+"!=")) parseSelectorOrDie(client.PodHost+"!="))
} }
-// createMinionLW returns a cache.ListWatch that gets all changes to minions.
-func (factory *ConfigFactory) createMinionLW() *cache.ListWatch {
+// createNodeLW returns a cache.ListWatch that gets all changes to nodes.
+func (factory *ConfigFactory) createNodeLW() *cache.ListWatch {
// TODO: Filter out nodes that don't have the NodeReady condition.
fields := fields.Set{client.NodeUnschedulable: "false"}.AsSelector()
return cache.NewListWatchFromClient(factory.Client, "nodes", api.NamespaceAll, fields)


@ -122,11 +122,11 @@ func PredicateTwo(pod *api.Pod, existingPods []*api.Pod, node string) (bool, err
return true, nil return true, nil
} }
func PriorityOne(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) { func PriorityOne(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
return []algorithm.HostPriority{}, nil return []algorithm.HostPriority{}, nil
} }
func PriorityTwo(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) { func PriorityTwo(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
return []algorithm.HostPriority{}, nil return []algorithm.HostPriority{}, nil
} }
@ -179,7 +179,7 @@ func TestDefaultErrorFunc(t *testing.T) {
} }
} }
func TestMinionEnumerator(t *testing.T) { func TestNodeEnumerator(t *testing.T) {
testList := &api.NodeList{ testList := &api.NodeList{
Items: []api.Node{ Items: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: "foo"}}, {ObjectMeta: api.ObjectMeta{Name: "foo"}},


@ -36,7 +36,7 @@ type PluginFactoryArgs struct {
algorithm.PodLister algorithm.PodLister
algorithm.ServiceLister algorithm.ServiceLister
algorithm.ControllerLister algorithm.ControllerLister
NodeLister algorithm.MinionLister NodeLister algorithm.NodeLister
NodeInfo predicates.NodeInfo NodeInfo predicates.NodeInfo
} }


@ -60,21 +60,21 @@ type genericScheduler struct {
randomLock sync.Mutex randomLock sync.Mutex
} }
func (g *genericScheduler) Schedule(pod *api.Pod, minionLister algorithm.MinionLister) (string, error) { func (g *genericScheduler) Schedule(pod *api.Pod, nodeLister algorithm.NodeLister) (string, error) {
minions, err := minionLister.List() nodes, err := nodeLister.List()
if err != nil { if err != nil {
return "", err return "", err
} }
if len(minions.Items) == 0 { if len(nodes.Items) == 0 {
return "", ErrNoNodesAvailable return "", ErrNoNodesAvailable
} }
filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, g.pods, g.predicates, minions) filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, g.pods, g.predicates, nodes)
if err != nil { if err != nil {
return "", err return "", err
} }
priorityList, err := PrioritizeNodes(pod, g.pods, g.prioritizers, algorithm.FakeMinionLister(filteredNodes)) priorityList, err := PrioritizeNodes(pod, g.pods, g.prioritizers, algorithm.FakeNodeLister(filteredNodes))
if err != nil { if err != nil {
return "", err return "", err
} }
@@ -88,8 +88,8 @@ func (g *genericScheduler) Schedule(pod *api.Pod, minionLister algorithm.MinionL
return g.selectHost(priorityList)
}
-// This method takes a prioritized list of minions and sorts them in reverse order based on scores
-// and then picks one randomly from the minions that had the highest score
+// This method takes a prioritized list of nodes and sorts them in reverse order based on scores
+// and then picks one randomly from the nodes that had the highest score
func (g *genericScheduler) selectHost(priorityList algorithm.HostPriorityList) (string, error) {
if len(priorityList) == 0 {
return "", fmt.Errorf("empty priorityList")
@@ -104,8 +104,8 @@ func (g *genericScheduler) selectHost(priorityList algorithm.HostPriorityList) (
return hosts[ix], nil
}
-// Filters the minions to find the ones that fit based on the given predicate functions
-// Each minion is passed through the predicate functions to determine if it is a fit
+// Filters the nodes to find the ones that fit based on the given predicate functions
+// Each node is passed through the predicate functions to determine if it is a fit
func findNodesThatFit(pod *api.Pod, podLister algorithm.PodLister, predicateFuncs map[string]algorithm.FitPredicate, nodes api.NodeList) (api.NodeList, FailedPredicateMap, error) {
filtered := []api.Node{}
machineToPods, err := predicates.MapPodsToMachines(podLister)
@@ -141,19 +141,19 @@ func findNodesThatFit(pod *api.Pod, podLister algorithm.PodLister, predicateFunc
return api.NodeList{Items: filtered}, failedPredicateMap, nil
}
-// Prioritizes the minions by running the individual priority functions sequentially.
+// Prioritizes the nodes by running the individual priority functions sequentially.
// Each priority function is expected to set a score of 0-10
-// 0 is the lowest priority score (least preferred minion) and 10 is the highest
+// 0 is the lowest priority score (least preferred node) and 10 is the highest
// Each priority function can also have its own weight
-// The minion scores returned by the priority function are multiplied by the weights to get weighted scores
-// All scores are finally combined (added) to get the total weighted scores of all minions
-func PrioritizeNodes(pod *api.Pod, podLister algorithm.PodLister, priorityConfigs []algorithm.PriorityConfig, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
+// The node scores returned by the priority function are multiplied by the weights to get weighted scores
+// All scores are finally combined (added) to get the total weighted scores of all nodes
+func PrioritizeNodes(pod *api.Pod, podLister algorithm.PodLister, priorityConfigs []algorithm.PriorityConfig, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
result := algorithm.HostPriorityList{}
// If no priority configs are provided, then the EqualPriority function is applied
// This is required to generate the priority list in the required format
if len(priorityConfigs) == 0 {
-return EqualPriority(pod, podLister, minionLister)
+return EqualPriority(pod, podLister, nodeLister)
}
combinedScores := map[string]int{}
@ -164,7 +164,7 @@ func PrioritizeNodes(pod *api.Pod, podLister algorithm.PodLister, priorityConfig
continue continue
} }
priorityFunc := priorityConfig.Function priorityFunc := priorityConfig.Function
prioritizedList, err := priorityFunc(pod, podLister, minionLister) prioritizedList, err := priorityFunc(pod, podLister, nodeLister)
if err != nil { if err != nil {
return algorithm.HostPriorityList{}, err return algorithm.HostPriorityList{}, err
} }
@@ -192,17 +192,17 @@ func getBestHosts(list algorithm.HostPriorityList) []string {
}
// EqualPriority is a prioritizer function that gives an equal weight of one to all nodes
-func EqualPriority(_ *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
-nodes, err := minionLister.List()
+func EqualPriority(_ *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
+nodes, err := nodeLister.List()
if err != nil {
glog.Errorf("failed to list nodes: %v", err)
return []algorithm.HostPriority{}, err
}
result := []algorithm.HostPriority{}
-for _, minion := range nodes.Items {
+for _, node := range nodes.Items {
result = append(result, algorithm.HostPriority{
-Host: minion.Name,
+Host: node.Name,
Score: 1,
})
}
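The weighting described in the PrioritizeNodes comment reduces to a weighted sum per host. Below is a stand-alone sketch of that combination step, with illustrative names rather than the scheduler's own types:

package main

import "fmt"

// hostScore mirrors the shape of algorithm.HostPriority: a host name plus a
// 0-10 score from one priority function.
type hostScore struct {
	Host  string
	Score int
}

// weightedScores pairs one priority function's output with its weight.
type weightedScores struct {
	Weight int
	Scores []hostScore
}

// combine multiplies each priority function's scores by its weight and adds
// them up per host, as the PrioritizeNodes comment above describes.
func combine(all []weightedScores) map[string]int {
	combined := map[string]int{}
	for _, ws := range all {
		for _, hs := range ws.Scores {
			combined[hs.Host] += ws.Weight * hs.Score
		}
	}
	return combined
}

func main() {
	spread := weightedScores{Weight: 1, Scores: []hostScore{{"node-a", 10}, {"node-b", 5}}}
	balance := weightedScores{Weight: 2, Scores: []hostScore{{"node-a", 3}, {"node-b", 8}}}
	fmt.Println(combine([]weightedScores{spread, balance}))
	// node-a: 1*10 + 2*3 = 16, node-b: 1*5 + 2*8 = 21.
}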


@ -44,31 +44,31 @@ func hasNoPodsPredicate(pod *api.Pod, existingPods []*api.Pod, node string) (boo
return len(existingPods) == 0, nil return len(existingPods) == 0, nil
} }
func numericPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) { func numericPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
nodes, err := minionLister.List() nodes, err := nodeLister.List()
result := []algorithm.HostPriority{} result := []algorithm.HostPriority{}
if err != nil { if err != nil {
return nil, fmt.Errorf("failed to list nodes: %v", err) return nil, fmt.Errorf("failed to list nodes: %v", err)
} }
for _, minion := range nodes.Items { for _, node := range nodes.Items {
score, err := strconv.Atoi(minion.Name) score, err := strconv.Atoi(node.Name)
if err != nil { if err != nil {
return nil, err return nil, err
} }
result = append(result, algorithm.HostPriority{ result = append(result, algorithm.HostPriority{
Host: minion.Name, Host: node.Name,
Score: score, Score: score,
}) })
} }
return result, nil return result, nil
} }
func reverseNumericPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) { func reverseNumericPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
var maxScore float64 var maxScore float64
minScore := math.MaxFloat64 minScore := math.MaxFloat64
reverseResult := []algorithm.HostPriority{} reverseResult := []algorithm.HostPriority{}
result, err := numericPriority(pod, podLister, minionLister) result, err := numericPriority(pod, podLister, nodeLister)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -288,7 +288,7 @@ func TestGenericScheduler(t *testing.T) {
for _, test := range tests { for _, test := range tests {
random := rand.New(rand.NewSource(0)) random := rand.New(rand.NewSource(0))
scheduler := NewGenericScheduler(test.predicates, test.prioritizers, algorithm.FakePodLister(test.pods), random) scheduler := NewGenericScheduler(test.predicates, test.prioritizers, algorithm.FakePodLister(test.pods), random)
machine, err := scheduler.Schedule(test.pod, algorithm.FakeMinionLister(makeNodeList(test.nodes))) machine, err := scheduler.Schedule(test.pod, algorithm.FakeNodeLister(makeNodeList(test.nodes)))
if test.expectsErr { if test.expectsErr {
if err == nil { if err == nil {
t.Error("Unexpected non-error") t.Error("Unexpected non-error")


@@ -63,18 +63,18 @@ type SystemModeler interface {
}
// Scheduler watches for new unscheduled pods. It attempts to find
-// minions that they fit on and writes bindings back to the api server.
+// nodes that they fit on and writes bindings back to the api server.
type Scheduler struct {
config *Config
}
type Config struct {
// It is expected that changes made via modeler will be observed
-// by MinionLister and Algorithm.
+// by NodeLister and Algorithm.
Modeler SystemModeler
-MinionLister algorithm.MinionLister
+NodeLister algorithm.NodeLister
Algorithm algorithm.ScheduleAlgorithm
Binder Binder
// Rate at which we can create pods
BindPodsRateLimiter util.RateLimiter
@@ -121,7 +121,7 @@ func (s *Scheduler) scheduleOne() {
defer func() {
metrics.E2eSchedulingLatency.Observe(metrics.SinceInMicroseconds(start))
}()
-dest, err := s.config.Algorithm.Schedule(pod, s.config.MinionLister)
+dest, err := s.config.Algorithm.Schedule(pod, s.config.NodeLister)
metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInMicroseconds(start))
if err != nil {
glog.V(1).Infof("Failed to schedule: %+v", pod)


@ -60,7 +60,7 @@ type mockScheduler struct {
err error err error
} }
func (es mockScheduler) Schedule(pod *api.Pod, ml algorithm.MinionLister) (string, error) { func (es mockScheduler) Schedule(pod *api.Pod, ml algorithm.NodeLister) (string, error) {
return es.machine, es.err return es.machine, es.err
} }
@ -114,7 +114,7 @@ func TestScheduler(t *testing.T) {
gotAssumedPod = pod gotAssumedPod = pod
}, },
}, },
MinionLister: algorithm.FakeMinionLister( NodeLister: algorithm.FakeNodeLister(
api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}}, api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
), ),
Algorithm: item.algo, Algorithm: item.algo,
@ -196,7 +196,7 @@ func TestSchedulerForgetAssumedPodAfterDelete(t *testing.T) {
var gotBinding *api.Binding var gotBinding *api.Binding
c := &Config{ c := &Config{
Modeler: modeler, Modeler: modeler,
MinionLister: algorithm.FakeMinionLister( NodeLister: algorithm.FakeNodeLister(
api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}}, api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
), ),
Algorithm: algo, Algorithm: algo,
@ -329,7 +329,7 @@ func TestSchedulerRateLimitsBinding(t *testing.T) {
fr := FakeRateLimiter{util.NewTokenBucketRateLimiter(0.02, 1), []bool{}} fr := FakeRateLimiter{util.NewTokenBucketRateLimiter(0.02, 1), []bool{}}
c := &Config{ c := &Config{
Modeler: modeler, Modeler: modeler,
MinionLister: algorithm.FakeMinionLister( NodeLister: algorithm.FakeNodeLister(
api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}}, api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
), ),
Algorithm: algo, Algorithm: algo,


@ -256,7 +256,7 @@ var _ = Describe("DaemonRestart", func() {
}) })
It("Kubelet should not restart containers across restart", func() { It("Kubelet should not restart containers across restart", func() {
nodeIPs, err := getMinionPublicIps(framework.Client) nodeIPs, err := getNodePublicIps(framework.Client)
expectNoError(err) expectNoError(err)
preRestarts, badNodes := getContainerRestarts(framework.Client, ns, labelSelector) preRestarts, badNodes := getContainerRestarts(framework.Client, ns, labelSelector)
if preRestarts != 0 { if preRestarts != 0 {


@ -91,7 +91,7 @@ func gcloudListNodes() {
// -t/--test flag or ginkgo.focus flag. // -t/--test flag or ginkgo.focus flag.
var _ = Describe("Density", func() { var _ = Describe("Density", func() {
var c *client.Client var c *client.Client
var minionCount int var nodeCount int
var RCName string var RCName string
var additionalPodsPrefix string var additionalPodsPrefix string
var ns string var ns string
@ -101,10 +101,10 @@ var _ = Describe("Density", func() {
var err error var err error
c, err = loadClient() c, err = loadClient()
expectNoError(err) expectNoError(err)
minions, err := c.Nodes().List(labels.Everything(), fields.Everything()) nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
expectNoError(err) expectNoError(err)
minionCount = len(minions.Items) nodeCount = len(nodes.Items)
Expect(minionCount).NotTo(BeZero()) Expect(nodeCount).NotTo(BeZero())
// Terminating a namespace (deleting the remaining objects from it - which // Terminating a namespace (deleting the remaining objects from it - which
// generally means events) can affect the current run. Thus we wait for all // generally means events) can affect the current run. Thus we wait for all
@ -136,7 +136,7 @@ var _ = Describe("Density", func() {
} }
By("Removing additional pods if any") By("Removing additional pods if any")
for i := 1; i <= minionCount; i++ { for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i) name := additionalPodsPrefix + "-" + strconv.Itoa(i)
c.Pods(ns).Delete(name, nil) c.Pods(ns).Delete(name, nil)
} }
@ -160,7 +160,7 @@ var _ = Describe("Density", func() {
skip bool skip bool
// Controls if e2e latency tests should be run (they are slow) // Controls if e2e latency tests should be run (they are slow)
runLatencyTest bool runLatencyTest bool
podsPerMinion int podsPerNode int
// Controls how often the apiserver is polled for pods // Controls how often the apiserver is polled for pods
interval time.Duration interval time.Duration
} }
@ -170,17 +170,17 @@ var _ = Describe("Density", func() {
// (metrics from other tests affects this one). // (metrics from other tests affects this one).
// TODO: Reenable once we can measure latency only from a single test. // TODO: Reenable once we can measure latency only from a single test.
// TODO: Expose runLatencyTest as ginkgo flag. // TODO: Expose runLatencyTest as ginkgo flag.
{podsPerMinion: 3, skip: true, runLatencyTest: false, interval: 10 * time.Second}, {podsPerNode: 3, skip: true, runLatencyTest: false, interval: 10 * time.Second},
{podsPerMinion: 30, skip: true, runLatencyTest: true, interval: 10 * time.Second}, {podsPerNode: 30, skip: true, runLatencyTest: true, interval: 10 * time.Second},
// More than 30 pods per node is outside our v1.0 goals. // More than 30 pods per node is outside our v1.0 goals.
// We might want to enable those tests in the future. // We might want to enable those tests in the future.
{podsPerMinion: 50, skip: true, runLatencyTest: false, interval: 10 * time.Second}, {podsPerNode: 50, skip: true, runLatencyTest: false, interval: 10 * time.Second},
{podsPerMinion: 100, skip: true, runLatencyTest: false, interval: 1 * time.Second}, {podsPerNode: 100, skip: true, runLatencyTest: false, interval: 1 * time.Second},
} }
for _, testArg := range densityTests { for _, testArg := range densityTests {
name := fmt.Sprintf("should allow starting %d pods per node", testArg.podsPerMinion) name := fmt.Sprintf("should allow starting %d pods per node", testArg.podsPerNode)
if testArg.podsPerMinion == 30 { if testArg.podsPerNode == 30 {
name = "[Performance suite] " + name name = "[Performance suite] " + name
} }
if testArg.skip { if testArg.skip {
@ -188,7 +188,7 @@ var _ = Describe("Density", func() {
} }
itArg := testArg itArg := testArg
It(name, func() { It(name, func() {
totalPods := itArg.podsPerMinion * minionCount totalPods := itArg.podsPerNode * nodeCount
RCName = "density" + strconv.Itoa(totalPods) + "-" + uuid RCName = "density" + strconv.Itoa(totalPods) + "-" + uuid
fileHndl, err := os.Create(fmt.Sprintf(testContext.OutputDir+"/%s/pod_states.csv", uuid)) fileHndl, err := os.Create(fmt.Sprintf(testContext.OutputDir+"/%s/pod_states.csv", uuid))
expectNoError(err) expectNoError(err)
@ -318,11 +318,11 @@ var _ = Describe("Density", func() {
// Create some additional pods with throughput ~5 pods/sec. // Create some additional pods with throughput ~5 pods/sec.
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(minionCount) wg.Add(nodeCount)
podLabels := map[string]string{ podLabels := map[string]string{
"name": additionalPodsPrefix, "name": additionalPodsPrefix,
} }
for i := 1; i <= minionCount; i++ { for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i) name := additionalPodsPrefix + "-" + strconv.Itoa(i)
go createRunningPod(&wg, c, name, ns, "gcr.io/google_containers/pause:go", podLabels) go createRunningPod(&wg, c, name, ns, "gcr.io/google_containers/pause:go", podLabels)
time.Sleep(200 * time.Millisecond) time.Sleep(200 * time.Millisecond)
@ -330,7 +330,7 @@ var _ = Describe("Density", func() {
wg.Wait() wg.Wait()
Logf("Waiting for all Pods begin observed by the watch...") Logf("Waiting for all Pods begin observed by the watch...")
for start := time.Now(); len(watchTimes) < minionCount && time.Since(start) < timeout; time.Sleep(10 * time.Second) { for start := time.Now(); len(watchTimes) < nodeCount && time.Since(start) < timeout; time.Sleep(10 * time.Second) {
} }
close(stopCh) close(stopCh)
@ -404,7 +404,7 @@ var _ = Describe("Density", func() {
} }
Logf("Approx throughput: %v pods/min", Logf("Approx throughput: %v pods/min",
float64(minionCount)/(e2eLag[len(e2eLag)-1].Latency.Minutes())) float64(nodeCount)/(e2eLag[len(e2eLag)-1].Latency.Minutes()))
} }
}) })
} }


@ -322,9 +322,9 @@ var _ = Describe("Kubectl client", func() {
checkOutput(output, requiredStrings) checkOutput(output, requiredStrings)
// Node // Node
minions, err := c.Nodes().List(labels.Everything(), fields.Everything()) nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
node := minions.Items[0] node := nodes.Items[0]
output = runKubectl("describe", "node", node.Name) output = runKubectl("describe", "node", node.Name)
requiredStrings = [][]string{ requiredStrings = [][]string{
{"Name:", node.Name}, {"Name:", node.Name},


@ -660,7 +660,7 @@ var _ = Describe("Pods", func() {
pod.Status.Host, pod.Name, pod.Spec.Containers[0].Name)) pod.Status.Host, pod.Name, pod.Spec.Containers[0].Name))
req := framework.Client.Get(). req := framework.Client.Get().
Prefix("proxy"). Prefix("proxy").
Resource("minions"). Resource("nodes").
Name(pod.Status.Host). Name(pod.Status.Host).
Suffix("exec", framework.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) Suffix("exec", framework.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
@ -734,7 +734,7 @@ var _ = Describe("Pods", func() {
req := framework.Client.Get(). req := framework.Client.Get().
Prefix("proxy"). Prefix("proxy").
Resource("minions"). Resource("nodes").
Name(pod.Status.Host). Name(pod.Status.Host).
Suffix("portForward", framework.Namespace.Name, pod.Name) Suffix("portForward", framework.Namespace.Name, pod.Name)


@ -413,7 +413,7 @@ var _ = Describe("Services", func() {
t.CreateWebserverRC(1) t.CreateWebserverRC(1)
By("hitting the pod through the service's NodePort") By("hitting the pod through the service's NodePort")
testReachable(pickMinionIP(c), port.NodePort) testReachable(pickNodeIP(c), port.NodePort)
By("hitting the pod through the service's external load balancer") By("hitting the pod through the service's external load balancer")
testLoadBalancerReachable(ingress, inboundPort) testLoadBalancerReachable(ingress, inboundPort)
@ -482,7 +482,7 @@ var _ = Describe("Services", func() {
t.CreateWebserverRC(1) t.CreateWebserverRC(1)
By("hitting the pod through the service's NodePort") By("hitting the pod through the service's NodePort")
testReachable(pickMinionIP(c), port.NodePort) testReachable(pickNodeIP(c), port.NodePort)
By("hitting the pod through the service's external load balancer") By("hitting the pod through the service's external load balancer")
testLoadBalancerReachable(ingress, inboundPort) testLoadBalancerReachable(ingress, inboundPort)
@ -529,7 +529,7 @@ var _ = Describe("Services", func() {
t.CreateWebserverRC(1) t.CreateWebserverRC(1)
By("hitting the pod through the service's NodePort") By("hitting the pod through the service's NodePort")
ip := pickMinionIP(c) ip := pickNodeIP(c)
testReachable(ip, nodePort) testReachable(ip, nodePort)
hosts, err := NodeSSHHosts(c) hosts, err := NodeSSHHosts(c)
@ -605,7 +605,7 @@ var _ = Describe("Services", func() {
Failf("got unexpected len(Status.LoadBalancer.Ingresss) for NodePort service: %v", service) Failf("got unexpected len(Status.LoadBalancer.Ingresss) for NodePort service: %v", service)
} }
By("hitting the pod through the service's NodePort") By("hitting the pod through the service's NodePort")
ip := pickMinionIP(f.Client) ip := pickNodeIP(f.Client)
nodePort1 := port.NodePort // Save for later! nodePort1 := port.NodePort // Save for later!
testReachable(ip, nodePort1) testReachable(ip, nodePort1)
@ -638,7 +638,7 @@ var _ = Describe("Services", func() {
Failf("got unexpected Status.LoadBalancer.Ingresss[0] for LoadBalancer service: %v", service) Failf("got unexpected Status.LoadBalancer.Ingresss[0] for LoadBalancer service: %v", service)
} }
By("hitting the pod through the service's NodePort") By("hitting the pod through the service's NodePort")
ip = pickMinionIP(f.Client) ip = pickNodeIP(f.Client)
testReachable(ip, nodePort1) testReachable(ip, nodePort1)
By("hitting the pod through the service's LoadBalancer") By("hitting the pod through the service's LoadBalancer")
testLoadBalancerReachable(ingress1, 80) testLoadBalancerReachable(ingress1, 80)
@ -710,10 +710,10 @@ var _ = Describe("Services", func() {
Failf("got unexpected len(Status.LoadBalancer.Ingresss) for back-to-ClusterIP service: %v", service) Failf("got unexpected len(Status.LoadBalancer.Ingresss) for back-to-ClusterIP service: %v", service)
} }
By("checking the NodePort (original) is closed") By("checking the NodePort (original) is closed")
ip = pickMinionIP(f.Client) ip = pickNodeIP(f.Client)
testNotReachable(ip, nodePort1) testNotReachable(ip, nodePort1)
By("checking the NodePort (updated) is closed") By("checking the NodePort (updated) is closed")
ip = pickMinionIP(f.Client) ip = pickNodeIP(f.Client)
testNotReachable(ip, nodePort2) testNotReachable(ip, nodePort2)
By("checking the LoadBalancer is closed") By("checking the LoadBalancer is closed")
testLoadBalancerNotReachable(ingress2, 80) testLoadBalancerNotReachable(ingress2, 80)
@ -769,7 +769,7 @@ var _ = Describe("Services", func() {
} }
By("hitting the pod through the service's NodePort") By("hitting the pod through the service's NodePort")
ip := pickMinionIP(c) ip := pickNodeIP(c)
testReachable(ip, nodePort) testReachable(ip, nodePort)
By("hitting the pod through the service's LoadBalancer") By("hitting the pod through the service's LoadBalancer")
testLoadBalancerReachable(ingress, 80) testLoadBalancerReachable(ingress, 80)
@ -1249,7 +1249,7 @@ func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []st
return ips return ips
} }
func getMinionPublicIps(c *client.Client) ([]string, error) { func getNodePublicIps(c *client.Client) ([]string, error) {
nodes, err := c.Nodes().List(labels.Everything(), fields.Everything()) nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
if err != nil { if err != nil {
return nil, err return nil, err
@ -1262,8 +1262,8 @@ func getMinionPublicIps(c *client.Client) ([]string, error) {
return ips, nil return ips, nil
} }
func pickMinionIP(c *client.Client) string { func pickNodeIP(c *client.Client) string {
publicIps, err := getMinionPublicIps(c) publicIps, err := getNodePublicIps(c)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if len(publicIps) == 0 { if len(publicIps) == 0 {
Failf("got unexpected number (%d) of public IPs", len(publicIps)) Failf("got unexpected number (%d) of public IPs", len(publicIps))


@ -330,7 +330,7 @@ func getTestRequests() []struct {
{"GET", path("endpoints", api.NamespaceDefault, "a"), "", code200}, {"GET", path("endpoints", api.NamespaceDefault, "a"), "", code200},
{"DELETE", timeoutPath("endpoints", api.NamespaceDefault, "a"), "", code200}, {"DELETE", timeoutPath("endpoints", api.NamespaceDefault, "a"), "", code200},
// Normal methods on minions // Normal methods on nodes
{"GET", path("nodes", "", ""), "", code200}, {"GET", path("nodes", "", ""), "", code200},
{"POST", timeoutPath("nodes", "", ""), aNode, code201}, {"POST", timeoutPath("nodes", "", ""), aNode, code201},
{"PUT", timeoutPath("nodes", "", "a"), aNode, code200}, {"PUT", timeoutPath("nodes", "", "a"), aNode, code200},
@ -364,7 +364,7 @@ func getTestRequests() []struct {
{"GET", pathWithPrefix("proxy", "nodes", api.NamespaceDefault, "a"), "", code404}, {"GET", pathWithPrefix("proxy", "nodes", api.NamespaceDefault, "a"), "", code404},
{"GET", pathWithPrefix("redirect", "nodes", api.NamespaceDefault, "a"), "", code404}, {"GET", pathWithPrefix("redirect", "nodes", api.NamespaceDefault, "a"), "", code404},
// TODO: test .../watch/..., which doesn't end before the test timeout. // TODO: test .../watch/..., which doesn't end before the test timeout.
// TODO: figure out how to create a minion so that it can successfully proxy/redirect. // TODO: figure out how to create a node so that it can successfully proxy/redirect.
// Non-object endpoints // Non-object endpoints
{"GET", "/", "", code200}, {"GET", "/", "", code200},