Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-30 23:15:14 +00:00)

commit bf641078eb
Merge pull request #13785 from wojtek-t/minion_to_node_2

Auto commit by PR queue bot
@@ -246,7 +246,7 @@ func recoverAssignedSlave(pod *api.Pod) string {
 // Schedule implements the Scheduler interface of Kubernetes.
 // It returns the selectedMachine's name and error (if there's any).
-func (k *kubeScheduler) Schedule(pod *api.Pod, unused algorithm.MinionLister) (string, error) {
+func (k *kubeScheduler) Schedule(pod *api.Pod, unused algorithm.NodeLister) (string, error) {
 log.Infof("Try to schedule pod %v\n", pod.Name)
 ctx := api.WithNamespace(api.NewDefaultContext(), pod.Namespace)
@@ -684,7 +684,7 @@ func (k *KubernetesScheduler) NewPluginConfig(terminate <-chan struct{}, mux *ht
 })
 return &PluginConfig{
 Config: &plugin.Config{
-MinionLister: nil,
+NodeLister: nil,
 Algorithm: &kubeScheduler{
 api: kapi,
 podUpdates: podUpdates,
@@ -741,7 +741,7 @@ func (s *schedulingPlugin) Run(done <-chan struct{}) {
 func (s *schedulingPlugin) scheduleOne() {
 pod := s.config.NextPod()
 log.V(3).Infof("Attempting to schedule: %+v", pod)
-dest, err := s.config.Algorithm.Schedule(pod, s.config.MinionLister) // call kubeScheduler.Schedule
+dest, err := s.config.Algorithm.Schedule(pod, s.config.NodeLister) // call kubeScheduler.Schedule
 if err != nil {
 log.V(1).Infof("Failed to schedule: %+v", pod)
 s.config.Recorder.Eventf(pod, "FailedScheduling", "Error scheduling: %v", err)
@@ -25,7 +25,7 @@ import (
 // ValidateEvent makes sure that the event makes sense.
 func ValidateEvent(event *api.Event) errs.ValidationErrorList {
 allErrs := errs.ValidationErrorList{}
-// TODO: There is no namespace required for minion
+// TODO: There is no namespace required for node.
 if event.InvolvedObject.Kind != "Node" &&
 event.Namespace != event.InvolvedObject.Namespace {
 allErrs = append(allErrs, errs.NewFieldInvalid("involvedObject.namespace", event.InvolvedObject.Namespace, "namespace does not match involvedObject"))
@@ -44,7 +44,7 @@ type policy struct {
 // providers are in use. Either add "Realm", or assume "user@example.com"
 // format.

-// TODO: Make the "cluster" Kinds be one API group (minions, bindings,
+// TODO: Make the "cluster" Kinds be one API group (nodes, bindings,
 // events, endpoints). The "user" Kinds are another (pods, services,
 // replicationControllers, operations) Make a "plugin", e.g. build
 // controller, be another group. That way when we add a new object to a
pkg/client/cache/doc.go (vendored, 2 changes)
@@ -18,7 +18,7 @@ limitations under the License.
 // reducing the number of server calls you'd otherwise need to make.
 // Reflector watches a server and updates a Store. Two stores are provided;
 // one that simply caches objects (for example, to allow a scheduler to
-// list currently available minions), and one that additionally acts as
+// list currently available nodes), and one that additionally acts as
 // a FIFO queue (for example, to allow a scheduler to process incoming
 // pods).
 package cache
pkg/client/cache/listers.go (vendored, 10 changes)
@@ -158,19 +158,19 @@ func (s storeToNodeConditionLister) List() (nodes api.NodeList, err error) {

 // TODO Move this back to scheduler as a helper function that takes a Store,
 // rather than a method of StoreToNodeLister.
-// GetNodeInfo returns cached data for the minion 'id'.
+// GetNodeInfo returns cached data for the node 'id'.
 func (s *StoreToNodeLister) GetNodeInfo(id string) (*api.Node, error) {
-minion, exists, err := s.Get(&api.Node{ObjectMeta: api.ObjectMeta{Name: id}})
+node, exists, err := s.Get(&api.Node{ObjectMeta: api.ObjectMeta{Name: id}})

 if err != nil {
-return nil, fmt.Errorf("error retrieving minion '%v' from cache: %v", id, err)
+return nil, fmt.Errorf("error retrieving node '%v' from cache: %v", id, err)
 }

 if !exists {
-return nil, fmt.Errorf("minion '%v' is not in cache", id)
+return nil, fmt.Errorf("node '%v' is not in cache", id)
 }

-return minion.(*api.Node), nil
+return node.(*api.Node), nil
 }

 // StoreToReplicationControllerLister gives a store List and Exists methods. The store must contain only ReplicationControllers.
pkg/client/cache/listers_test.go (vendored, 2 changes)
@@ -25,7 +25,7 @@ import (
 "k8s.io/kubernetes/pkg/util/sets"
 )

-func TestStoreToMinionLister(t *testing.T) {
+func TestStoreToNodeLister(t *testing.T) {
 store := NewStore(MetaNamespaceKeyFunc)
 ids := sets.NewString("foo", "bar", "baz")
 for id := range ids {
pkg/client/cache/listwatch_test.go (vendored, 16 changes)
@@ -61,10 +61,10 @@ func TestListWatchesCanList(t *testing.T) {
 namespace string
 fieldSelector fields.Selector
 }{
-// Minion
+// Node
 {
-location: testapi.Default.ResourcePath("minions", api.NamespaceAll, ""),
-resource: "minions",
+location: testapi.Default.ResourcePath("nodes", api.NamespaceAll, ""),
+resource: "nodes",
 namespace: api.NamespaceAll,
 fieldSelector: parseSelectorOrDie(""),
 },
@@ -112,22 +112,22 @@ func TestListWatchesCanWatch(t *testing.T) {
 namespace string
 fieldSelector fields.Selector
 }{
-// Minion
+// Node
 {
 location: buildLocation(
-testapi.Default.ResourcePathWithPrefix("watch", "minions", api.NamespaceAll, ""),
+testapi.Default.ResourcePathWithPrefix("watch", "nodes", api.NamespaceAll, ""),
 buildQueryValues(url.Values{"resourceVersion": []string{""}})),
 rv: "",
-resource: "minions",
+resource: "nodes",
 namespace: api.NamespaceAll,
 fieldSelector: parseSelectorOrDie(""),
 },
 {
 location: buildLocation(
-testapi.Default.ResourcePathWithPrefix("watch", "minions", api.NamespaceAll, ""),
+testapi.Default.ResourcePathWithPrefix("watch", "nodes", api.NamespaceAll, ""),
 buildQueryValues(url.Values{"resourceVersion": []string{"42"}})),
 rv: "42",
-resource: "minions",
+resource: "nodes",
 namespace: api.NamespaceAll,
 fieldSelector: parseSelectorOrDie(""),
 },
@@ -17,7 +17,7 @@ limitations under the License.
 /*
 Package client contains the implementation of the client side communication with the
 Kubernetes master. The Client class provides methods for reading, creating, updating,
-and deleting pods, replication controllers, daemons, services, and minions.
+and deleting pods, replication controllers, daemons, services, and nodes.

 Most consumers should use the Config object to create a Client:

@@ -31,7 +31,7 @@ func getNodesResourceName() string {
 return "nodes"
 }

-func TestListMinions(t *testing.T) {
+func TestListNodes(t *testing.T) {
 c := &testClient{
 Request: testRequest{
 Method: "GET",
@@ -43,7 +43,7 @@ func TestListMinions(t *testing.T) {
 c.Validate(t, response, err)
 }

-func TestListMinionsLabels(t *testing.T) {
+func TestListNodesLabels(t *testing.T) {
 labelSelectorQueryParamName := api.LabelSelectorQueryParam(testapi.Default.Version())
 c := &testClient{
 Request: testRequest{
@@ -73,19 +73,19 @@ func TestListMinionsLabels(t *testing.T) {
 c.Validate(t, receivedNodeList, err)
 }

-func TestGetMinion(t *testing.T) {
+func TestGetNode(t *testing.T) {
 c := &testClient{
 Request: testRequest{
 Method: "GET",
 Path: testapi.Default.ResourcePath(getNodesResourceName(), "", "1"),
 },
-Response: Response{StatusCode: 200, Body: &api.Node{ObjectMeta: api.ObjectMeta{Name: "minion-1"}}},
+Response: Response{StatusCode: 200, Body: &api.Node{ObjectMeta: api.ObjectMeta{Name: "node-1"}}},
 }
 response, err := c.Setup(t).Nodes().Get("1")
 c.Validate(t, response, err)
 }

-func TestGetMinionWithNoName(t *testing.T) {
+func TestGetNodeWithNoName(t *testing.T) {
 c := &testClient{Error: true}
 receivedNode, err := c.Setup(t).Nodes().Get("")
 if (err != nil) && (err.Error() != nameRequiredError) {
@@ -95,10 +95,10 @@ func TestGetMinionWithNoName(t *testing.T) {
 c.Validate(t, receivedNode, err)
 }

-func TestCreateMinion(t *testing.T) {
-requestMinion := &api.Node{
+func TestCreateNode(t *testing.T) {
+requestNode := &api.Node{
 ObjectMeta: api.ObjectMeta{
-Name: "minion-1",
+Name: "node-1",
 },
 Status: api.NodeStatus{
 Capacity: api.ResourceList{
@@ -114,17 +114,17 @@ func TestCreateMinion(t *testing.T) {
 Request: testRequest{
 Method: "POST",
 Path: testapi.Default.ResourcePath(getNodesResourceName(), "", ""),
-Body: requestMinion},
+Body: requestNode},
 Response: Response{
 StatusCode: 200,
-Body: requestMinion,
+Body: requestNode,
 },
 }
-receivedMinion, err := c.Setup(t).Nodes().Create(requestMinion)
-c.Validate(t, receivedMinion, err)
+receivedNode, err := c.Setup(t).Nodes().Create(requestNode)
+c.Validate(t, receivedNode, err)
 }

-func TestDeleteMinion(t *testing.T) {
+func TestDeleteNode(t *testing.T) {
 c := &testClient{
 Request: testRequest{
 Method: "DELETE",
@@ -136,8 +136,8 @@ func TestDeleteMinion(t *testing.T) {
 c.Validate(t, nil, err)
 }

-func TestUpdateMinion(t *testing.T) {
-requestMinion := &api.Node{
+func TestUpdateNode(t *testing.T) {
+requestNode := &api.Node{
 ObjectMeta: api.ObjectMeta{
 Name: "foo",
 ResourceVersion: "1",
@@ -157,8 +157,8 @@ func TestUpdateMinion(t *testing.T) {
 Method: "PUT",
 Path: testapi.Default.ResourcePath(getNodesResourceName(), "", "foo"),
 },
-Response: Response{StatusCode: 200, Body: requestMinion},
+Response: Response{StatusCode: 200, Body: requestNode},
 }
-response, err := c.Setup(t).Nodes().Update(requestMinion)
+response, err := c.Setup(t).Nodes().Update(requestNode)
 c.Validate(t, response, err)
 }
@@ -23,7 +23,7 @@ import (
 "k8s.io/kubernetes/pkg/watch"
 )

-// FakeNodes implements MinionInterface. Meant to be embedded into a struct to get a default
+// FakeNodes implements NodeInterface. Meant to be embedded into a struct to get a default
 // implementation. This makes faking out just the method you want to test easier.
 type FakeNodes struct {
 Fake *Fake
@@ -47,8 +47,8 @@ func (c *FakeNodes) List(label labels.Selector, field fields.Selector) (*api.Nod
 return obj.(*api.NodeList), err
 }

-func (c *FakeNodes) Create(minion *api.Node) (*api.Node, error) {
-obj, err := c.Fake.Invokes(NewRootCreateAction("nodes", minion), minion)
+func (c *FakeNodes) Create(node *api.Node) (*api.Node, error) {
+obj, err := c.Fake.Invokes(NewRootCreateAction("nodes", node), node)
 if obj == nil {
 return nil, err
 }
@@ -56,8 +56,8 @@ func (c *FakeNodes) Create(minion *api.Node) (*api.Node, error) {
 return obj.(*api.Node), err
 }

-func (c *FakeNodes) Update(minion *api.Node) (*api.Node, error) {
-obj, err := c.Fake.Invokes(NewRootUpdateAction("nodes", minion), minion)
+func (c *FakeNodes) Update(node *api.Node) (*api.Node, error) {
+obj, err := c.Fake.Invokes(NewRootUpdateAction("nodes", node), node)
 if obj == nil {
 return nil, err
 }
@@ -74,14 +74,14 @@ func (c *FakeNodes) Watch(label labels.Selector, field fields.Selector, resource
 return c.Fake.InvokesWatch(NewRootWatchAction("nodes", label, field, resourceVersion))
 }

-func (c *FakeNodes) UpdateStatus(minion *api.Node) (*api.Node, error) {
+func (c *FakeNodes) UpdateStatus(node *api.Node) (*api.Node, error) {
 action := CreateActionImpl{}
 action.Verb = "update"
 action.Resource = "nodes"
 action.Subresource = "status"
-action.Object = minion
+action.Object = node

-obj, err := c.Fake.Invokes(action, minion)
+obj, err := c.Fake.Invokes(action, node)
 if obj == nil {
 return nil, err
 }
@@ -23,16 +23,16 @@ import (
 "k8s.io/kubernetes/pkg/labels"
 )

-// MinionLister interface represents anything that can list minions for a scheduler.
-type MinionLister interface {
+// NodeLister interface represents anything that can list nodes for a scheduler.
+type NodeLister interface {
 List() (list api.NodeList, err error)
 }

-// FakeMinionLister implements MinionLister on a []string for test purposes.
-type FakeMinionLister api.NodeList
+// FakeNodeLister implements NodeLister on a []string for test purposes.
+type FakeNodeLister api.NodeList

-// List returns minions as a []string.
-func (f FakeMinionLister) List() (api.NodeList, error) {
+// List returns nodes as a []string.
+func (f FakeNodeLister) List() (api.NodeList, error) {
 return api.NodeList(f), nil
 }
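For illustration, a minimal test-style sketch of how the renamed fake plugs into code that expects an algorithm.NodeLister (the api and algorithm import paths are assumed from the surrounding files, not shown in this hunk):

package example

import (
	"testing"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
)

func TestFakeNodeListerSketch(t *testing.T) {
	// A canned node list converted to the fake lister type from the diff above.
	nodes := api.NodeList{Items: []api.Node{
		{ObjectMeta: api.ObjectMeta{Name: "node-1"}},
		{ObjectMeta: api.ObjectMeta{Name: "node-2"}},
	}}
	var lister algorithm.NodeLister = algorithm.FakeNodeLister(nodes)

	// List simply echoes the canned list and never returns an error.
	list, err := lister.List()
	if err != nil || len(list.Items) != 2 {
		t.Fatalf("unexpected result: %#v, %v", list, err)
	}
}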
@@ -203,12 +203,12 @@ type NodeSelector struct {
 info NodeInfo
 }

-func (n *NodeSelector) PodSelectorMatches(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
-minion, err := n.info.GetNodeInfo(node)
+func (n *NodeSelector) PodSelectorMatches(pod *api.Pod, existingPods []*api.Pod, nodeID string) (bool, error) {
+node, err := n.info.GetNodeInfo(nodeID)
 if err != nil {
 return false, err
 }
-return PodMatchesNodeLabels(pod, minion), nil
+return PodMatchesNodeLabels(pod, node), nil
 }

 func PodFitsHost(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
@@ -233,27 +233,27 @@ func NewNodeLabelPredicate(info NodeInfo, labels []string, presence bool) algori
 return labelChecker.CheckNodeLabelPresence
 }

-// CheckNodeLabelPresence checks whether all of the specified labels exists on a minion or not, regardless of their value
-// If "presence" is false, then returns false if any of the requested labels matches any of the minion's labels,
+// CheckNodeLabelPresence checks whether all of the specified labels exists on a node or not, regardless of their value
+// If "presence" is false, then returns false if any of the requested labels matches any of the node's labels,
 // otherwise returns true.
-// If "presence" is true, then returns false if any of the requested labels does not match any of the minion's labels,
+// If "presence" is true, then returns false if any of the requested labels does not match any of the node's labels,
 // otherwise returns true.
 //
-// Consider the cases where the minions are placed in regions/zones/racks and these are identified by labels
-// In some cases, it is required that only minions that are part of ANY of the defined regions/zones/racks be selected
+// Consider the cases where the nodes are placed in regions/zones/racks and these are identified by labels
+// In some cases, it is required that only nodes that are part of ANY of the defined regions/zones/racks be selected
 //
-// Alternately, eliminating minions that have a certain label, regardless of value, is also useful
-// A minion may have a label with "retiring" as key and the date as the value
-// and it may be desirable to avoid scheduling new pods on this minion
-func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
+// Alternately, eliminating nodes that have a certain label, regardless of value, is also useful
+// A node may have a label with "retiring" as key and the date as the value
+// and it may be desirable to avoid scheduling new pods on this node
+func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *api.Pod, existingPods []*api.Pod, nodeID string) (bool, error) {
 var exists bool
-minion, err := n.info.GetNodeInfo(node)
+node, err := n.info.GetNodeInfo(nodeID)
 if err != nil {
 return false, err
 }
-minionLabels := labels.Set(minion.Labels)
+nodeLabels := labels.Set(node.Labels)
 for _, label := range n.labels {
-exists = minionLabels.Has(label)
+exists = nodeLabels.Has(label)
 if (exists && !n.presence) || (!exists && n.presence) {
 return false, nil
 }
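To make the presence/absence semantics above concrete, here is a small standalone sketch (plain Go maps rather than the scheduler's labels.Set, so it is an illustration rather than the code under change):

package main

import "fmt"

// nodeLabelsAdmit returns true only when every requested label key satisfies the
// presence rule: with presence=true each key must exist on the node, with
// presence=false no key may exist on the node, regardless of label values.
func nodeLabelsAdmit(nodeLabels map[string]string, requested []string, presence bool) bool {
	for _, label := range requested {
		_, exists := nodeLabels[label]
		if (exists && !presence) || (!exists && presence) {
			return false
		}
	}
	return true
}

func main() {
	node := map[string]string{"region": "us-west", "retiring": "2015-09-30"}
	fmt.Println(nodeLabelsAdmit(node, []string{"region"}, true))    // true: label is present
	fmt.Println(nodeLabelsAdmit(node, []string{"retiring"}, false)) // false: label must be absent
}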
@@ -278,16 +278,16 @@ func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister al
 return affinity.CheckServiceAffinity
 }

-// CheckServiceAffinity ensures that only the minions that match the specified labels are considered for scheduling.
+// CheckServiceAffinity ensures that only the nodes that match the specified labels are considered for scheduling.
 // The set of labels to be considered are provided to the struct (ServiceAffinity).
-// The pod is checked for the labels and any missing labels are then checked in the minion
+// The pod is checked for the labels and any missing labels are then checked in the node
 // that hosts the service pods (peers) for the given pod.
 //
 // We add an implicit selector requiring some particular value V for label L to a pod, if:
 // - L is listed in the ServiceAffinity object that is passed into the function
 // - the pod does not have any NodeSelector for L
-// - some other pod from the same service is already scheduled onto a minion that has value V for label L
-func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
+// - some other pod from the same service is already scheduled onto a node that has value V for label L
+func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, existingPods []*api.Pod, nodeID string) (bool, error) {
 var affinitySelector labels.Selector

 // check if the pod being scheduled has the affinity labels specified in its NodeSelector
@@ -322,8 +322,8 @@ func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, existingPods []*api
 }
 }
 if len(nsServicePods) > 0 {
-// consider any service pod and fetch the minion its hosted on
-otherMinion, err := s.nodeInfo.GetNodeInfo(nsServicePods[0].Spec.NodeName)
+// consider any service pod and fetch the node its hosted on
+otherNode, err := s.nodeInfo.GetNodeInfo(nsServicePods[0].Spec.NodeName)
 if err != nil {
 return false, err
 }
@@ -332,28 +332,28 @@ func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, existingPods []*api
 if _, exists := affinityLabels[l]; exists {
 continue
 }
-if labels.Set(otherMinion.Labels).Has(l) {
-affinityLabels[l] = labels.Set(otherMinion.Labels).Get(l)
+if labels.Set(otherNode.Labels).Has(l) {
+affinityLabels[l] = labels.Set(otherNode.Labels).Get(l)
 }
 }
 }
 }
 }

-// if there are no existing pods in the service, consider all minions
+// if there are no existing pods in the service, consider all nodes
 if len(affinityLabels) == 0 {
 affinitySelector = labels.Everything()
 } else {
 affinitySelector = labels.Set(affinityLabels).AsSelector()
 }

-minion, err := s.nodeInfo.GetNodeInfo(node)
+node, err := s.nodeInfo.GetNodeInfo(nodeID)
 if err != nil {
 return false, err
 }

-// check if the minion matches the selector
-return affinitySelector.Matches(labels.Set(minion.Labels)), nil
+// check if the node matches the selector
+return affinitySelector.Matches(labels.Set(node.Labels)), nil
 }

 func PodFitsPorts(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
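The implicit-selector rule described in the comment block above can be sketched with plain maps (an illustration only, not the scheduler's labels.Selector machinery):

package main

import "fmt"

// serviceAffinityMatches builds the effective selector for a pod: for each affinity
// label, take the value pinned by the pod's own node selector if any, otherwise
// inherit the value from the node that already hosts a peer pod of the same service.
// The candidate node is admitted only if it carries all of the collected values.
func serviceAffinityMatches(affinityLabels []string, podNodeSelector, peerNodeLabels, candidateLabels map[string]string) bool {
	want := map[string]string{}
	for _, l := range affinityLabels {
		if v, ok := podNodeSelector[l]; ok {
			want[l] = v // the pod pins this label itself
		} else if v, ok := peerNodeLabels[l]; ok {
			want[l] = v // implicit selector inherited from a peer's node
		}
	}
	for l, v := range want {
		if candidateLabels[l] != v {
			return false
		}
	}
	return true // no constraints collected, or every constraint satisfied
}

func main() {
	affinity := []string{"region"}
	peer := map[string]string{"region": "r1", "zone": "z11"}
	fmt.Println(serviceAffinityMatches(affinity, nil, peer, map[string]string{"region": "r1"})) // true
	fmt.Println(serviceAffinityMatches(affinity, nil, peer, map[string]string{"region": "r2"})) // false
}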
@@ -637,7 +637,7 @@ func TestServiceAffinity(t *testing.T) {
 services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
 fits: true,
 labels: []string{"region"},
-test: "service pod on same minion",
+test: "service pod on same node",
 },
 {
 pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
@@ -646,7 +646,7 @@ func TestServiceAffinity(t *testing.T) {
 services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
 fits: true,
 labels: []string{"region"},
-test: "service pod on different minion, region match",
+test: "service pod on different node, region match",
 },
 {
 pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
@@ -655,7 +655,7 @@ func TestServiceAffinity(t *testing.T) {
 services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
 fits: false,
 labels: []string{"region"},
-test: "service pod on different minion, region mismatch",
+test: "service pod on different node, region mismatch",
 },
 {
 pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}},
@@ -691,7 +691,7 @@ func TestServiceAffinity(t *testing.T) {
 services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
 fits: false,
 labels: []string{"region", "zone"},
-test: "service pod on different minion, multiple labels, not all match",
+test: "service pod on different node, multiple labels, not all match",
 },
 {
 pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
@@ -700,7 +700,7 @@ func TestServiceAffinity(t *testing.T) {
 services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
 fits: true,
 labels: []string{"region", "zone"},
-test: "service pod on different minion, multiple labels, all match",
+test: "service pod on different node, multiple labels, all match",
 },
 }

@@ -87,7 +87,7 @@ func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) al
 }
 }
 // Add the resources requested by the current pod being scheduled.
-// This also helps differentiate between differently sized, but empty, minions.
+// This also helps differentiate between differently sized, but empty, nodes.
 for _, container := range pod.Spec.Containers {
 cpu, memory := getNonzeroRequests(&container.Resources.Requests)
 totalMilliCPU += cpu
@@ -114,8 +114,8 @@ func calculateResourceOccupancy(pod *api.Pod, node api.Node, pods []*api.Pod) al
 // It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
 // based on the minimum of the average of the fraction of requested to capacity.
 // Details: cpu((capacity - sum(requested)) * 10 / capacity) + memory((capacity - sum(requested)) * 10 / capacity) / 2
-func LeastRequestedPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
-nodes, err := minionLister.List()
+func LeastRequestedPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
+nodes, err := nodeLister.List()
 if err != nil {
 return algorithm.HostPriorityList{}, err
 }
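The scoring formula quoted in the comment above can be checked with a small standalone sketch (plain numbers, truncating like the score comments in the tests further down; this is an illustration, not the scheduler's own implementation):

package main

import "fmt"

// leastRequestedScore mirrors the documented rule:
// (cpu((capacity-requested)*10/capacity) + memory((capacity-requested)*10/capacity)) / 2
func leastRequestedScore(cpuRequested, cpuCapacity, memRequested, memCapacity float64) int {
	cpuScore := (cpuCapacity - cpuRequested) * 10 / cpuCapacity
	memScore := (memCapacity - memRequested) * 10 / memCapacity
	return int((cpuScore + memScore) / 2)
}

func main() {
	// Matches the "Node1" arithmetic in TestLeastRequested below:
	// CPU (4000-3000)*10/4000 = 2.5, Memory (10000-5000)*10/10000 = 5, (2.5+5)/2 -> 3.
	fmt.Println(leastRequestedScore(3000, 4000, 5000, 10000)) // 3
	// An idle node scores the maximum of 10.
	fmt.Println(leastRequestedScore(0, 4000, 0, 10000)) // 10
}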
@@ -141,32 +141,32 @@ func NewNodeLabelPriority(label string, presence bool) algorithm.PriorityFunctio
 return labelPrioritizer.CalculateNodeLabelPriority
 }

-// CalculateNodeLabelPriority checks whether a particular label exists on a minion or not, regardless of its value.
-// If presence is true, prioritizes minions that have the specified label, regardless of value.
-// If presence is false, prioritizes minions that do not have the specified label.
-func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
+// CalculateNodeLabelPriority checks whether a particular label exists on a node or not, regardless of its value.
+// If presence is true, prioritizes nodes that have the specified label, regardless of value.
+// If presence is false, prioritizes nodes that do not have the specified label.
+func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
 var score int
-minions, err := minionLister.List()
+nodes, err := nodeLister.List()
 if err != nil {
 return nil, err
 }

-labeledMinions := map[string]bool{}
-for _, minion := range minions.Items {
-exists := labels.Set(minion.Labels).Has(n.label)
-labeledMinions[minion.Name] = (exists && n.presence) || (!exists && !n.presence)
+labeledNodes := map[string]bool{}
+for _, node := range nodes.Items {
+exists := labels.Set(node.Labels).Has(n.label)
+labeledNodes[node.Name] = (exists && n.presence) || (!exists && !n.presence)
 }

 result := []algorithm.HostPriority{}
 //score int - scale of 0-10
 // 0 being the lowest priority and 10 being the highest
-for minionName, success := range labeledMinions {
+for nodeName, success := range labeledNodes {
 if success {
 score = 10
 } else {
 score = 0
 }
-result = append(result, algorithm.HostPriority{Host: minionName, Score: score})
+result = append(result, algorithm.HostPriority{Host: nodeName, Score: score})
 }
 return result, nil
 }
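The label prioritizer above is an all-or-nothing score; a standalone sketch of the same rule (plain maps, for illustration only):

package main

import "fmt"

// nodeLabelScore gives 10 to nodes that satisfy the presence rule for the label
// and 0 to the rest, mirroring the CalculateNodeLabelPriority logic above.
func nodeLabelScore(nodeLabels map[string]string, label string, presence bool) int {
	_, exists := nodeLabels[label]
	if (exists && presence) || (!exists && !presence) {
		return 10
	}
	return 0
}

func main() {
	fmt.Println(nodeLabelScore(map[string]string{"region": "r1"}, "region", true)) // 10
	fmt.Println(nodeLabelScore(map[string]string{}, "region", true))               // 0
}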
@@ -177,8 +177,8 @@ func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod *api.Pod, podListe
 // close the two metrics are to each other.
 // Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by:
 // "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization"
-func BalancedResourceAllocation(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
-nodes, err := minionLister.List()
+func BalancedResourceAllocation(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
+nodes, err := nodeLister.List()
 if err != nil {
 return algorithm.HostPriorityList{}, err
 }
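The balance formula from the comment (score = 10 - abs(cpuFraction-memoryFraction)*10) in a standalone sketch; the zero score for over-committed nodes follows the TestBalancedResourceAllocation comments further down, and the whole snippet is illustrative rather than the scheduler code itself:

package main

import (
	"fmt"
	"math"
)

// balancedScore rewards nodes whose CPU and memory utilisation fractions are
// close to each other; a fraction over 1 (requests exceed capacity) scores 0.
func balancedScore(cpuFraction, memFraction float64) int {
	if cpuFraction > 1 || memFraction > 1 {
		return 0
	}
	return int(10 - math.Abs(cpuFraction-memFraction)*10)
}

func main() {
	// Matches the test comments below: 75% CPU vs 50% memory -> 10 - 2.5 = 7.
	fmt.Println(balancedScore(0.75, 0.5)) // 7
	// 60% CPU vs 25% memory -> 10 - 3.5 = 6.
	fmt.Println(balancedScore(0.6, 0.25)) // 6
	// Requests exceed CPU capacity -> 0.
	fmt.Println(balancedScore(1.5, 0.5)) // 0
}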
@@ -203,7 +203,7 @@ func calculateBalancedResourceAllocation(pod *api.Pod, node api.Node, pods []*ap
 }
 }
 // Add the resources requested by the current pod being scheduled.
-// This also helps differentiate between differently sized, but empty, minions.
+// This also helps differentiate between differently sized, but empty, nodes.
 for _, container := range pod.Spec.Containers {
 cpu, memory := getNonzeroRequests(&container.Resources.Requests)
 totalMilliCPU += cpu
@@ -28,7 +28,7 @@ import (
 "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
 )

-func makeMinion(node string, milliCPU, memory int64) api.Node {
+func makeNode(node string, milliCPU, memory int64) api.Node {
 return api.Node{
 ObjectMeta: api.ObjectMeta{Name: node},
 Status: api.NodeStatus{
@@ -96,7 +96,7 @@ func TestZeroRequest(t *testing.T) {
 // and when the zero-request pod is the one being scheduled.
 {
 pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryRequest*10), makeMinion("machine2", 1000, defaultMemoryRequest*10)},
+nodes: []api.Node{makeNode("machine1", 1000, defaultMemoryRequest*10), makeNode("machine2", 1000, defaultMemoryRequest*10)},
 test: "test priority of zero-request pod with machine with zero-request pod",
 pods: []*api.Pod{
 {Spec: large1}, {Spec: noResources1},
@@ -105,7 +105,7 @@ func TestZeroRequest(t *testing.T) {
 },
 {
 pod: &api.Pod{Spec: small},
-nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryRequest*10), makeMinion("machine2", 1000, defaultMemoryRequest*10)},
+nodes: []api.Node{makeNode("machine1", 1000, defaultMemoryRequest*10), makeNode("machine2", 1000, defaultMemoryRequest*10)},
 test: "test priority of nonzero-request pod with machine with zero-request pod",
 pods: []*api.Pod{
 {Spec: large1}, {Spec: noResources1},
@@ -115,7 +115,7 @@ func TestZeroRequest(t *testing.T) {
 // The point of this test is to verify that we're not just getting the same score no matter what we schedule.
 {
 pod: &api.Pod{Spec: large},
-nodes: []api.Node{makeMinion("machine1", 1000, defaultMemoryRequest*10), makeMinion("machine2", 1000, defaultMemoryRequest*10)},
+nodes: []api.Node{makeNode("machine1", 1000, defaultMemoryRequest*10), makeNode("machine2", 1000, defaultMemoryRequest*10)},
 test: "test priority of larger pod with machine with zero-request pod",
 pods: []*api.Pod{
 {Spec: large1}, {Spec: noResources1},
@@ -133,7 +133,7 @@ func TestZeroRequest(t *testing.T) {
 // plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go if you want
 // to test what's actually in production.
 []algorithm.PriorityConfig{{Function: LeastRequestedPriority, Weight: 1}, {Function: BalancedResourceAllocation, Weight: 1}, {Function: NewSelectorSpreadPriority(algorithm.FakeServiceLister([]api.Service{}), algorithm.FakeControllerLister([]api.ReplicationController{})), Weight: 1}},
-algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
+algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -222,52 +222,52 @@ func TestLeastRequested(t *testing.T) {
 }{
 {
 /*
-Minion1 scores (remaining resources) on 0-10 scale
+Node1 scores (remaining resources) on 0-10 scale
 CPU Score: ((4000 - 0) *10) / 4000 = 10
 Memory Score: ((10000 - 0) *10) / 10000 = 10
-Minion1 Score: (10 + 10) / 2 = 10
+Node1 Score: (10 + 10) / 2 = 10

-Minion2 scores (remaining resources) on 0-10 scale
+Node2 scores (remaining resources) on 0-10 scale
 CPU Score: ((4000 - 0) *10) / 4000 = 10
 Memory Score: ((10000 - 0) *10) / 10000 = 10
-Minion2 Score: (10 + 10) / 2 = 10
+Node2 Score: (10 + 10) / 2 = 10
 */
 pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
+nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
 test: "nothing scheduled, nothing requested",
 },
 {
 /*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
 CPU Score: ((4000 - 3000) *10) / 4000 = 2.5
 Memory Score: ((10000 - 5000) *10) / 10000 = 5
-Minion1 Score: (2.5 + 5) / 2 = 3
+Node1 Score: (2.5 + 5) / 2 = 3

-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
 CPU Score: ((6000 - 3000) *10) / 6000 = 5
 Memory Score: ((10000 - 5000) *10) / 10000 = 5
-Minion2 Score: (5 + 5) / 2 = 5
+Node2 Score: (5 + 5) / 2 = 5
 */
 pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 6000, 10000)},
+nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
 expectedList: []algorithm.HostPriority{{"machine1", 3}, {"machine2", 5}},
 test: "nothing scheduled, resources requested, differently sized machines",
 },
 {
 /*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
 CPU Score: ((4000 - 0) *10) / 4000 = 10
 Memory Score: ((10000 - 0) *10) / 10000 = 10
-Minion1 Score: (10 + 10) / 2 = 10
+Node1 Score: (10 + 10) / 2 = 10

-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
 CPU Score: ((4000 - 0) *10) / 4000 = 10
 Memory Score: ((10000 - 0) *10) / 10000 = 10
-Minion2 Score: (10 + 10) / 2 = 10
+Node2 Score: (10 + 10) / 2 = 10
 */
 pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
+nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
 test: "no resources requested, pods scheduled",
 pods: []*api.Pod{
@@ -279,18 +279,18 @@ func TestLeastRequested(t *testing.T) {
 },
 {
 /*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
 CPU Score: ((10000 - 6000) *10) / 10000 = 4
 Memory Score: ((20000 - 0) *10) / 20000 = 10
-Minion1 Score: (4 + 10) / 2 = 7
+Node1 Score: (4 + 10) / 2 = 7

-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
 CPU Score: ((10000 - 6000) *10) / 10000 = 4
 Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
-Minion2 Score: (4 + 7.5) / 2 = 5
+Node2 Score: (4 + 7.5) / 2 = 5
 */
 pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
+nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 expectedList: []algorithm.HostPriority{{"machine1", 7}, {"machine2", 5}},
 test: "no resources requested, pods scheduled with resources",
 pods: []*api.Pod{
@@ -302,18 +302,18 @@ func TestLeastRequested(t *testing.T) {
 },
 {
 /*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
 CPU Score: ((10000 - 6000) *10) / 10000 = 4
 Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
-Minion1 Score: (4 + 7.5) / 2 = 5
+Node1 Score: (4 + 7.5) / 2 = 5

-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
 CPU Score: ((10000 - 6000) *10) / 10000 = 4
 Memory Score: ((20000 - 10000) *10) / 20000 = 5
-Minion2 Score: (4 + 5) / 2 = 4
+Node2 Score: (4 + 5) / 2 = 4
 */
 pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
+nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 4}},
 test: "resources requested, pods scheduled with resources",
 pods: []*api.Pod{
@@ -323,18 +323,18 @@ func TestLeastRequested(t *testing.T) {
 },
 {
 /*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
 CPU Score: ((10000 - 6000) *10) / 10000 = 4
 Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
-Minion1 Score: (4 + 7.5) / 2 = 5
+Node1 Score: (4 + 7.5) / 2 = 5

-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
 CPU Score: ((10000 - 6000) *10) / 10000 = 4
 Memory Score: ((50000 - 10000) *10) / 50000 = 8
-Minion2 Score: (4 + 8) / 2 = 6
+Node2 Score: (4 + 8) / 2 = 6
 */
 pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 50000)},
+nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
 expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 6}},
 test: "resources requested, pods scheduled with resources, differently sized machines",
 pods: []*api.Pod{
@@ -344,20 +344,20 @@ func TestLeastRequested(t *testing.T) {
 },
 {
 /*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
 CPU Score: ((4000 - 6000) *10) / 4000 = 0
 Memory Score: ((10000 - 0) *10) / 10000 = 10
-Minion1 Score: (0 + 10) / 2 = 5
+Node1 Score: (0 + 10) / 2 = 5

-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
 CPU Score: ((4000 - 6000) *10) / 4000 = 0
 Memory Score: ((10000 - 5000) *10) / 10000 = 5
-Minion2 Score: (0 + 5) / 2 = 2
+Node2 Score: (0 + 5) / 2 = 2
 */
 pod: &api.Pod{Spec: cpuOnly},
-nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
+nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 expectedList: []algorithm.HostPriority{{"machine1", 5}, {"machine2", 2}},
-test: "requested resources exceed minion capacity",
+test: "requested resources exceed node capacity",
 pods: []*api.Pod{
 {Spec: cpuOnly},
 {Spec: cpuAndMemory},
@@ -365,9 +365,9 @@ func TestLeastRequested(t *testing.T) {
 },
 {
 pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeMinion("machine1", 0, 0), makeMinion("machine2", 0, 0)},
+nodes: []api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
 expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
-test: "zero minion resources, pods scheduled with resources",
+test: "zero node resources, pods scheduled with resources",
 pods: []*api.Pod{
 {Spec: cpuOnly},
 {Spec: cpuAndMemory},
@@ -376,7 +376,7 @@ func TestLeastRequested(t *testing.T) {
 }

 for _, test := range tests {
-list, err := LeastRequestedPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
+list, err := LeastRequestedPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -470,7 +470,7 @@ func TestNewNodeLabelPriority(t *testing.T) {
 label: test.label,
 presence: test.presence,
 }
-list, err := prioritizer.CalculateNodeLabelPriority(nil, nil, algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
+list, err := prioritizer.CalculateNodeLabelPriority(nil, nil, algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -554,52 +554,52 @@ func TestBalancedResourceAllocation(t *testing.T) {
 }{
 {
 /*
-Minion1 scores (remaining resources) on 0-10 scale
+Node1 scores (remaining resources) on 0-10 scale
 CPU Fraction: 0 / 4000 = 0%
 Memory Fraction: 0 / 10000 = 0%
-Minion1 Score: 10 - (0-0)*10 = 10
+Node1 Score: 10 - (0-0)*10 = 10

-Minion2 scores (remaining resources) on 0-10 scale
+Node2 scores (remaining resources) on 0-10 scale
 CPU Fraction: 0 / 4000 = 0 %
 Memory Fraction: 0 / 10000 = 0%
-Minion2 Score: 10 - (0-0)*10 = 10
+Node2 Score: 10 - (0-0)*10 = 10
 */
 pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
+nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
 test: "nothing scheduled, nothing requested",
 },
 {
 /*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
 CPU Fraction: 3000 / 4000= 75%
 Memory Fraction: 5000 / 10000 = 50%
-Minion1 Score: 10 - (0.75-0.5)*10 = 7
+Node1 Score: 10 - (0.75-0.5)*10 = 7

-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
 CPU Fraction: 3000 / 6000= 50%
 Memory Fraction: 5000/10000 = 50%
-Minion2 Score: 10 - (0.5-0.5)*10 = 10
+Node2 Score: 10 - (0.5-0.5)*10 = 10
 */
 pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 6000, 10000)},
+nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
 expectedList: []algorithm.HostPriority{{"machine1", 7}, {"machine2", 10}},
 test: "nothing scheduled, resources requested, differently sized machines",
 },
 {
 /*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
 CPU Fraction: 0 / 4000= 0%
 Memory Fraction: 0 / 10000 = 0%
-Minion1 Score: 10 - (0-0)*10 = 10
+Node1 Score: 10 - (0-0)*10 = 10

-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
 CPU Fraction: 0 / 4000= 0%
 Memory Fraction: 0 / 10000 = 0%
-Minion2 Score: 10 - (0-0)*10 = 10
+Node2 Score: 10 - (0-0)*10 = 10
 */
 pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
+nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 expectedList: []algorithm.HostPriority{{"machine1", 10}, {"machine2", 10}},
 test: "no resources requested, pods scheduled",
 pods: []*api.Pod{
@@ -611,18 +611,18 @@ func TestBalancedResourceAllocation(t *testing.T) {
 },
 {
 /*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
 CPU Fraction: 6000 / 10000 = 60%
 Memory Fraction: 0 / 20000 = 0%
-Minion1 Score: 10 - (0.6-0)*10 = 4
+Node1 Score: 10 - (0.6-0)*10 = 4

-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
 CPU Fraction: 6000 / 10000 = 60%
 Memory Fraction: 5000 / 20000 = 25%
-Minion2 Score: 10 - (0.6-0.25)*10 = 6
+Node2 Score: 10 - (0.6-0.25)*10 = 6
 */
 pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
+nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 expectedList: []algorithm.HostPriority{{"machine1", 4}, {"machine2", 6}},
 test: "no resources requested, pods scheduled with resources",
 pods: []*api.Pod{
@@ -634,18 +634,18 @@ func TestBalancedResourceAllocation(t *testing.T) {
 },
 {
 /*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
 CPU Fraction: 6000 / 10000 = 60%
 Memory Fraction: 5000 / 20000 = 25%
-Minion1 Score: 10 - (0.6-0.25)*10 = 6
+Node1 Score: 10 - (0.6-0.25)*10 = 6

-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
 CPU Fraction: 6000 / 10000 = 60%
 Memory Fraction: 10000 / 20000 = 50%
-Minion2 Score: 10 - (0.6-0.5)*10 = 9
+Node2 Score: 10 - (0.6-0.5)*10 = 9
 */
 pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
+nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 expectedList: []algorithm.HostPriority{{"machine1", 6}, {"machine2", 9}},
 test: "resources requested, pods scheduled with resources",
 pods: []*api.Pod{
@@ -655,18 +655,18 @@ func TestBalancedResourceAllocation(t *testing.T) {
 },
 {
 /*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
 CPU Fraction: 6000 / 10000 = 60%
 Memory Fraction: 5000 / 20000 = 25%
-Minion1 Score: 10 - (0.6-0.25)*10 = 6
+Node1 Score: 10 - (0.6-0.25)*10 = 6

-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
 CPU Fraction: 6000 / 10000 = 60%
 Memory Fraction: 10000 / 50000 = 20%
-Minion2 Score: 10 - (0.6-0.2)*10 = 6
+Node2 Score: 10 - (0.6-0.2)*10 = 6
 */
 pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 50000)},
+nodes: []api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
 expectedList: []algorithm.HostPriority{{"machine1", 6}, {"machine2", 6}},
 test: "resources requested, pods scheduled with resources, differently sized machines",
 pods: []*api.Pod{
@@ -676,20 +676,20 @@ func TestBalancedResourceAllocation(t *testing.T) {
 },
 {
 /*
-Minion1 scores on 0-10 scale
+Node1 scores on 0-10 scale
 CPU Fraction: 6000 / 4000 > 100% ==> Score := 0
 Memory Fraction: 0 / 10000 = 0
-Minion1 Score: 0
+Node1 Score: 0

-Minion2 scores on 0-10 scale
+Node2 scores on 0-10 scale
 CPU Fraction: 6000 / 4000 > 100% ==> Score := 0
 Memory Fraction 5000 / 10000 = 50%
-Minion2 Score: 0
+Node2 Score: 0
 */
 pod: &api.Pod{Spec: cpuOnly},
-nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
+nodes: []api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
-test: "requested resources exceed minion capacity",
+test: "requested resources exceed node capacity",
 pods: []*api.Pod{
 {Spec: cpuOnly},
 {Spec: cpuAndMemory},
@@ -697,9 +697,9 @@ func TestBalancedResourceAllocation(t *testing.T) {
 },
 {
 pod: &api.Pod{Spec: noResources},
-nodes: []api.Node{makeMinion("machine1", 0, 0), makeMinion("machine2", 0, 0)},
+nodes: []api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
 expectedList: []algorithm.HostPriority{{"machine1", 0}, {"machine2", 0}},
-test: "zero minion resources, pods scheduled with resources",
+test: "zero node resources, pods scheduled with resources",
 pods: []*api.Pod{
 {Spec: cpuOnly},
 {Spec: cpuAndMemory},
@@ -708,7 +708,7 @@ func TestBalancedResourceAllocation(t *testing.T) {
 }

 for _, test := range tests {
-list, err := BalancedResourceAllocation(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(api.NodeList{Items: test.nodes}))
+list, err := BalancedResourceAllocation(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeNodeLister(api.NodeList{Items: test.nodes}))
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -39,7 +39,7 @@ func NewSelectorSpreadPriority(serviceLister algorithm.ServiceLister, controller
 // CalculateSpreadPriority spreads pods by minimizing the number of pods belonging to the same service or replication controller. It counts number of pods that run under
 // Services or RCs as the pod being scheduled and tries to minimize the number of conflicts. I.e. pushes scheduler towards a Node where there's a smallest number of
 // pods which match the same selectors of Services and RCs as current pod.
-func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
+func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
 var maxCount int
 var nsPods []*api.Pod

@@ -70,7 +70,7 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorit
 }
 }

-minions, err := minionLister.List()
+nodes, err := nodeLister.List()
 if err != nil {
 return nil, err
 }
@@ -87,7 +87,7 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorit
 }
 if matches {
 counts[pod.Spec.NodeName]++
-// Compute the maximum number of pods hosted on any minion
+// Compute the maximum number of pods hosted on any node
 if counts[pod.Spec.NodeName] > maxCount {
 maxCount = counts[pod.Spec.NodeName]
 }
@@ -98,15 +98,15 @@ func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, podLister algorit
 result := []algorithm.HostPriority{}
 //score int - scale of 0-10
 // 0 being the lowest priority and 10 being the highest
-for _, minion := range minions.Items {
-// initializing to the default/max minion score of 10
+for _, node := range nodes.Items {
+// initializing to the default/max node score of 10
 fScore := float32(10)
 if maxCount > 0 {
-fScore = 10 * (float32(maxCount-counts[minion.Name]) / float32(maxCount))
+fScore = 10 * (float32(maxCount-counts[node.Name]) / float32(maxCount))
 }
-result = append(result, algorithm.HostPriority{Host: minion.Name, Score: int(fScore)})
+result = append(result, algorithm.HostPriority{Host: node.Name, Score: int(fScore)})
 glog.V(10).Infof(
-"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, minion.Name, int(fScore),
+"%v -> %v: SelectorSpreadPriority, Score: (%d)", pod.Name, node.Name, int(fScore),
 )
 }
 return result, nil
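The spreading score above is just a linear rescaling of per-node peer counts; a standalone sketch of the same arithmetic (plain Go, for illustration):

package main

import "fmt"

// spreadScores maps each node to 10*(maxCount-count)/maxCount, so the node with
// the most matching peer pods gets 0 and nodes with none get 10; when no peers
// exist anywhere, every node keeps the default score of 10.
func spreadScores(counts map[string]int) map[string]int {
	maxCount := 0
	for _, c := range counts {
		if c > maxCount {
			maxCount = c
		}
	}
	scores := map[string]int{}
	for node, c := range counts {
		fScore := float32(10)
		if maxCount > 0 {
			fScore = 10 * (float32(maxCount-c) / float32(maxCount))
		}
		scores[node] = int(fScore)
	}
	return scores
}

func main() {
	fmt.Println(spreadScores(map[string]int{"machine1": 2, "machine2": 0})) // map[machine1:0 machine2:10]
}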
@@ -128,7 +128,7 @@ func NewServiceAntiAffinityPriority(serviceLister algorithm.ServiceLister, label
 // CalculateAntiAffinityPriority spreads pods by minimizing the number of pods belonging to the same service
 // on machines with the same value for a particular label.
 // The label to be considered is provided to the struct (ServiceAntiAffinity).
-func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
+func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
 var nsServicePods []*api.Pod

 services, err := s.serviceLister.GetPodServices(pod)
@@ -148,26 +148,26 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLis
 }
 }

-minions, err := minionLister.List()
+nodes, err := nodeLister.List()
 if err != nil {
 return nil, err
 }

-// separate out the minions that have the label from the ones that don't
-otherMinions := []string{}
-labeledMinions := map[string]string{}
-for _, minion := range minions.Items {
-if labels.Set(minion.Labels).Has(s.label) {
-label := labels.Set(minion.Labels).Get(s.label)
-labeledMinions[minion.Name] = label
+// separate out the nodes that have the label from the ones that don't
+otherNodes := []string{}
+labeledNodes := map[string]string{}
+for _, node := range nodes.Items {
+if labels.Set(node.Labels).Has(s.label) {
+label := labels.Set(node.Labels).Get(s.label)
+labeledNodes[node.Name] = label
 } else {
-otherMinions = append(otherMinions, minion.Name)
+otherNodes = append(otherNodes, node.Name)
 }
 }

 podCounts := map[string]int{}
 for _, pod := range nsServicePods {
-label, exists := labeledMinions[pod.Spec.NodeName]
+label, exists := labeledNodes[pod.Spec.NodeName]
 if !exists {
 continue
 }
@@ -178,17 +178,17 @@ func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, podLis
 result := []algorithm.HostPriority{}
 //score int - scale of 0-10
 // 0 being the lowest priority and 10 being the highest
-for minion := range labeledMinions {
-// initializing to the default/max minion score of 10
+for node := range labeledNodes {
+// initializing to the default/max node score of 10
 fScore := float32(10)
 if numServicePods > 0 {
-fScore = 10 * (float32(numServicePods-podCounts[labeledMinions[minion]]) / float32(numServicePods))
+fScore = 10 * (float32(numServicePods-podCounts[labeledNodes[node]]) / float32(numServicePods))
 }
-result = append(result, algorithm.HostPriority{Host: minion, Score: int(fScore)})
+result = append(result, algorithm.HostPriority{Host: node, Score: int(fScore)})
 }
-// add the open minions with a score of 0
-for _, minion := range otherMinions {
-result = append(result, algorithm.HostPriority{Host: minion, Score: 0})
+// add the open nodes with a score of 0
+for _, node := range otherNodes {
+result = append(result, algorithm.HostPriority{Host: node, Score: 0})
 }

 return result, nil
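The zone-level anti-affinity score mirrors the node-level spread above, but counts peers per label value and gives unlabeled nodes a flat 0; a standalone sketch (plain Go maps, illustrative only, analogous to the TestZoneSpreadPriority cases below):

package main

import "fmt"

// zoneSpreadScores scores each labeled node as 10*(total-podsInItsZone)/total and
// every unlabeled ("open") node as 0.
func zoneSpreadScores(nodeZone map[string]string, podsPerZone map[string]int, unlabeled []string) map[string]int {
	total := 0
	for _, c := range podsPerZone {
		total += c
	}
	scores := map[string]int{}
	for node, zone := range nodeZone {
		fScore := float32(10)
		if total > 0 {
			fScore = 10 * (float32(total-podsPerZone[zone]) / float32(total))
		}
		scores[node] = int(fScore)
	}
	for _, node := range unlabeled {
		scores[node] = 0
	}
	return scores
}

func main() {
	nodes := map[string]string{"machine11": "zone1", "machine21": "zone2"}
	// With 1 peer in zone1 and 2 in zone2: machine11 -> 10*2/3 = 6, machine21 -> 10*1/3 = 3, machine01 -> 0.
	fmt.Println(zoneSpreadScores(nodes, map[string]int{"zone1": 1, "zone2": 2}, []string{"machine01"}))
}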
@@ -217,7 +217,7 @@ func TestSelectorSpreadPriority(t *testing.T) {

 for _, test := range tests {
 selectorSpread := SelectorSpread{serviceLister: algorithm.FakeServiceLister(test.services), controllerLister: algorithm.FakeControllerLister(test.rcs)}
-list, err := selectorSpread.CalculateSpreadPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(makeNodeList(test.nodes)))
+list, err := selectorSpread.CalculateSpreadPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeNodeLister(makeNodeList(test.nodes)))
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -379,13 +379,13 @@ func TestZoneSpreadPriority(t *testing.T) {
 expectedList: []algorithm.HostPriority{{"machine11", 7}, {"machine12", 7},
 {"machine21", 5}, {"machine22", 5},
 {"machine01", 0}, {"machine02", 0}},
-test: "service pod on non-zoned minion",
+test: "service pod on non-zoned node",
 },
 }

 for _, test := range tests {
 zoneSpread := ServiceAntiAffinity{serviceLister: algorithm.FakeServiceLister(test.services), label: "zone"}
-list, err := zoneSpread.CalculateAntiAffinityPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeMinionLister(makeLabeledMinionList(test.nodes)))
+list, err := zoneSpread.CalculateAntiAffinityPriority(test.pod, algorithm.FakePodLister(test.pods), algorithm.FakeNodeLister(makeLabeledNodeList(test.nodes)))
 if err != nil {
 t.Errorf("unexpected error: %v", err)
 }
@@ -398,7 +398,7 @@ func TestZoneSpreadPriority(t *testing.T) {
 }
 }

-func makeLabeledMinionList(nodeMap map[string]map[string]string) (result api.NodeList) {
+func makeLabeledNodeList(nodeMap map[string]map[string]string) (result api.NodeList) {
 nodes := []api.Node{}
 for nodeName, labels := range nodeMap {
 nodes = append(nodes, api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName, Labels: labels}})
@@ -23,5 +23,5 @@ import (
 // Scheduler is an interface implemented by things that know how to schedule pods
 // onto machines.
 type ScheduleAlgorithm interface {
-Schedule(*api.Pod, MinionLister) (selectedMachine string, err error)
+Schedule(*api.Pod, NodeLister) (selectedMachine string, err error)
 }
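A minimal sketch of what an implementation of the renamed interface looks like after this change (a hypothetical first-fit algorithm, not part of this commit; the api and algorithm import paths are assumed from the surrounding files):

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
)

// firstFit satisfies algorithm.ScheduleAlgorithm by picking the first listed node.
type firstFit struct{}

func (firstFit) Schedule(pod *api.Pod, nodeLister algorithm.NodeLister) (string, error) {
	nodes, err := nodeLister.List()
	if err != nil {
		return "", err
	}
	if len(nodes.Items) == 0 {
		return "", fmt.Errorf("no nodes available to schedule pod %s", pod.Name)
	}
	return nodes.Items[0].Name, nil
}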
@@ -25,14 +25,14 @@ import (
 // Some functions used by multiple scheduler tests.

 type schedulerTester struct {
-t *testing.T
-scheduler ScheduleAlgorithm
-minionLister MinionLister
+t *testing.T
+scheduler ScheduleAlgorithm
+nodeLister NodeLister
 }

 // Call if you know exactly where pod should get scheduled.
 func (st *schedulerTester) expectSchedule(pod *api.Pod, expected string) {
-actual, err := st.scheduler.Schedule(pod, st.minionLister)
+actual, err := st.scheduler.Schedule(pod, st.nodeLister)
 if err != nil {
 st.t.Errorf("Unexpected error %v\nTried to scheduler: %#v", err, pod)
 return
@@ -44,7 +44,7 @@ func (st *schedulerTester) expectSchedule(pod *api.Pod, expected string) {

 // Call if you can't predict where pod will be scheduled.
 func (st *schedulerTester) expectSuccess(pod *api.Pod) {
-_, err := st.scheduler.Schedule(pod, st.minionLister)
+_, err := st.scheduler.Schedule(pod, st.nodeLister)
 if err != nil {
 st.t.Errorf("Unexpected error %v\nTried to scheduler: %#v", err, pod)
 return
@@ -53,7 +53,7 @@ func (st *schedulerTester) expectSuccess(pod *api.Pod) {

 // Call if pod should *not* schedule.
 func (st *schedulerTester) expectFailure(pod *api.Pod) {
-_, err := st.scheduler.Schedule(pod, st.minionLister)
+_, err := st.scheduler.Schedule(pod, st.nodeLister)
 if err == nil {
 st.t.Error("Unexpected non-error")
 }
@ -46,7 +46,7 @@ func (h HostPriorityList) Swap(i, j int) {
h[i], h[j] = h[j], h[i]
}

type PriorityFunction func(pod *api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error)
type PriorityFunction func(pod *api.Pod, podLister PodLister, nodeLister NodeLister) (HostPriorityList, error)

type PriorityConfig struct {
Function PriorityFunction
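A custom priority only needs to match the renamed PriorityFunction signature and return scores in the 0-10 range. A hypothetical example (not part of the commit) that prefers nodes with shorter names, written from outside the package the way factory-registered priorities are:

// shortNamePriority is a made-up PriorityFunction against the new signature.
func shortNamePriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
	nodes, err := nodeLister.List()
	if err != nil {
		return nil, err
	}
	result := algorithm.HostPriorityList{}
	for _, node := range nodes.Items {
		score := 10 - len(node.Name)
		if score < 0 {
			score = 0
		}
		result = append(result, algorithm.HostPriority{Host: node.Name, Score: score})
	}
	return result, nil
}

// Registration would mirror the factory call shown later in this diff, with an
// arbitrary example name and weight:
// factory.RegisterPriorityFunction("ShortNamePriority", shortNamePriority, 1)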
@ -28,7 +28,7 @@ import (
|
||||
|
||||
func init() {
|
||||
factory.RegisterAlgorithmProvider(factory.DefaultProvider, defaultPredicates(), defaultPriorities())
|
||||
// EqualPriority is a prioritizer function that gives an equal weight of one to all minions
|
||||
// EqualPriority is a prioritizer function that gives an equal weight of one to all nodes
|
||||
// Register the priority function so that its available
|
||||
// but do not include it as part of the default priorities
|
||||
factory.RegisterPriorityFunction("EqualPriority", scheduler.EqualPriority, 1)
|
||||
|
@ -42,7 +42,7 @@ type PriorityPolicy struct {
|
||||
// For a custom priority, the name can be user-defined
|
||||
// For the Kubernetes provided priority functions, the name is the identifier of the pre-defined priority function
|
||||
Name string `json:"name"`
|
||||
// The numeric multiplier for the minion scores that the priority function generates
|
||||
// The numeric multiplier for the node scores that the priority function generates
|
||||
// The weight should be non-zero and can be a positive or a negative integer
|
||||
Weight int `json:"weight"`
|
||||
// Holds the parameters to configure the given priority function
|
||||
@ -53,9 +53,9 @@ type PriorityPolicy struct {
|
||||
// Only one of its members may be specified
|
||||
type PredicateArgument struct {
|
||||
// The predicate that provides affinity for pods belonging to a service
|
||||
// It uses a label to identify minions that belong to the same "group"
|
||||
// It uses a label to identify nodes that belong to the same "group"
|
||||
ServiceAffinity *ServiceAffinity `json:"serviceAffinity"`
|
||||
// The predicate that checks whether a particular minion has a certain label
|
||||
// The predicate that checks whether a particular node has a certain label
|
||||
// defined or not, regardless of value
|
||||
LabelsPresence *LabelsPresence `json:"labelsPresence"`
|
||||
}
|
||||
@ -64,41 +64,41 @@ type PredicateArgument struct {
|
||||
// Only one of its members may be specified
|
||||
type PriorityArgument struct {
|
||||
// The priority function that ensures a good spread (anti-affinity) for pods belonging to a service
|
||||
// It uses a label to identify minions that belong to the same "group"
|
||||
// It uses a label to identify nodes that belong to the same "group"
|
||||
ServiceAntiAffinity *ServiceAntiAffinity `json:"serviceAntiAffinity"`
|
||||
// The priority function that checks whether a particular minion has a certain label
|
||||
// The priority function that checks whether a particular node has a certain label
|
||||
// defined or not, regardless of value
|
||||
LabelPreference *LabelPreference `json:"labelPreference"`
|
||||
}
|
||||
|
||||
// Holds the parameters that are used to configure the corresponding predicate
|
||||
type ServiceAffinity struct {
|
||||
// The list of labels that identify minion "groups"
|
||||
// All of the labels should match for the minion to be considered a fit for hosting the pod
|
||||
// The list of labels that identify node "groups"
|
||||
// All of the labels should match for the node to be considered a fit for hosting the pod
|
||||
Labels []string `json:"labels"`
|
||||
}
|
||||
|
||||
// Holds the parameters that are used to configure the corresponding predicate
|
||||
type LabelsPresence struct {
|
||||
// The list of labels that identify minion "groups"
|
||||
// All of the labels should be either present (or absent) for the minion to be considered a fit for hosting the pod
|
||||
// The list of labels that identify node "groups"
|
||||
// All of the labels should be either present (or absent) for the node to be considered a fit for hosting the pod
|
||||
Labels []string `json:"labels"`
|
||||
// The boolean flag that indicates whether the labels should be present or absent from the minion
|
||||
// The boolean flag that indicates whether the labels should be present or absent from the node
|
||||
Presence bool `json:"presence"`
|
||||
}
|
||||
|
||||
// Holds the parameters that are used to configure the corresponding priority function
type ServiceAntiAffinity struct {
// Used to identify minion "groups"
// Used to identify node "groups"
Label string `json:"label"`
}

// Holds the parameters that are used to configure the corresponding priority function
type LabelPreference struct {
// Used to identify minion "groups"
// Used to identify node "groups"
Label string `json:"label"`
// This is a boolean flag
// If true, higher priority is given to minions that have the label
// If false, higher priority is given to minions that do not have the label
// If true, higher priority is given to nodes that have the label
// If false, higher priority is given to nodes that do not have the label
Presence bool `json:"presence"`
}
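The rewritten comments only change terminology; the behaviour stays as documented: with Presence set to true the labeled nodes are preferred, with Presence set to false the unlabeled ones are. A small stand-alone sketch of that rule (the helper and the sample labels are hypothetical):

// labelPreferenceScore gives 10 to nodes matching the preference and 0 to the
// rest, the way the LabelPreference comment above describes.
func labelPreferenceScore(nodeLabels map[string]string, label string, presence bool) int {
	_, hasLabel := nodeLabels[label]
	if hasLabel == presence {
		return 10
	}
	return 0
}

// With label "zone" and presence=false, an unlabeled node is preferred:
//   labelPreferenceScore(map[string]string{"zone": "us-east-1a"}, "zone", false) == 0
//   labelPreferenceScore(map[string]string{}, "zone", false) == 10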
@ -42,7 +42,7 @@ type PriorityPolicy struct {
|
||||
// For a custom priority, the name can be user-defined
|
||||
// For the Kubernetes provided priority functions, the name is the identifier of the pre-defined priority function
|
||||
Name string `json:"name"`
|
||||
// The numeric multiplier for the minion scores that the priority function generates
|
||||
// The numeric multiplier for the node scores that the priority function generates
|
||||
// The weight should be non-zero and can be a positive or a negative integer
|
||||
Weight int `json:"weight"`
|
||||
// Holds the parameters to configure the given priority function
|
||||
@ -53,9 +53,9 @@ type PriorityPolicy struct {
|
||||
// Only one of its members may be specified
|
||||
type PredicateArgument struct {
|
||||
// The predicate that provides affinity for pods belonging to a service
|
||||
// It uses a label to identify minions that belong to the same "group"
|
||||
// It uses a label to identify nodes that belong to the same "group"
|
||||
ServiceAffinity *ServiceAffinity `json:"serviceAffinity"`
|
||||
// The predicate that checks whether a particular minion has a certain label
|
||||
// The predicate that checks whether a particular node has a certain label
|
||||
// defined or not, regardless of value
|
||||
LabelsPresence *LabelsPresence `json:"labelsPresence"`
|
||||
}
|
||||
@ -64,41 +64,41 @@ type PredicateArgument struct {
|
||||
// Only one of its members may be specified
|
||||
type PriorityArgument struct {
|
||||
// The priority function that ensures a good spread (anti-affinity) for pods belonging to a service
|
||||
// It uses a label to identify minions that belong to the same "group"
|
||||
// It uses a label to identify nodes that belong to the same "group"
|
||||
ServiceAntiAffinity *ServiceAntiAffinity `json:"serviceAntiAffinity"`
|
||||
// The priority function that checks whether a particular minion has a certain label
|
||||
// The priority function that checks whether a particular node has a certain label
|
||||
// defined or not, regardless of value
|
||||
LabelPreference *LabelPreference `json:"labelPreference"`
|
||||
}
|
||||
|
||||
// Holds the parameters that are used to configure the corresponding predicate
|
||||
type ServiceAffinity struct {
|
||||
// The list of labels that identify minion "groups"
|
||||
// All of the labels should match for the minion to be considered a fit for hosting the pod
|
||||
// The list of labels that identify node "groups"
|
||||
// All of the labels should match for the node to be considered a fit for hosting the pod
|
||||
Labels []string `json:"labels"`
|
||||
}
|
||||
|
||||
// Holds the parameters that are used to configure the corresponding predicate
|
||||
type LabelsPresence struct {
|
||||
// The list of labels that identify minion "groups"
|
||||
// All of the labels should be either present (or absent) for the minion to be considered a fit for hosting the pod
|
||||
// The list of labels that identify node "groups"
|
||||
// All of the labels should be either present (or absent) for the node to be considered a fit for hosting the pod
|
||||
Labels []string `json:"labels"`
|
||||
// The boolean flag that indicates whether the labels should be present or absent from the minion
|
||||
// The boolean flag that indicates whether the labels should be present or absent from the node
|
||||
Presence bool `json:"presence"`
|
||||
}
|
||||
|
||||
// Holds the parameters that are used to configure the corresponding priority function
|
||||
type ServiceAntiAffinity struct {
|
||||
// Used to identify minion "groups"
|
||||
// Used to identify node "groups"
|
||||
Label string `json:"label"`
|
||||
}
|
||||
|
||||
// Holds the parameters that are used to configure the corresponding priority function
|
||||
type LabelPreference struct {
|
||||
// Used to identify minion "groups"
|
||||
// Used to identify node "groups"
|
||||
Label string `json:"label"`
|
||||
// This is a boolean flag
|
||||
// If true, higher priority is given to minions that have the label
|
||||
// If false, higher priority is given to minions that do not have the label
|
||||
// If true, higher priority is given to nodes that have the label
|
||||
// If false, higher priority is given to nodes that do not have the label
|
||||
Presence bool `json:"presence"`
|
||||
}
|
||||
|
@ -48,7 +48,7 @@ type ConfigFactory struct {
|
||||
ScheduledPodLister *cache.StoreToPodLister
|
||||
// a means to list all known scheduled pods and pods assumed to have been scheduled.
|
||||
PodLister algorithm.PodLister
|
||||
// a means to list all minions
|
||||
// a means to list all nodes
|
||||
NodeLister *cache.StoreToNodeLister
|
||||
// a means to list all services
|
||||
ServiceLister *cache.StoreToServiceLister
|
||||
@ -180,9 +180,9 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String)
|
||||
// Begin populating scheduled pods.
|
||||
go f.scheduledPodPopulator.Run(f.StopEverything)
|
||||
|
||||
// Watch minions.
|
||||
// Minions may be listed frequently, so provide a local up-to-date cache.
|
||||
cache.NewReflector(f.createMinionLW(), &api.Node{}, f.NodeLister.Store, 0).RunUntil(f.StopEverything)
|
||||
// Watch nodes.
|
||||
// Nodes may be listed frequently, so provide a local up-to-date cache.
|
||||
cache.NewReflector(f.createNodeLW(), &api.Node{}, f.NodeLister.Store, 0).RunUntil(f.StopEverything)
|
||||
|
||||
// Watch and cache all service objects. Scheduler needs to find all pods
|
||||
// created by the same services or ReplicationControllers, so that it can spread them correctly.
|
||||
@ -209,9 +209,9 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String)
|
||||
return &scheduler.Config{
|
||||
Modeler: f.modeler,
|
||||
// The scheduler only needs to consider schedulable nodes.
|
||||
MinionLister: f.NodeLister.NodeCondition(api.NodeReady, api.ConditionTrue),
|
||||
Algorithm: algo,
|
||||
Binder: &binder{f.Client},
|
||||
NodeLister: f.NodeLister.NodeCondition(api.NodeReady, api.ConditionTrue),
|
||||
Algorithm: algo,
|
||||
Binder: &binder{f.Client},
|
||||
NextPod: func() *api.Pod {
|
||||
pod := f.PodQueue.Pop().(*api.Pod)
|
||||
glog.V(2).Infof("About to try and schedule pod %v", pod.Name)
|
||||
@ -245,8 +245,8 @@ func (factory *ConfigFactory) createAssignedPodLW() *cache.ListWatch {
|
||||
parseSelectorOrDie(client.PodHost+"!="))
|
||||
}
|
||||
|
||||
// createMinionLW returns a cache.ListWatch that gets all changes to minions.
func (factory *ConfigFactory) createMinionLW() *cache.ListWatch {
// createNodeLW returns a cache.ListWatch that gets all changes to nodes.
func (factory *ConfigFactory) createNodeLW() *cache.ListWatch {
// TODO: Filter out nodes that doesn't have NodeReady condition.
fields := fields.Set{client.NodeUnschedulable: "false"}.AsSelector()
return cache.NewListWatchFromClient(factory.Client, "nodes", api.NamespaceAll, fields)
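The renamed list-watch still restricts the scheduler's node cache to schedulable nodes through a field selector, so unschedulable nodes never reach the cache. A hedged sketch of the same construction (the literal "spec.unschedulable" field name is an assumption about what client.NodeUnschedulable expands to):

// Illustrative only: a field selector equivalent to the one above, fed to a
// ListWatch that a cache.Reflector keeps in sync with the API server.
sel := fields.Set{"spec.unschedulable": "false"}.AsSelector()
lw := cache.NewListWatchFromClient(factory.Client, "nodes", api.NamespaceAll, sel)
_ = lw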
@ -122,11 +122,11 @@ func PredicateTwo(pod *api.Pod, existingPods []*api.Pod, node string) (bool, err
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func PriorityOne(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
|
||||
func PriorityOne(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
|
||||
return []algorithm.HostPriority{}, nil
|
||||
}
|
||||
|
||||
func PriorityTwo(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
|
||||
func PriorityTwo(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
|
||||
return []algorithm.HostPriority{}, nil
|
||||
}
|
||||
|
||||
@ -179,7 +179,7 @@ func TestDefaultErrorFunc(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestMinionEnumerator(t *testing.T) {
|
||||
func TestNodeEnumerator(t *testing.T) {
|
||||
testList := &api.NodeList{
|
||||
Items: []api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "foo"}},
|
||||
|
@ -36,7 +36,7 @@ type PluginFactoryArgs struct {
|
||||
algorithm.PodLister
|
||||
algorithm.ServiceLister
|
||||
algorithm.ControllerLister
|
||||
NodeLister algorithm.MinionLister
|
||||
NodeLister algorithm.NodeLister
|
||||
NodeInfo predicates.NodeInfo
|
||||
}
|
||||
|
||||
|
@ -60,21 +60,21 @@ type genericScheduler struct {
randomLock sync.Mutex
}

func (g *genericScheduler) Schedule(pod *api.Pod, minionLister algorithm.MinionLister) (string, error) {
minions, err := minionLister.List()
func (g *genericScheduler) Schedule(pod *api.Pod, nodeLister algorithm.NodeLister) (string, error) {
nodes, err := nodeLister.List()
if err != nil {
return "", err
}
if len(minions.Items) == 0 {
if len(nodes.Items) == 0 {
return "", ErrNoNodesAvailable
}

filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, g.pods, g.predicates, minions)
filteredNodes, failedPredicateMap, err := findNodesThatFit(pod, g.pods, g.predicates, nodes)
if err != nil {
return "", err
}

priorityList, err := PrioritizeNodes(pod, g.pods, g.prioritizers, algorithm.FakeMinionLister(filteredNodes))
priorityList, err := PrioritizeNodes(pod, g.pods, g.prioritizers, algorithm.FakeNodeLister(filteredNodes))
if err != nil {
return "", err
}
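The rename does not change the control flow of Schedule: list the candidate nodes, drop the ones that fail the fit predicates, score the survivors, and pick a host. A compressed, hypothetical sketch of that pipeline (the function and its callbacks are stand-ins, not the code in this file):

// scheduleSketch: predicate filter, then priority scoring, then host selection.
func scheduleSketch(candidates []string, fits func(string) bool, score func(string) int) (string, error) {
	best, bestScore := "", -1
	for _, host := range candidates {
		if !fits(host) { // predicate stage: hard requirements
			continue
		}
		if s := score(host); s > bestScore { // priority stage: soft preferences
			best, bestScore = host, s
		}
	}
	if best == "" {
		return "", fmt.Errorf("no nodes available")
	}
	return best, nil // the real selectHost breaks ties among top scores randomly
}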
@ -88,8 +88,8 @@ func (g *genericScheduler) Schedule(pod *api.Pod, minionLister algorithm.MinionL
return g.selectHost(priorityList)
}

// This method takes a prioritized list of minions and sorts them in reverse order based on scores
// and then picks one randomly from the minions that had the highest score
// This method takes a prioritized list of nodes and sorts them in reverse order based on scores
// and then picks one randomly from the nodes that had the highest score
func (g *genericScheduler) selectHost(priorityList algorithm.HostPriorityList) (string, error) {
if len(priorityList) == 0 {
return "", fmt.Errorf("empty priorityList")
@ -104,8 +104,8 @@ func (g *genericScheduler) selectHost(priorityList algorithm.HostPriorityList) (
|
||||
return hosts[ix], nil
|
||||
}
|
||||
|
||||
// Filters the minions to find the ones that fit based on the given predicate functions
|
||||
// Each minion is passed through the predicate functions to determine if it is a fit
|
||||
// Filters the nodes to find the ones that fit based on the given predicate functions
|
||||
// Each node is passed through the predicate functions to determine if it is a fit
|
||||
func findNodesThatFit(pod *api.Pod, podLister algorithm.PodLister, predicateFuncs map[string]algorithm.FitPredicate, nodes api.NodeList) (api.NodeList, FailedPredicateMap, error) {
|
||||
filtered := []api.Node{}
|
||||
machineToPods, err := predicates.MapPodsToMachines(podLister)
|
||||
@ -141,19 +141,19 @@ func findNodesThatFit(pod *api.Pod, podLister algorithm.PodLister, predicateFunc
|
||||
return api.NodeList{Items: filtered}, failedPredicateMap, nil
|
||||
}
|
||||
|
||||
// Prioritizes the minions by running the individual priority functions sequentially.
// Prioritizes the nodes by running the individual priority functions sequentially.
// Each priority function is expected to set a score of 0-10
// 0 is the lowest priority score (least preferred minion) and 10 is the highest
// 0 is the lowest priority score (least preferred node) and 10 is the highest
// Each priority function can also have its own weight
// The minion scores returned by the priority function are multiplied by the weights to get weighted scores
// All scores are finally combined (added) to get the total weighted scores of all minions
func PrioritizeNodes(pod *api.Pod, podLister algorithm.PodLister, priorityConfigs []algorithm.PriorityConfig, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
// The node scores returned by the priority function are multiplied by the weights to get weighted scores
// All scores are finally combined (added) to get the total weighted scores of all nodes
func PrioritizeNodes(pod *api.Pod, podLister algorithm.PodLister, priorityConfigs []algorithm.PriorityConfig, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
result := algorithm.HostPriorityList{}

// If no priority configs are provided, then the EqualPriority function is applied
// This is required to generate the priority list in the required format
if len(priorityConfigs) == 0 {
return EqualPriority(pod, podLister, minionLister)
return EqualPriority(pod, podLister, nodeLister)
}

combinedScores := map[string]int{}
@ -164,7 +164,7 @@ func PrioritizeNodes(pod *api.Pod, podLister algorithm.PodLister, priorityConfig
continue
}
priorityFunc := priorityConfig.Function
prioritizedList, err := priorityFunc(pod, podLister, minionLister)
prioritizedList, err := priorityFunc(pod, podLister, nodeLister)
if err != nil {
return algorithm.HostPriorityList{}, err
}
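The doc comment above describes a plain weighted sum: every priority function returns 0-10 per node, each score is multiplied by that function's weight, and the products are added per node. A short worked sketch of the combination step (the weights and scores are invented):

// combineScores performs the per-host aggregation the comment describes.
func combineScores(weights []int, perFunctionScores []map[string]int) map[string]int {
	combined := map[string]int{}
	for i, scores := range perFunctionScores {
		for host, score := range scores {
			combined[host] += weights[i] * score
		}
	}
	return combined
}

// Example: a spreading priority (weight 1) scores node-a=10, node-b=2, while a
// label preference (weight 2) scores node-a=0, node-b=10. Combined totals:
// node-a = 1*10 + 2*0 = 10, node-b = 1*2 + 2*10 = 22, so node-b wins.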
@ -192,17 +192,17 @@ func getBestHosts(list algorithm.HostPriorityList) []string {
}

// EqualPriority is a prioritizer function that gives an equal weight of one to all nodes
func EqualPriority(_ *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
nodes, err := minionLister.List()
func EqualPriority(_ *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
nodes, err := nodeLister.List()
if err != nil {
glog.Errorf("failed to list nodes: %v", err)
return []algorithm.HostPriority{}, err
}

result := []algorithm.HostPriority{}
for _, minion := range nodes.Items {
for _, node := range nodes.Items {
result = append(result, algorithm.HostPriority{
Host: minion.Name,
Host: node.Name,
Score: 1,
})
}
@ -44,31 +44,31 @@ func hasNoPodsPredicate(pod *api.Pod, existingPods []*api.Pod, node string) (boo
|
||||
return len(existingPods) == 0, nil
|
||||
}
|
||||
|
||||
func numericPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
|
||||
nodes, err := minionLister.List()
|
||||
func numericPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
|
||||
nodes, err := nodeLister.List()
|
||||
result := []algorithm.HostPriority{}
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to list nodes: %v", err)
|
||||
}
|
||||
for _, minion := range nodes.Items {
|
||||
score, err := strconv.Atoi(minion.Name)
|
||||
for _, node := range nodes.Items {
|
||||
score, err := strconv.Atoi(node.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result = append(result, algorithm.HostPriority{
|
||||
Host: minion.Name,
|
||||
Host: node.Name,
|
||||
Score: score,
|
||||
})
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func reverseNumericPriority(pod *api.Pod, podLister algorithm.PodLister, minionLister algorithm.MinionLister) (algorithm.HostPriorityList, error) {
|
||||
func reverseNumericPriority(pod *api.Pod, podLister algorithm.PodLister, nodeLister algorithm.NodeLister) (algorithm.HostPriorityList, error) {
|
||||
var maxScore float64
|
||||
minScore := math.MaxFloat64
|
||||
reverseResult := []algorithm.HostPriority{}
|
||||
result, err := numericPriority(pod, podLister, minionLister)
|
||||
result, err := numericPriority(pod, podLister, nodeLister)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -288,7 +288,7 @@ func TestGenericScheduler(t *testing.T) {
|
||||
for _, test := range tests {
|
||||
random := rand.New(rand.NewSource(0))
|
||||
scheduler := NewGenericScheduler(test.predicates, test.prioritizers, algorithm.FakePodLister(test.pods), random)
|
||||
machine, err := scheduler.Schedule(test.pod, algorithm.FakeMinionLister(makeNodeList(test.nodes)))
|
||||
machine, err := scheduler.Schedule(test.pod, algorithm.FakeNodeLister(makeNodeList(test.nodes)))
|
||||
if test.expectsErr {
|
||||
if err == nil {
|
||||
t.Error("Unexpected non-error")
|
||||
|
@ -63,18 +63,18 @@ type SystemModeler interface {
}

// Scheduler watches for new unscheduled pods. It attempts to find
// minions that they fit on and writes bindings back to the api server.
// nodes that they fit on and writes bindings back to the api server.
type Scheduler struct {
config *Config
}

type Config struct {
// It is expected that changes made via modeler will be observed
// by MinionLister and Algorithm.
Modeler SystemModeler
MinionLister algorithm.MinionLister
Algorithm algorithm.ScheduleAlgorithm
Binder Binder
// by NodeLister and Algorithm.
Modeler SystemModeler
NodeLister algorithm.NodeLister
Algorithm algorithm.ScheduleAlgorithm
Binder Binder

// Rate at which we can create pods
BindPodsRateLimiter util.RateLimiter
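A Config now carries a NodeLister where it used to carry a MinionLister; the rest of the wiring is unchanged. A hedged sketch of assembling one by hand, in the style of the unit tests further down in this diff (the algorithm, binder, and pod source are placeholders, not real code from the tree):

// Illustrative wiring only; in real use the scheduler's ConfigFactory builds this.
cfg := &scheduler.Config{
	NodeLister: algorithm.FakeNodeLister(
		api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
	),
	Algorithm: myAlgorithm, // anything satisfying algorithm.ScheduleAlgorithm
	Binder:    myBinder,    // writes the chosen node back as a Binding
	NextPod: func() *api.Pod {
		return nextUnscheduledPod() // hypothetical queue pop
	},
}
_ = cfg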
@ -121,7 +121,7 @@ func (s *Scheduler) scheduleOne() {
defer func() {
metrics.E2eSchedulingLatency.Observe(metrics.SinceInMicroseconds(start))
}()
dest, err := s.config.Algorithm.Schedule(pod, s.config.MinionLister)
dest, err := s.config.Algorithm.Schedule(pod, s.config.NodeLister)
metrics.SchedulingAlgorithmLatency.Observe(metrics.SinceInMicroseconds(start))
if err != nil {
glog.V(1).Infof("Failed to schedule: %+v", pod)
@ -60,7 +60,7 @@ type mockScheduler struct {
|
||||
err error
|
||||
}
|
||||
|
||||
func (es mockScheduler) Schedule(pod *api.Pod, ml algorithm.MinionLister) (string, error) {
|
||||
func (es mockScheduler) Schedule(pod *api.Pod, ml algorithm.NodeLister) (string, error) {
|
||||
return es.machine, es.err
|
||||
}
|
||||
|
||||
@ -114,7 +114,7 @@ func TestScheduler(t *testing.T) {
|
||||
gotAssumedPod = pod
|
||||
},
|
||||
},
|
||||
MinionLister: algorithm.FakeMinionLister(
|
||||
NodeLister: algorithm.FakeNodeLister(
|
||||
api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
|
||||
),
|
||||
Algorithm: item.algo,
|
||||
@ -196,7 +196,7 @@ func TestSchedulerForgetAssumedPodAfterDelete(t *testing.T) {
|
||||
var gotBinding *api.Binding
|
||||
c := &Config{
|
||||
Modeler: modeler,
|
||||
MinionLister: algorithm.FakeMinionLister(
|
||||
NodeLister: algorithm.FakeNodeLister(
|
||||
api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
|
||||
),
|
||||
Algorithm: algo,
|
||||
@ -329,7 +329,7 @@ func TestSchedulerRateLimitsBinding(t *testing.T) {
|
||||
fr := FakeRateLimiter{util.NewTokenBucketRateLimiter(0.02, 1), []bool{}}
|
||||
c := &Config{
|
||||
Modeler: modeler,
|
||||
MinionLister: algorithm.FakeMinionLister(
|
||||
NodeLister: algorithm.FakeNodeLister(
|
||||
api.NodeList{Items: []api.Node{{ObjectMeta: api.ObjectMeta{Name: "machine1"}}}},
|
||||
),
|
||||
Algorithm: algo,
|
||||
|
@ -256,7 +256,7 @@ var _ = Describe("DaemonRestart", func() {
|
||||
})
|
||||
|
||||
It("Kubelet should not restart containers across restart", func() {
|
||||
nodeIPs, err := getMinionPublicIps(framework.Client)
|
||||
nodeIPs, err := getNodePublicIps(framework.Client)
|
||||
expectNoError(err)
|
||||
preRestarts, badNodes := getContainerRestarts(framework.Client, ns, labelSelector)
|
||||
if preRestarts != 0 {
|
||||
|
@ -91,7 +91,7 @@ func gcloudListNodes() {
|
||||
// -t/--test flag or ginkgo.focus flag.
|
||||
var _ = Describe("Density", func() {
|
||||
var c *client.Client
|
||||
var minionCount int
|
||||
var nodeCount int
|
||||
var RCName string
|
||||
var additionalPodsPrefix string
|
||||
var ns string
|
||||
@ -101,10 +101,10 @@ var _ = Describe("Density", func() {
|
||||
var err error
|
||||
c, err = loadClient()
|
||||
expectNoError(err)
|
||||
minions, err := c.Nodes().List(labels.Everything(), fields.Everything())
|
||||
nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
|
||||
expectNoError(err)
|
||||
minionCount = len(minions.Items)
|
||||
Expect(minionCount).NotTo(BeZero())
|
||||
nodeCount = len(nodes.Items)
|
||||
Expect(nodeCount).NotTo(BeZero())
|
||||
|
||||
// Terminating a namespace (deleting the remaining objects from it - which
|
||||
// generally means events) can affect the current run. Thus we wait for all
|
||||
@ -136,7 +136,7 @@ var _ = Describe("Density", func() {
|
||||
}
|
||||
|
||||
By("Removing additional pods if any")
|
||||
for i := 1; i <= minionCount; i++ {
|
||||
for i := 1; i <= nodeCount; i++ {
|
||||
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
|
||||
c.Pods(ns).Delete(name, nil)
|
||||
}
|
||||
@ -160,7 +160,7 @@ var _ = Describe("Density", func() {
|
||||
skip bool
|
||||
// Controls if e2e latency tests should be run (they are slow)
|
||||
runLatencyTest bool
|
||||
podsPerMinion int
|
||||
podsPerNode int
|
||||
// Controls how often the apiserver is polled for pods
|
||||
interval time.Duration
|
||||
}
|
||||
@ -170,17 +170,17 @@ var _ = Describe("Density", func() {
|
||||
// (metrics from other tests affects this one).
|
||||
// TODO: Reenable once we can measure latency only from a single test.
|
||||
// TODO: Expose runLatencyTest as ginkgo flag.
|
||||
{podsPerMinion: 3, skip: true, runLatencyTest: false, interval: 10 * time.Second},
|
||||
{podsPerMinion: 30, skip: true, runLatencyTest: true, interval: 10 * time.Second},
|
||||
{podsPerNode: 3, skip: true, runLatencyTest: false, interval: 10 * time.Second},
|
||||
{podsPerNode: 30, skip: true, runLatencyTest: true, interval: 10 * time.Second},
|
||||
// More than 30 pods per node is outside our v1.0 goals.
|
||||
// We might want to enable those tests in the future.
|
||||
{podsPerMinion: 50, skip: true, runLatencyTest: false, interval: 10 * time.Second},
|
||||
{podsPerMinion: 100, skip: true, runLatencyTest: false, interval: 1 * time.Second},
|
||||
{podsPerNode: 50, skip: true, runLatencyTest: false, interval: 10 * time.Second},
|
||||
{podsPerNode: 100, skip: true, runLatencyTest: false, interval: 1 * time.Second},
|
||||
}
|
||||
|
||||
for _, testArg := range densityTests {
|
||||
name := fmt.Sprintf("should allow starting %d pods per node", testArg.podsPerMinion)
|
||||
if testArg.podsPerMinion == 30 {
|
||||
name := fmt.Sprintf("should allow starting %d pods per node", testArg.podsPerNode)
|
||||
if testArg.podsPerNode == 30 {
|
||||
name = "[Performance suite] " + name
|
||||
}
|
||||
if testArg.skip {
|
||||
@ -188,7 +188,7 @@ var _ = Describe("Density", func() {
|
||||
}
|
||||
itArg := testArg
|
||||
It(name, func() {
|
||||
totalPods := itArg.podsPerMinion * minionCount
|
||||
totalPods := itArg.podsPerNode * nodeCount
|
||||
RCName = "density" + strconv.Itoa(totalPods) + "-" + uuid
|
||||
fileHndl, err := os.Create(fmt.Sprintf(testContext.OutputDir+"/%s/pod_states.csv", uuid))
|
||||
expectNoError(err)
|
||||
@ -318,11 +318,11 @@ var _ = Describe("Density", func() {
|
||||
|
||||
// Create some additional pods with throughput ~5 pods/sec.
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(minionCount)
|
||||
wg.Add(nodeCount)
|
||||
podLabels := map[string]string{
|
||||
"name": additionalPodsPrefix,
|
||||
}
|
||||
for i := 1; i <= minionCount; i++ {
|
||||
for i := 1; i <= nodeCount; i++ {
|
||||
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
|
||||
go createRunningPod(&wg, c, name, ns, "gcr.io/google_containers/pause:go", podLabels)
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
@ -330,7 +330,7 @@ var _ = Describe("Density", func() {
|
||||
wg.Wait()
|
||||
|
||||
Logf("Waiting for all Pods begin observed by the watch...")
|
||||
for start := time.Now(); len(watchTimes) < minionCount && time.Since(start) < timeout; time.Sleep(10 * time.Second) {
|
||||
for start := time.Now(); len(watchTimes) < nodeCount && time.Since(start) < timeout; time.Sleep(10 * time.Second) {
|
||||
}
|
||||
close(stopCh)
|
||||
|
||||
@ -404,7 +404,7 @@ var _ = Describe("Density", func() {
|
||||
}
|
||||
|
||||
Logf("Approx throughput: %v pods/min",
|
||||
float64(minionCount)/(e2eLag[len(e2eLag)-1].Latency.Minutes()))
|
||||
float64(nodeCount)/(e2eLag[len(e2eLag)-1].Latency.Minutes()))
|
||||
}
|
||||
})
|
||||
}
|
||||
|
@ -322,9 +322,9 @@ var _ = Describe("Kubectl client", func() {
|
||||
checkOutput(output, requiredStrings)
|
||||
|
||||
// Node
|
||||
minions, err := c.Nodes().List(labels.Everything(), fields.Everything())
|
||||
nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
node := minions.Items[0]
|
||||
node := nodes.Items[0]
|
||||
output = runKubectl("describe", "node", node.Name)
|
||||
requiredStrings = [][]string{
|
||||
{"Name:", node.Name},
|
||||
|
@ -660,7 +660,7 @@ var _ = Describe("Pods", func() {
|
||||
pod.Status.Host, pod.Name, pod.Spec.Containers[0].Name))
|
||||
req := framework.Client.Get().
|
||||
Prefix("proxy").
|
||||
Resource("minions").
|
||||
Resource("nodes").
|
||||
Name(pod.Status.Host).
|
||||
Suffix("exec", framework.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
|
||||
|
||||
@ -734,7 +734,7 @@ var _ = Describe("Pods", func() {
|
||||
|
||||
req := framework.Client.Get().
|
||||
Prefix("proxy").
|
||||
Resource("minions").
|
||||
Resource("nodes").
|
||||
Name(pod.Status.Host).
|
||||
Suffix("portForward", framework.Namespace.Name, pod.Name)
|
||||
|
||||
|
@ -413,7 +413,7 @@ var _ = Describe("Services", func() {
|
||||
t.CreateWebserverRC(1)
|
||||
|
||||
By("hitting the pod through the service's NodePort")
|
||||
testReachable(pickMinionIP(c), port.NodePort)
|
||||
testReachable(pickNodeIP(c), port.NodePort)
|
||||
|
||||
By("hitting the pod through the service's external load balancer")
|
||||
testLoadBalancerReachable(ingress, inboundPort)
|
||||
@ -482,7 +482,7 @@ var _ = Describe("Services", func() {
|
||||
t.CreateWebserverRC(1)
|
||||
|
||||
By("hitting the pod through the service's NodePort")
|
||||
testReachable(pickMinionIP(c), port.NodePort)
|
||||
testReachable(pickNodeIP(c), port.NodePort)
|
||||
|
||||
By("hitting the pod through the service's external load balancer")
|
||||
testLoadBalancerReachable(ingress, inboundPort)
|
||||
@ -529,7 +529,7 @@ var _ = Describe("Services", func() {
|
||||
t.CreateWebserverRC(1)
|
||||
|
||||
By("hitting the pod through the service's NodePort")
|
||||
ip := pickMinionIP(c)
|
||||
ip := pickNodeIP(c)
|
||||
testReachable(ip, nodePort)
|
||||
|
||||
hosts, err := NodeSSHHosts(c)
|
||||
@ -605,7 +605,7 @@ var _ = Describe("Services", func() {
|
||||
Failf("got unexpected len(Status.LoadBalancer.Ingresss) for NodePort service: %v", service)
|
||||
}
|
||||
By("hitting the pod through the service's NodePort")
|
||||
ip := pickMinionIP(f.Client)
|
||||
ip := pickNodeIP(f.Client)
|
||||
nodePort1 := port.NodePort // Save for later!
|
||||
testReachable(ip, nodePort1)
|
||||
|
||||
@ -638,7 +638,7 @@ var _ = Describe("Services", func() {
|
||||
Failf("got unexpected Status.LoadBalancer.Ingresss[0] for LoadBalancer service: %v", service)
|
||||
}
|
||||
By("hitting the pod through the service's NodePort")
|
||||
ip = pickMinionIP(f.Client)
|
||||
ip = pickNodeIP(f.Client)
|
||||
testReachable(ip, nodePort1)
|
||||
By("hitting the pod through the service's LoadBalancer")
|
||||
testLoadBalancerReachable(ingress1, 80)
|
||||
@ -710,10 +710,10 @@ var _ = Describe("Services", func() {
|
||||
Failf("got unexpected len(Status.LoadBalancer.Ingresss) for back-to-ClusterIP service: %v", service)
|
||||
}
|
||||
By("checking the NodePort (original) is closed")
|
||||
ip = pickMinionIP(f.Client)
|
||||
ip = pickNodeIP(f.Client)
|
||||
testNotReachable(ip, nodePort1)
|
||||
By("checking the NodePort (updated) is closed")
|
||||
ip = pickMinionIP(f.Client)
|
||||
ip = pickNodeIP(f.Client)
|
||||
testNotReachable(ip, nodePort2)
|
||||
By("checking the LoadBalancer is closed")
|
||||
testLoadBalancerNotReachable(ingress2, 80)
|
||||
@ -769,7 +769,7 @@ var _ = Describe("Services", func() {
|
||||
}
|
||||
|
||||
By("hitting the pod through the service's NodePort")
|
||||
ip := pickMinionIP(c)
|
||||
ip := pickNodeIP(c)
|
||||
testReachable(ip, nodePort)
|
||||
By("hitting the pod through the service's LoadBalancer")
|
||||
testLoadBalancerReachable(ingress, 80)
|
||||
@ -1249,7 +1249,7 @@ func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []st
|
||||
return ips
|
||||
}
|
||||
|
||||
func getMinionPublicIps(c *client.Client) ([]string, error) {
|
||||
func getNodePublicIps(c *client.Client) ([]string, error) {
|
||||
nodes, err := c.Nodes().List(labels.Everything(), fields.Everything())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -1262,8 +1262,8 @@ func getMinionPublicIps(c *client.Client) ([]string, error) {
|
||||
return ips, nil
|
||||
}
|
||||
|
||||
func pickMinionIP(c *client.Client) string {
|
||||
publicIps, err := getMinionPublicIps(c)
|
||||
func pickNodeIP(c *client.Client) string {
|
||||
publicIps, err := getNodePublicIps(c)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
if len(publicIps) == 0 {
|
||||
Failf("got unexpected number (%d) of public IPs", len(publicIps))
|
||||
|
@ -330,7 +330,7 @@ func getTestRequests() []struct {
|
||||
{"GET", path("endpoints", api.NamespaceDefault, "a"), "", code200},
|
||||
{"DELETE", timeoutPath("endpoints", api.NamespaceDefault, "a"), "", code200},
|
||||
|
||||
// Normal methods on minions
|
||||
// Normal methods on nodes
|
||||
{"GET", path("nodes", "", ""), "", code200},
|
||||
{"POST", timeoutPath("nodes", "", ""), aNode, code201},
|
||||
{"PUT", timeoutPath("nodes", "", "a"), aNode, code200},
|
||||
@ -364,7 +364,7 @@ func getTestRequests() []struct {
|
||||
{"GET", pathWithPrefix("proxy", "nodes", api.NamespaceDefault, "a"), "", code404},
|
||||
{"GET", pathWithPrefix("redirect", "nodes", api.NamespaceDefault, "a"), "", code404},
|
||||
// TODO: test .../watch/..., which doesn't end before the test timeout.
|
||||
// TODO: figure out how to create a minion so that it can successfully proxy/redirect.
|
||||
// TODO: figure out how to create a node so that it can successfully proxy/redirect.
|
||||
|
||||
// Non-object endpoints
|
||||
{"GET", "/", "", code200},
|
||||
|