Internal rename api.Minion -> api.Node

Author: Clayton Coleman
Date: 2014-12-07 22:44:27 -05:00
Parent: 650aead4c4
Commit: 19379b5a38

38 changed files with 212 additions and 213 deletions
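The diff below is a mechanical type rename: api.MinionList becomes api.NodeList and api.Minion becomes api.Node, while identifiers such as MinionLister, FakeMinionLister, and makeMinion keep their old names for now. As a minimal sketch (not part of this commit; the helper name and body are assumptions for illustration), a hypothetical caller inside the scheduler package reads the same before and after the rename; only the returned types change:

// Sketch only: a hypothetical helper, not part of this commit.
// It lists nodes through the scheduler's MinionLister and collects their names;
// after this rename List() returns api.NodeList and its Items are api.Node values.
func nodeNames(lister MinionLister) ([]string, error) {
	list, err := lister.List() // previously api.MinionList, now api.NodeList
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(list.Items))
	for ix := range list.Items {
		names = append(names, list.Items[ix].Name) // list.Items[ix] is an api.Node
	}
	return names, nil
}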

View File

@@ -76,18 +76,18 @@ func (g *genericScheduler) selectHost(priorityList HostPriorityList) (string, er
 // Filters the minions to find the ones that fit based on the given predicate functions
 // Each minion is passed through the predicate functions to determine if it is a fit
-func findNodesThatFit(pod api.Pod, podLister PodLister, predicates []FitPredicate, nodes api.MinionList) (api.MinionList, error) {
-	filtered := []api.Minion{}
+func findNodesThatFit(pod api.Pod, podLister PodLister, predicates []FitPredicate, nodes api.NodeList) (api.NodeList, error) {
+	filtered := []api.Node{}
 	machineToPods, err := MapPodsToMachines(podLister)
 	if err != nil {
-		return api.MinionList{}, err
+		return api.NodeList{}, err
 	}
 	for _, node := range nodes.Items {
 		fits := true
 		for _, predicate := range predicates {
 			fit, err := predicate(pod, machineToPods[node.Name], node.Name)
 			if err != nil {
-				return api.MinionList{}, err
+				return api.NodeList{}, err
 			}
 			if !fit {
 				fits = false
@@ -98,7 +98,7 @@ func findNodesThatFit(pod api.Pod, podLister PodLister, predicates []FitPredicat
 			filtered = append(filtered, node)
 		}
 	}
-	return api.MinionList{Items: filtered}, nil
+	return api.NodeList{Items: filtered}, nil
 }
 // Prioritizes the minions by running the individual priority functions sequentially.

View File

@@ -83,9 +83,9 @@ func reverseNumericPriority(pod api.Pod, podLister PodLister, minionLister Minio
 	return reverseResult, nil
 }
-func makeMinionList(nodeNames []string) api.MinionList {
-	result := api.MinionList{
-		Items: make([]api.Minion, len(nodeNames)),
+func makeMinionList(nodeNames []string) api.NodeList {
+	result := api.NodeList{
+		Items: make([]api.Node, len(nodeNames)),
 	}
 	for ix := range nodeNames {
 		result.Items[ix].Name = nodeNames[ix]

View File

@@ -23,15 +23,15 @@ import (
 // MinionLister interface represents anything that can list minions for a scheduler.
 type MinionLister interface {
-	List() (list api.MinionList, err error)
+	List() (list api.NodeList, err error)
 }
 // FakeMinionLister implements MinionLister on a []string for test purposes.
-type FakeMinionLister api.MinionList
+type FakeMinionLister api.NodeList
 // List returns minions as a []string.
-func (f FakeMinionLister) List() (api.MinionList, error) {
-	return api.MinionList(f), nil
+func (f FakeMinionLister) List() (api.NodeList, error) {
+	return api.NodeList(f), nil
 }
 // PodLister interface represents anything that can list pods for a scheduler.

View File

@@ -27,14 +27,14 @@ import (
 )
 type NodeInfo interface {
-	GetNodeInfo(nodeID string) (*api.Minion, error)
+	GetNodeInfo(nodeID string) (*api.Node, error)
 }
 type StaticNodeInfo struct {
-	*api.MinionList
+	*api.NodeList
 }
-func (nodes StaticNodeInfo) GetNodeInfo(nodeID string) (*api.Minion, error) {
+func (nodes StaticNodeInfo) GetNodeInfo(nodeID string) (*api.Node, error) {
 	for ix := range nodes.Items {
 		if nodes.Items[ix].Name == nodeID {
 			return &nodes.Items[ix], nil
@@ -47,7 +47,7 @@ type ClientNodeInfo struct {
 	*client.Client
 }
-func (nodes ClientNodeInfo) GetNodeInfo(nodeID string) (*api.Minion, error) {
+func (nodes ClientNodeInfo) GetNodeInfo(nodeID string) (*api.Node, error) {
 	return nodes.Minions().Get(nodeID)
 }

View File

@@ -25,10 +25,10 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
 )
-type FakeNodeInfo api.Minion
+type FakeNodeInfo api.Node
-func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*api.Minion, error) {
-	node := api.Minion(n)
+func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*api.Node, error) {
+	node := api.Node(n)
 	return &node, nil
 }
@@ -111,7 +111,7 @@ func TestPodFitsResources(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
-		node := api.Minion{Spec: api.NodeSpec{Capacity: makeResources(10, 20).Capacity}}
+		node := api.Node{Spec: api.NodeSpec{Capacity: makeResources(10, 20).Capacity}}
 		fit := ResourceFit{FakeNodeInfo(node)}
 		fits, err := fit.PodFitsResources(test.pod, test.existingPods, "machine")
@@ -335,7 +335,7 @@ func TestPodFitsSelector(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
-		node := api.Minion{ObjectMeta: api.ObjectMeta{Labels: test.labels}}
+		node := api.Node{ObjectMeta: api.ObjectMeta{Labels: test.labels}}
 		fit := NodeSelector{FakeNodeInfo(node)}
 		fits, err := fit.PodSelectorMatches(test.pod, []api.Pod{}, "machine")

View File

@@ -37,7 +37,7 @@ func calculateScore(requested, capacity int, node string) int {
 // Calculate the occupancy on a node. 'node' has information about the resources on the node.
 // 'pods' is a list of pods currently scheduled on the node.
-func calculateOccupancy(pod api.Pod, node api.Minion, pods []api.Pod) HostPriority {
+func calculateOccupancy(pod api.Pod, node api.Node, pods []api.Pod) HostPriority {
 	totalCPU := 0
 	totalMemory := 0
 	for _, existingPod := range pods {

View File

@@ -25,8 +25,8 @@ import (
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
 )
-func makeMinion(node string, cpu, memory int) api.Minion {
-	return api.Minion{
+func makeMinion(node string, cpu, memory int) api.Node {
+	return api.Node{
 		ObjectMeta: api.ObjectMeta{Name: node},
 		Spec: api.NodeSpec{
 			Capacity: api.ResourceList{
@@ -70,7 +70,7 @@ func TestLeastRequested(t *testing.T) {
 	tests := []struct {
 		pod          api.Pod
 		pods         []api.Pod
-		nodes        []api.Minion
+		nodes        []api.Node
 		expectedList HostPriorityList
 		test         string
 	}{
@@ -87,7 +87,7 @@ func TestLeastRequested(t *testing.T) {
 				Minion2 Score: (10 + 10) / 2 = 10
 			*/
 			pod: api.Pod{Spec: noResources},
-			nodes: []api.Minion{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
+			nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
			test: "nothing scheduled, nothing requested",
 		},
@@ -104,7 +104,7 @@ func TestLeastRequested(t *testing.T) {
 				Minion2 Score: (5 + 5) / 2 = 5
 			*/
 			pod: api.Pod{Spec: cpuAndMemory},
-			nodes: []api.Minion{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 6000, 10000)},
+			nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 6000, 10000)},
			expectedList: []HostPriority{{"machine1", 3}, {"machine2", 5}},
			test: "nothing scheduled, resources requested, differently sized machines",
 		},
@@ -121,7 +121,7 @@ func TestLeastRequested(t *testing.T) {
 				Minion2 Score: (10 + 10) / 2 = 10
 			*/
 			pod: api.Pod{Spec: noResources},
-			nodes: []api.Minion{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
+			nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
			expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
			test: "no resources requested, pods scheduled",
			pods: []api.Pod{
@@ -144,7 +144,7 @@ func TestLeastRequested(t *testing.T) {
 				Minion2 Score: (4 + 7.5) / 2 = 5
 			*/
 			pod: api.Pod{Spec: noResources},
-			nodes: []api.Minion{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
+			nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
			expectedList: []HostPriority{{"machine1", 7}, {"machine2", 5}},
			test: "no resources requested, pods scheduled with resources",
			pods: []api.Pod{
@@ -167,7 +167,7 @@ func TestLeastRequested(t *testing.T) {
 				Minion2 Score: (4 + 5) / 2 = 4
 			*/
 			pod: api.Pod{Spec: cpuAndMemory},
-			nodes: []api.Minion{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
+			nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 20000)},
			expectedList: []HostPriority{{"machine1", 5}, {"machine2", 4}},
			test: "resources requested, pods scheduled with resources",
			pods: []api.Pod{
@@ -188,7 +188,7 @@ func TestLeastRequested(t *testing.T) {
 				Minion2 Score: (4 + 8) / 2 = 6
 			*/
 			pod: api.Pod{Spec: cpuAndMemory},
-			nodes: []api.Minion{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 50000)},
+			nodes: []api.Node{makeMinion("machine1", 10000, 20000), makeMinion("machine2", 10000, 50000)},
			expectedList: []HostPriority{{"machine1", 5}, {"machine2", 6}},
			test: "resources requested, pods scheduled with resources, differently sized machines",
			pods: []api.Pod{
@@ -209,7 +209,7 @@ func TestLeastRequested(t *testing.T) {
 				Minion2 Score: (0 + 5) / 2 = 2
 			*/
 			pod: api.Pod{Spec: cpuOnly},
-			nodes: []api.Minion{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
+			nodes: []api.Node{makeMinion("machine1", 4000, 10000), makeMinion("machine2", 4000, 10000)},
			expectedList: []HostPriority{{"machine1", 5}, {"machine2", 2}},
			test: "requested resources exceed minion capacity",
			pods: []api.Pod{
@@ -219,7 +219,7 @@ func TestLeastRequested(t *testing.T) {
 		},
 		{
 			pod: api.Pod{Spec: noResources},
-			nodes: []api.Minion{makeMinion("machine1", 0, 0), makeMinion("machine2", 0, 0)},
+			nodes: []api.Node{makeMinion("machine1", 0, 0), makeMinion("machine2", 0, 0)},
			expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}},
			test: "zero minion resources, pods scheduled with resources",
			pods: []api.Pod{
@@ -230,7 +230,7 @@ func TestLeastRequested(t *testing.T) {
 	}
 	for _, test := range tests {
-		list, err := LeastRequestedPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(api.MinionList{Items: test.nodes}))
+		list, err := LeastRequestedPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(api.NodeList{Items: test.nodes}))
 		if err != nil {
 			t.Errorf("unexpected error: %v", err)
 		}