Mirror of https://github.com/k3s-io/kubernetes.git
Sync node status from node controller to master.
@@ -17,97 +17,125 @@ limitations under the License.
package controller

import (
	"errors"
	"fmt"
	"net"
	"reflect"
	"sync"
	"time"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/probe"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"

	"github.com/golang/glog"
)

var (
	ErrRegistration = errors.New("unable to register all nodes")
)

type NodeController struct {
	cloud           cloudprovider.Interface
	matchRE         string
	staticResources *api.NodeResources
	nodes           []string
	kubeClient      client.Interface
	kubeletClient   client.KubeletHealthChecker
}

// NewNodeController returns a new node controller to sync instances from cloudprovider.
// TODO: NodeController health checker should be a separate package other than
// kubeletclient; node health check != kubelet health check.
func NewNodeController(
	cloud cloudprovider.Interface,
	matchRE string,
	nodes []string,
	staticResources *api.NodeResources,
	kubeClient client.Interface,
	kubeletClient client.KubeletHealthChecker) *NodeController {
	return &NodeController{
		cloud:           cloud,
		matchRE:         matchRE,
		nodes:           nodes,
		staticResources: staticResources,
		kubeClient:      kubeClient,
		kubeletClient:   kubeletClient,
	}
}
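For orientation (an editorial sketch, not part of the diff): constructing and starting the controller with the new six-argument signature, using the fakes defined in the test file further down. The period and retry count here are arbitrary values.

	handler := &FakeNodeHandler{}                        // stands in for a live client.Interface
	kubelet := &FakeKubeletClient{Status: probe.Success} // stands in for the kubelet health checker
	nc := NewNodeController(nil, "", []string{"10.0.0.1"}, &api.NodeResources{}, handler, kubelet)
	nc.Run(10*time.Second, 5) // register the static node, then sync status every 10s; retry registration up to 5 times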
// Run creates the initial node list and starts syncing instances from cloudprovider, if any.
// It also starts syncing cluster node status.
func (s *NodeController) Run(period time.Duration, retryCount int) {
	// Register initial set of nodes with their status set.
	var nodes *api.NodeList
	var err error
	if s.isRunningCloudProvider() {
		nodes, err = s.CloudNodes()
		if err != nil {
			glog.Errorf("Error loading initial node from cloudprovider: %v", err)
		}
	} else {
		nodes, err = s.StaticNodes()
		if err != nil {
			glog.Errorf("Error loading initial static nodes")
		}
	}
	nodes = s.DoChecks(nodes)
	if err := s.RegisterNodes(nodes, retryCount, period); err != nil {
		glog.Errorf("Error registering node list: %+v", nodes)
	}

	// Start syncing node list from cloudprovider.
	if s.isRunningCloudProvider() {
		go util.Forever(func() {
			if err = s.SyncCloud(); err != nil {
				glog.Errorf("Error syncing cloud: %v", err)
			}
		}, period)
	}

	// Start syncing node status.
	go util.Forever(func() {
		if err = s.SyncNodeStatus(); err != nil {
			glog.Errorf("Error syncing status: %v", err)
		}
	}, period)
}
// RegisterNodes registers the given list of nodes, retrying up to retryCount times.
func (s *NodeController) RegisterNodes(nodes *api.NodeList, retryCount int, retryInterval time.Duration) error {
	registered := util.NewStringSet()
	for i := 0; i < retryCount; i++ {
		for _, node := range nodes.Items {
			if registered.Has(node.Name) {
				continue
			}
			_, err := s.kubeClient.Nodes().Create(&node)
			if err == nil {
				registered.Insert(node.Name)
				glog.Infof("Registered node in registry: %s", node.Name)
			} else {
				glog.Errorf("Error registering node %s, retrying: %s", node.Name, err)
			}
			if registered.Len() == len(nodes.Items) {
				glog.Infof("Successfully registered all nodes")
				return nil
			}
		}
		time.Sleep(retryInterval)
	}
	if registered.Len() != len(nodes.Items) {
		return ErrRegistration
	}
	return nil
}
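To make the retry accounting concrete, a hypothetical Example-style walk-through of the "first node always fails" case from TestRegisterNodes below: pass one issues two Create calls (node0 fails, node1 succeeds); pass two retries only node0, which fails again, so after retryCount passes the set is incomplete and ErrRegistration is returned.

	func ExampleNodeController_RegisterNodes() { // hypothetical; mirrors the last TestRegisterNodes case
		handler := &FakeNodeHandler{
			CreateHook: func(fake *FakeNodeHandler, node *api.Node) bool {
				return node.Name != "node0" // node0 always fails to register
			},
		}
		nc := NewNodeController(nil, "", nil, &api.NodeResources{}, handler, nil)
		nodes := &api.NodeList{Items: []api.Node{*newNode("node0"), *newNode("node1")}}
		err := nc.RegisterNodes(nodes, 2, time.Millisecond)
		fmt.Println(err != nil, handler.RequestCount, len(handler.CreatedNodes))
		// Output: true 3 1
	}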
// SyncCloud synchronizes the list of instances from cloudprovider to master server.
func (s *NodeController) SyncCloud() error {
	matches, err := s.CloudNodes()
	if err != nil {
		return err
	}
@@ -120,7 +148,7 @@ func (s *NodeController) SyncCloud() error {
		nodeMap[node.Name] = &node
	}

	// Create nodes which have been created in cloud, but not in kubernetes cluster.
	for _, node := range matches.Items {
		if _, ok := nodeMap[node.Name]; !ok {
			glog.Infof("Create node in registry: %s", node.Name)
@@ -132,6 +160,7 @@ func (s *NodeController) SyncCloud() error {
		delete(nodeMap, node.Name)
	}

	// Delete nodes which have been deleted from cloud, but not from kubernetes cluster.
	for nodeID := range nodeMap {
		glog.Infof("Delete node from registry: %s", nodeID)
		err = s.kubeClient.Nodes().Delete(nodeID)
@@ -139,29 +168,121 @@ func (s *NodeController) SyncCloud() error {
			glog.Errorf("Delete node error: %s", nodeID)
		}
	}

	return nil
}

// SyncNodeStatus synchronizes cluster nodes status to master server.
func (s *NodeController) SyncNodeStatus() error {
	nodes, err := s.kubeClient.Nodes().List()
	if err != nil {
		return err
	}
	oldNodes := make(map[string]api.Node)
	for _, node := range nodes.Items {
		oldNodes[node.Name] = node
	}
	nodes = s.DoChecks(nodes)
	for _, node := range nodes.Items {
		if reflect.DeepEqual(node, oldNodes[node.Name]) {
			glog.V(2).Infof("skip updating node %v", node.Name)
			continue
		}
		glog.V(2).Infof("updating node %v", node.Name)
		_, err = s.kubeClient.Nodes().Update(&node)
		if err != nil {
			glog.Errorf("error updating node %s: %v", node.Name, err)
		}
	}
	return nil
}
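Worth noting (an observation, not part of the diff): because of the reflect.DeepEqual guard, a second SyncNodeStatus pass over unchanged nodes costs only the List call; the tail of TestSyncNodeStatus below asserts exactly this. A sketch using the fakes from the test file:

	handler := &FakeNodeHandler{Existing: []*api.Node{newNode("node0")}}
	nc := NewNodeController(nil, "", nil, nil, handler, &FakeKubeletClient{Status: probe.Success})
	_ = nc.SyncNodeStatus() // List + Update: node0 gains a NodeReady condition
	handler.RequestCount = 0
	_ = nc.SyncNodeStatus() // List only: DoCheck recomputes identical conditions, so the Update is skipped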
// DoChecks performs health checking for given list of nodes.
func (s *NodeController) DoChecks(nodes *api.NodeList) *api.NodeList {
	var wg sync.WaitGroup
	wg.Add(len(nodes.Items))
	for i := range nodes.Items {
		// Check all nodes in parallel; indexing into the slice gives each
		// goroutine a stable pointer to its own element.
		go func(node *api.Node) {
			node.Status.Conditions = s.DoCheck(node)
			wg.Done()
		}(&nodes.Items[i])
	}
	wg.Wait()
	return nodes
}
// DoCheck performs health checking for given node.
func (s *NodeController) DoCheck(node *api.Node) []api.NodeCondition {
	var conditions []api.NodeCondition
	switch status, err := s.kubeletClient.HealthCheck(node.Name); {
	case err != nil:
		glog.V(2).Infof("NodeController: node %s health check error: %v", node.Name, err)
		conditions = append(conditions, api.NodeCondition{
			Kind:   api.NodeReady,
			Status: api.ConditionUnknown,
		})
	case status == probe.Failure:
		conditions = append(conditions, api.NodeCondition{
			Kind:   api.NodeReady,
			Status: api.ConditionNone,
		})
	default:
		conditions = append(conditions, api.NodeCondition{
			Kind:   api.NodeReady,
			Status: api.ConditionFull,
		})
	}
	glog.V(5).Infof("NodeController: node %q status was %+v", node.Name, conditions)
	return conditions
}
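The mapping, in short: kubelet error maps to ConditionUnknown, probe.Failure to ConditionNone, anything else to ConditionFull. A sketch exercising the failure branch with the FakeKubeletClient defined in the test file below:

	nc := NewNodeController(nil, "", nil, nil, nil, &FakeKubeletClient{Status: probe.Failure})
	conditions := nc.DoCheck(newNode("node0"))
	// Expect exactly one condition: Kind == api.NodeReady, Status == api.ConditionNone.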
// StaticNodes constructs and returns api.NodeList for static nodes. If error
// occurs, an empty NodeList will be returned with a non-nil error info.
func (s *NodeController) StaticNodes() (*api.NodeList, error) {
	result := &api.NodeList{}
	for _, nodeID := range s.nodes {
		node := api.Node{
			ObjectMeta: api.ObjectMeta{Name: nodeID},
			Spec:       api.NodeSpec{Capacity: s.staticResources.Capacity},
		}
		addr := net.ParseIP(nodeID)
		if addr != nil {
			node.Status.HostIP = nodeID
		} else {
			addrs, err := net.LookupIP(nodeID)
			if err != nil {
				glog.Errorf("Can't get ip address of node %v", nodeID)
			} else if len(addrs) == 0 {
				glog.Errorf("No ip address for node %v", nodeID)
			} else {
				node.Status.HostIP = addrs[0].String()
			}
		}
		result.Items = append(result.Items, node)
	}
	return result, nil
}
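StaticNodes accepts either literal IPs or resolvable hostnames as node IDs; a small sketch of the two branches (the hostname is an arbitrary example, and HostIP simply stays empty when resolution fails, matching the error-tolerant behavior above):

	for _, id := range []string{"10.0.0.1", "localhost"} {
		if net.ParseIP(id) != nil {
			fmt.Printf("%s: literal IP, used verbatim as HostIP\n", id)
		} else if addrs, err := net.LookupIP(id); err == nil && len(addrs) > 0 {
			fmt.Printf("%s: resolved to %s\n", id, addrs[0].String())
		}
	}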
// CloudNodes constructs and returns api.NodeList from cloudprovider. If error
// occurs, an empty NodeList will be returned with a non-nil error info.
func (s *NodeController) CloudNodes() (*api.NodeList, error) {
	result := &api.NodeList{}
	instances, ok := s.cloud.Instances()
	if !ok {
		return result, fmt.Errorf("cloud doesn't support instances")
	}
	matches, err := instances.List(s.matchRE)
	if err != nil {
		return result, err
	}
	for i := range matches {
		node := api.Node{}
		node.Name = matches[i]
		hostIP, err := instances.IPAddress(matches[i])
		if err != nil {
			glog.Errorf("error getting instance ip address for %s: %v", matches[i], err)
		} else {
			node.Status.HostIP = hostIP.String()
		}
		resources, err := instances.GetNodeResources(matches[i])
		if err != nil {
@@ -171,8 +292,14 @@ func (s *NodeController) cloudNodes() (*api.NodeList, error) {
			resources = s.staticResources
		}
		if resources != nil {
			node.Spec.Capacity = resources.Capacity
		}
		result.Items = append(result.Items, node)
	}
	return result, nil
}

// isRunningCloudProvider checks if cluster is running with cloud provider.
func (s *NodeController) isRunningCloudProvider() bool {
	return s.cloud != nil && len(s.matchRE) > 0
}
@@ -17,19 +17,22 @@ limitations under the License.
package controller

import (
	"errors"
	"fmt"
	"net"
	"reflect"
	"sort"
	"testing"
	"time"

	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	fake_cloud "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/fake"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/probe"
)

func newNode(name string) *api.Node {
	return &api.Node{ObjectMeta: api.ObjectMeta{Name: name}}
}
// FakeNodeHandler is a fake implementation of NodesInterface and NodeInterface.
type FakeNodeHandler struct {
	client.Fake
	client.FakeNodes
@@ -41,6 +44,7 @@ type FakeNodeHandler struct {
	// Output
	CreatedNodes []*api.Node
	DeletedNodes []*api.Node
	UpdatedNodes []*api.Node
	RequestCount int
}
@@ -51,7 +55,8 @@ func (c *FakeNodeHandler) Nodes() client.NodeInterface {
func (m *FakeNodeHandler) Create(node *api.Node) (*api.Node, error) {
	defer func() { m.RequestCount++ }()
	if m.CreateHook == nil || m.CreateHook(m, node) {
		nodeCopy := *node
		m.CreatedNodes = append(m.CreatedNodes, &nodeCopy)
		return node, nil
	} else {
		return nil, fmt.Errorf("Create error.")
@@ -60,18 +65,27 @@ func (m *FakeNodeHandler) Create(node *api.Node) (*api.Node, error) {
func (m *FakeNodeHandler) List() (*api.NodeList, error) {
	defer func() { m.RequestCount++ }()
	var nodes []*api.Node
	for i := 0; i < len(m.UpdatedNodes); i++ {
		if !contains(m.UpdatedNodes[i], m.DeletedNodes) {
			nodes = append(nodes, m.UpdatedNodes[i])
		}
	}
	for i := 0; i < len(m.Existing); i++ {
		if !contains(m.Existing[i], m.DeletedNodes) && !contains(m.Existing[i], nodes) {
			nodes = append(nodes, m.Existing[i])
		}
	}
	for i := 0; i < len(m.CreatedNodes); i++ {
		// Index into CreatedNodes, not Existing: the two slices can differ in
		// length, so indexing Existing here would check the wrong node (or panic).
		if !contains(m.CreatedNodes[i], m.DeletedNodes) && !contains(m.CreatedNodes[i], nodes) {
			nodes = append(nodes, m.CreatedNodes[i])
		}
	}
	nodeList := &api.NodeList{}
	for _, node := range nodes {
		nodeList.Items = append(nodeList.Items, *node)
	}
	return nodeList, nil
}
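The precedence here: updated nodes win over existing and created ones, and deleted nodes are filtered everywhere. A quick sketch with hypothetical contents (assuming the CreatedNodes indexing fix noted above):

	handler := &FakeNodeHandler{
		Existing:     []*api.Node{newNode("node0")},
		CreatedNodes: []*api.Node{newNode("node1")},
		DeletedNodes: []*api.Node{newNode("node0")},
	}
	list, _ := handler.List()
	// node0 is filtered out as deleted; list.Items holds only node1.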
func (m *FakeNodeHandler) Delete(id string) error {
@@ -80,142 +94,377 @@ func (m *FakeNodeHandler) Delete(id string) error {
	return nil
}

func (m *FakeNodeHandler) Update(node *api.Node) (*api.Node, error) {
	nodeCopy := *node
	m.UpdatedNodes = append(m.UpdatedNodes, &nodeCopy)
	m.RequestCount++
	return node, nil
}
// FakeKubeletClient is a fake implementation of KubeletClient.
type FakeKubeletClient struct {
	Status probe.Status
	Err    error
}

func (c *FakeKubeletClient) GetPodStatus(host, podNamespace, podID string) (api.PodStatusResult, error) {
	return api.PodStatusResult{}, errors.New("Not Implemented")
}

func (c *FakeKubeletClient) HealthCheck(host string) (probe.Status, error) {
	return c.Status, c.Err
}
func TestRegisterNodes(t *testing.T) {
	table := []struct {
		fakeNodeHandler      *FakeNodeHandler
		machines             []string
		retryCount           int
		expectedRequestCount int
		expectedCreateCount  int
		expectedFail         bool
	}{
		{
			// Register two nodes normally.
			machines: []string{"node0", "node1"},
			fakeNodeHandler: &FakeNodeHandler{
				CreateHook: func(fake *FakeNodeHandler, node *api.Node) bool { return true },
			},
			retryCount:           1,
			expectedRequestCount: 2,
			expectedCreateCount:  2,
			expectedFail:         false,
		},
		{
			// No machine to register.
			machines: []string{},
			fakeNodeHandler: &FakeNodeHandler{
				CreateHook: func(fake *FakeNodeHandler, node *api.Node) bool { return true },
			},
			retryCount:           1,
			expectedRequestCount: 0,
			expectedCreateCount:  0,
			expectedFail:         false,
		},
		{
			// Fail the first two requests.
			machines: []string{"node0", "node1"},
			fakeNodeHandler: &FakeNodeHandler{
				CreateHook: func(fake *FakeNodeHandler, node *api.Node) bool {
					if fake.RequestCount == 0 || fake.RequestCount == 1 {
						return false
					}
					return true
				},
			},
			retryCount:           10,
			expectedRequestCount: 4,
			expectedCreateCount:  2,
			expectedFail:         false,
		},
		{
			// The first node always fails.
			machines: []string{"node0", "node1"},
			fakeNodeHandler: &FakeNodeHandler{
				CreateHook: func(fake *FakeNodeHandler, node *api.Node) bool {
					if node.Name == "node0" {
						return false
					}
					return true
				},
			},
			retryCount:           2,
			expectedRequestCount: 3, // 2 for node0, 1 for node1
			expectedCreateCount:  1,
			expectedFail:         true,
		},
	}
	for _, item := range table {
		nodes := api.NodeList{}
		for _, machine := range item.machines {
			nodes.Items = append(nodes.Items, *newNode(machine))
		}
		nodeController := NewNodeController(nil, "", item.machines, &api.NodeResources{}, item.fakeNodeHandler, nil)
		err := nodeController.RegisterNodes(&nodes, item.retryCount, time.Millisecond)
		if !item.expectedFail && err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if item.expectedFail && err == nil {
			t.Errorf("unexpected non-error")
		}
		if item.fakeNodeHandler.RequestCount != item.expectedRequestCount {
			t.Errorf("expected %v calls, but got %v.", item.expectedRequestCount, item.fakeNodeHandler.RequestCount)
		}
		if len(item.fakeNodeHandler.CreatedNodes) != item.expectedCreateCount {
			t.Errorf("expected %v created nodes, but got %v.", item.expectedCreateCount, len(item.fakeNodeHandler.CreatedNodes))
		}
	}
}
func TestCreateStaticNodes(t *testing.T) {
	table := []struct {
		machines      []string
		expectedNodes *api.NodeList
	}{
		{
			machines:      []string{},
			expectedNodes: &api.NodeList{},
		},
		{
			machines: []string{"node0"},
			expectedNodes: &api.NodeList{
				Items: []api.Node{
					{
						ObjectMeta: api.ObjectMeta{Name: "node0"},
						Spec:       api.NodeSpec{},
						Status:     api.NodeStatus{},
					},
				},
			},
		},
	}
	for _, item := range table {
		nodeController := NewNodeController(nil, "", item.machines, &api.NodeResources{}, nil, nil)
		nodes, err := nodeController.StaticNodes()
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if !reflect.DeepEqual(item.expectedNodes, nodes) {
			t.Errorf("expected node list %+v, got %+v", item.expectedNodes, nodes)
		}
	}
}
func TestCreateCloudNodes(t *testing.T) {
	resourceList := api.ResourceList{
		api.ResourceCPU:    *resource.NewMilliQuantity(1000, resource.DecimalSI),
		api.ResourceMemory: *resource.NewQuantity(3000, resource.DecimalSI),
	}

	table := []struct {
		fakeCloud     *fake_cloud.FakeCloud
		machines      []string
		expectedNodes *api.NodeList
	}{
		{
			fakeCloud:     &fake_cloud.FakeCloud{},
			expectedNodes: &api.NodeList{},
		},
		{
			fakeCloud: &fake_cloud.FakeCloud{
				Machines:      []string{"node0"},
				IP:            net.ParseIP("1.2.3.4"),
				NodeResources: &api.NodeResources{Capacity: resourceList},
			},
			expectedNodes: &api.NodeList{
				Items: []api.Node{
					{
						ObjectMeta: api.ObjectMeta{Name: "node0"},
						Spec:       api.NodeSpec{Capacity: resourceList},
						Status:     api.NodeStatus{HostIP: "1.2.3.4"},
					},
				},
			},
		},
	}
	for _, item := range table {
		nodeController := NewNodeController(item.fakeCloud, ".*", nil, &api.NodeResources{}, nil, nil)
		nodes, err := nodeController.CloudNodes()
		if err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if !reflect.DeepEqual(item.expectedNodes, nodes) {
			t.Errorf("expected node list %+v, got %+v", item.expectedNodes, nodes)
		}
	}
}
func TestSyncCloud(t *testing.T) {
	table := []struct {
		fakeNodeHandler      *FakeNodeHandler
		fakeCloud            *fake_cloud.FakeCloud
		matchRE              string
		expectedRequestCount int
		expectedCreated      []string
		expectedDeleted      []string
	}{
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{newNode("node0")},
			},
			fakeCloud: &fake_cloud.FakeCloud{
				Machines: []string{"node0", "node1"},
			},
			matchRE:              ".*",
			expectedRequestCount: 2, // List + Create
			expectedCreated:      []string{"node1"},
			expectedDeleted:      []string{},
		},
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{newNode("node0"), newNode("node1")},
			},
			fakeCloud: &fake_cloud.FakeCloud{
				Machines: []string{"node0"},
			},
			matchRE:              ".*",
			expectedRequestCount: 2, // List + Delete
			expectedCreated:      []string{},
			expectedDeleted:      []string{"node1"},
		},
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{newNode("node0")},
			},
			fakeCloud: &fake_cloud.FakeCloud{
				Machines: []string{"node0", "node1", "fake"},
			},
			matchRE:              "node[0-9]+",
			expectedRequestCount: 2, // List + Create
			expectedCreated:      []string{"node1"},
			expectedDeleted:      []string{},
		},
	}

	for _, item := range table {
		nodeController := NewNodeController(item.fakeCloud, item.matchRE, nil, &api.NodeResources{}, item.fakeNodeHandler, nil)
		if err := nodeController.SyncCloud(); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if item.fakeNodeHandler.RequestCount != item.expectedRequestCount {
			t.Errorf("expected %v calls, but got %v.", item.expectedRequestCount, item.fakeNodeHandler.RequestCount)
		}
		nodes := sortedNodeNames(item.fakeNodeHandler.CreatedNodes)
		if !reflect.DeepEqual(item.expectedCreated, nodes) {
			t.Errorf("expected node list %+v, got %+v", item.expectedCreated, nodes)
		}
		nodes = sortedNodeNames(item.fakeNodeHandler.DeletedNodes)
		if !reflect.DeepEqual(item.expectedDeleted, nodes) {
			t.Errorf("expected node list %+v, got %+v", item.expectedDeleted, nodes)
		}
	}
}
func TestHealthCheckNode(t *testing.T) {
	table := []struct {
		node               *api.Node
		fakeKubeletClient  *FakeKubeletClient
		expectedConditions []api.NodeCondition
	}{
		{
			node: newNode("node0"),
			fakeKubeletClient: &FakeKubeletClient{
				Status: probe.Success,
				Err:    nil,
			},
			expectedConditions: []api.NodeCondition{
				{
					Kind:   api.NodeReady,
					Status: api.ConditionFull,
				},
			},
		},
		{
			node: newNode("node0"),
			fakeKubeletClient: &FakeKubeletClient{
				Status: probe.Failure,
				Err:    nil,
			},
			expectedConditions: []api.NodeCondition{
				{
					Kind:   api.NodeReady,
					Status: api.ConditionNone,
				},
			},
		},
		{
			node: newNode("node1"),
			fakeKubeletClient: &FakeKubeletClient{
				Status: probe.Failure,
				Err:    errors.New("Error"),
			},
			expectedConditions: []api.NodeCondition{
				{
					Kind:   api.NodeReady,
					Status: api.ConditionUnknown,
				},
			},
		},
	}

	for _, item := range table {
		nodeController := NewNodeController(nil, "", nil, nil, nil, item.fakeKubeletClient)
		conditions := nodeController.DoCheck(item.node)
		if !reflect.DeepEqual(item.expectedConditions, conditions) {
			t.Errorf("expected conditions %+v, got %+v", item.expectedConditions, conditions)
		}
	}
}
func TestSyncNodeStatus(t *testing.T) {
	table := []struct {
		fakeNodeHandler      *FakeNodeHandler
		fakeKubeletClient    *FakeKubeletClient
		expectedNodes        []*api.Node
		expectedRequestCount int
	}{
		{
			fakeNodeHandler: &FakeNodeHandler{
				Existing: []*api.Node{newNode("node0"), newNode("node1")},
			},
			fakeKubeletClient: &FakeKubeletClient{
				Status: probe.Success,
				Err:    nil,
			},
			expectedNodes: []*api.Node{
				{
					ObjectMeta: api.ObjectMeta{Name: "node0"},
					Status:     api.NodeStatus{Conditions: []api.NodeCondition{{Kind: api.NodeReady, Status: api.ConditionFull}}},
				},
				{
					ObjectMeta: api.ObjectMeta{Name: "node1"},
					Status:     api.NodeStatus{Conditions: []api.NodeCondition{{Kind: api.NodeReady, Status: api.ConditionFull}}},
				},
			},
			expectedRequestCount: 3, // List + 2xUpdate
		},
	}

	for _, item := range table {
		nodeController := NewNodeController(nil, "", nil, nil, item.fakeNodeHandler, item.fakeKubeletClient)
		if err := nodeController.SyncNodeStatus(); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if item.fakeNodeHandler.RequestCount != item.expectedRequestCount {
			t.Errorf("expected %v calls, but got %v.", item.expectedRequestCount, item.fakeNodeHandler.RequestCount)
		}
		if !reflect.DeepEqual(item.expectedNodes, item.fakeNodeHandler.UpdatedNodes) {
			t.Errorf("expected nodes %+v, got %+v", item.expectedNodes, item.fakeNodeHandler.UpdatedNodes)
		}
		item.fakeNodeHandler.RequestCount = 0
		if err := nodeController.SyncNodeStatus(); err != nil {
			t.Errorf("unexpected error: %v", err)
		}
		if item.fakeNodeHandler.RequestCount != 1 {
			t.Errorf("expected one list for updating same status, but got %v.", item.fakeNodeHandler.RequestCount)
		}
	}
}
func sortedNodeNames(nodes []*api.Node) []string {
	nodeNames := []string{}
	for _, node := range nodes {
		nodeNames = append(nodeNames, node.Name)
	}
	sort.Strings(nodeNames)
	return nodeNames
}

// contains reports whether a node with the same name is present in nodes.
// (The body was truncated in this view; reconstructed from how the fake's
// List method uses it.)
func contains(node *api.Node, nodes []*api.Node) bool {
	for i := 0; i < len(nodes); i++ {
		if node.Name == nodes[i].Name {
			return true
		}
	}
	return false
}