Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-30 15:05:27 +00:00)
Merge pull request #8164 from cjcullen/cloudprovider
Route creation reconciler loop.
This commit is contained in: commit 677a4aa1a7
@@ -782,8 +782,10 @@ function kube-down {
   # Delete routes.
   local -a routes
+  # Clean up all routes w/ names like "<cluster-name>-<node-GUID>"
+  # e.g. "kubernetes-12345678-90ab-cdef-1234-567890abcdef"
   routes=( $(gcloud compute routes list --project "${PROJECT}" \
-    --regexp "${NODE_INSTANCE_PREFIX}-.+" | awk 'NR >= 2 { print $1 }') )
+    --regexp "${INSTANCE_PREFIX}-.{8}-.{4}-.{4}-.{4}-.{12}" | awk 'NR >= 2 { print $1 }') )
+  routes+=("${MASTER_NAME}")
   while (( "${#routes[@]}" > 0 )); do
     echo Deleting routes "${routes[*]::10}"
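For orientation, the new --regexp matches only the GUID-style route names that the route reconciler creates, one per node CIDR. A quick illustrative check of the pattern in Go, assuming the default INSTANCE_PREFIX of "kubernetes" (this snippet is not part of the commit):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// The route-name pattern from kube-down above, anchored for clarity.
	re := regexp.MustCompile(`^kubernetes-.{8}-.{4}-.{4}-.{4}-.{12}$`)
	fmt.Println(re.MatchString("kubernetes-12345678-90ab-cdef-1234-567890abcdef")) // true
	fmt.Println(re.MatchString("kubernetes-minion-1"))                             // false: too short to fit the GUID shape
}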
@@ -22,6 +22,19 @@
 {% set api_servers_with_port = api_servers + ":6443" -%}
 {% endif -%}
 
+# Disable registration for the kubelet running on the master on GCE.
+# TODO(roberthbailey): Make this configurable via an env var in config-default.sh
+{% if grains.cloud == 'gce' -%}
+  {% if grains['roles'][0] == 'kubernetes-master' -%}
+    {% set api_servers_with_port = "" -%}
+  {% endif -%}
+{% endif -%}
+
+{% set cloud_provider = "" -%}
+{% if grains.cloud is defined -%}
+  {% set cloud_provider = "--cloud_provider=" + grains.cloud -%}
+{% endif -%}
+
 {% set config = "--config=/etc/kubernetes/manifests" -%}
 {% set hostname_override = "" -%}
 {% if grains.hostname_override is defined -%}
@@ -45,4 +58,4 @@
 {% set configure_cbr0 = "--configure-cbr0=" + pillar['allocate_node_cidrs'] -%}
 {% endif -%}
 
-DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{hostname_override}} {{config}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{configure_cbr0}}"
+DAEMON_ARGS="{{daemon_args}} {{api_servers_with_port}} {{hostname_override}} {{cloud_provider}} {{config}} --allow_privileged={{pillar['allow_privileged']}} {{pillar['log_level']}} {{cluster_dns}} {{cluster_domain}} {{docker_root}} {{configure_cbr0}}"
@@ -101,7 +101,6 @@ func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (st
 	// Setup
 	servers := []string{}
 	glog.Infof("Creating etcd client pointing to %v", servers)
-	machineList := []string{"localhost", "127.0.0.1"}
 
 	handler := delegateHandler{}
 	apiServer := httptest.NewServer(&handler)
@@ -196,7 +195,7 @@ func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (st
 		api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
 	}}
 
-	nodeController := nodecontroller.NewNodeController(nil, "", machineList, nodeResources, cl, 10, 5*time.Minute, util.NewFakeRateLimiter(),
+	nodeController := nodecontroller.NewNodeController(nil, "", nodeResources, cl, 10, 5*time.Minute, util.NewFakeRateLimiter(),
 		40*time.Second, 60*time.Second, 5*time.Second, nil, false)
 	nodeController.Run(5*time.Second, true)
 	cadvisorInterface := new(cadvisor.Fake)
@@ -206,7 +205,7 @@ func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (st
 	configFilePath := makeTempDirOrDie("config", testRootDir)
 	glog.Infof("Using %s as root dir for kubelet #1", testRootDir)
 	fakeDocker1.VersionInfo = docker.Env{"ApiVersion=1.15"}
-	kcfg := kubeletapp.SimpleKubelet(cl, &fakeDocker1, machineList[0], testRootDir, firstManifestURL, "127.0.0.1", 10250, api.NamespaceDefault, empty_dir.ProbeVolumePlugins(), nil, cadvisorInterface, configFilePath, nil, kubecontainer.FakeOS{})
+	kcfg := kubeletapp.SimpleKubelet(cl, &fakeDocker1, "localhost", testRootDir, firstManifestURL, "127.0.0.1", 10250, api.NamespaceDefault, empty_dir.ProbeVolumePlugins(), nil, cadvisorInterface, configFilePath, nil, kubecontainer.FakeOS{})
 	kubeletapp.RunKubelet(kcfg, nil)
 	// Kubelet (machine)
 	// Create a second kubelet so that the guestbook example's two redis slaves both
@@ -214,7 +213,7 @@ func startComponents(firstManifestURL, secondManifestURL, apiVersion string) (st
 	testRootDir = makeTempDirOrDie("kubelet_integ_2.", "")
 	glog.Infof("Using %s as root dir for kubelet #2", testRootDir)
 	fakeDocker2.VersionInfo = docker.Env{"ApiVersion=1.15"}
-	kcfg = kubeletapp.SimpleKubelet(cl, &fakeDocker2, machineList[1], testRootDir, secondManifestURL, "127.0.0.1", 10251, api.NamespaceDefault, empty_dir.ProbeVolumePlugins(), nil, cadvisorInterface, "", nil, kubecontainer.FakeOS{})
+	kcfg = kubeletapp.SimpleKubelet(cl, &fakeDocker2, "127.0.0.1", testRootDir, secondManifestURL, "127.0.0.1", 10251, api.NamespaceDefault, empty_dir.ProbeVolumePlugins(), nil, cadvisorInterface, "", nil, kubecontainer.FakeOS{})
 	kubeletapp.RunKubelet(kcfg, nil)
 	return apiServer.URL, configFilePath
 }
@@ -33,6 +33,7 @@ import (
 	clientcmdapi "github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/nodecontroller"
+	"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/routecontroller"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/servicecontroller"
 	replicationControllerPkg "github.com/GoogleCloudPlatform/kubernetes/pkg/controller"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/healthz"
@@ -228,7 +229,7 @@ func (s *CMServer) Run(_ []string) error {
 		glog.Warning("DEPRECATION NOTICE: sync-node-status flag is being deprecated. It has no effect now and it will be removed in a future version.")
 	}
 
-	nodeController := nodecontroller.NewNodeController(cloud, s.MinionRegexp, s.MachineList, nodeResources,
+	nodeController := nodecontroller.NewNodeController(cloud, s.MinionRegexp, nodeResources,
 		kubeClient, s.RegisterRetryCount, s.PodEvictionTimeout, util.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
 		s.NodeMonitorGracePeriod, s.NodeStartupGracePeriod, s.NodeMonitorPeriod, (*net.IPNet)(&s.ClusterCIDR), s.AllocateNodeCIDRs)
 	nodeController.Run(s.NodeSyncPeriod, s.SyncNodeList)
@@ -238,6 +239,15 @@ func (s *CMServer) Run(_ []string) error {
 		glog.Errorf("Failed to start service controller: %v", err)
 	}
 
+	if s.AllocateNodeCIDRs {
+		routes, ok := cloud.Routes()
+		if !ok {
+			glog.Fatal("Cloud provider must support routes if allocate-node-cidrs is set")
+		}
+		routeController := routecontroller.New(routes, kubeClient, s.ClusterName, (*net.IPNet)(&s.ClusterCIDR))
+		routeController.Run(s.NodeSyncPeriod)
+	}
+
 	resourceQuotaManager := resourcequota.NewResourceQuotaManager(kubeClient)
 	resourceQuotaManager.Run(s.ResourceQuotaSyncPeriod)
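The routecontroller package wired up here is not included in this diff, so the following is only a sketch of the reconciler loop that New/Run presumably drive: list the cluster's nodes, list the cloud's routes, and create a route for any node whose PodCIDR has none. All helper names below (syncRoutes and its body) are hypothetical, not the actual routecontroller API; the real controller also receives the cluster CIDR, presumably to scope which routes it owns.

package routecontroller

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"

	"github.com/golang/glog"
)

// syncRoutes is a hypothetical single pass of the reconciler: it is a
// sketch, not the committed implementation.
func syncRoutes(routes cloudprovider.Routes, kubeClient client.Interface, clusterName string) error {
	nodes, err := kubeClient.Nodes().List(labels.Everything(), fields.Everything())
	if err != nil {
		return err
	}
	existing, err := routes.ListRoutes(clusterName + "-.*")
	if err != nil {
		return err
	}
	haveRoute := make(map[string]bool)
	for _, r := range existing {
		haveRoute[r.TargetInstance] = true
	}
	// Create a route for every node that has a PodCIDR but no route yet.
	for i := range nodes.Items {
		node := &nodes.Items[i]
		if node.Spec.PodCIDR == "" || haveRoute[node.Name] {
			continue
		}
		route := &cloudprovider.Route{
			Name:            node.Name,
			TargetInstance:  node.Name,
			DestinationCIDR: node.Spec.PodCIDR,
		}
		if err := routes.CreateRoute(route); err != nil {
			glog.Errorf("Could not create route for node %s: %v", node.Name, err)
		}
	}
	return nil
}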
@@ -89,6 +89,7 @@ type KubeletServer struct {
 	HealthzBindAddress     util.IP
 	OOMScoreAdj            int
 	APIServerList          util.StringList
+	RegisterNode           bool
 	ClusterDomain          string
 	MasterServiceNamespace string
 	ClusterDNS             util.IP
@@ -155,6 +156,7 @@ func NewKubeletServer() *KubeletServer {
 		CadvisorPort:                4194,
 		HealthzPort:                 10248,
 		HealthzBindAddress:          util.IP(net.ParseIP("127.0.0.1")),
+		RegisterNode:                true, // will be ignored if no apiserver is configured
 		OOMScoreAdj:                 -900,
 		MasterServiceNamespace:      api.NamespaceDefault,
 		ImageGCHighThresholdPercent: 90,
@@ -211,6 +213,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
 	fs.Var(&s.HealthzBindAddress, "healthz-bind-address", "The IP address for the healthz server to serve on, defaulting to 127.0.0.1 (set to 0.0.0.0 for all interfaces)")
 	fs.IntVar(&s.OOMScoreAdj, "oom-score-adj", s.OOMScoreAdj, "The oom_score_adj value for kubelet process. Values must be within the range [-1000, 1000]")
 	fs.Var(&s.APIServerList, "api-servers", "List of Kubernetes API servers for publishing events, and reading pods and services. (ip:port), comma separated.")
+	fs.BoolVar(&s.RegisterNode, "register-node", s.RegisterNode, "Register the node with the apiserver (defaults to true if --api-server is set)")
 	fs.StringVar(&s.ClusterDomain, "cluster-domain", s.ClusterDomain, "Domain for this cluster. If set, kubelet will configure all containers to search this domain in addition to the host's search domains")
 	fs.StringVar(&s.MasterServiceNamespace, "master-service-namespace", s.MasterServiceNamespace, "The namespace from which the kubernetes master services should be injected into pods")
 	fs.Var(&s.ClusterDNS, "cluster-dns", "IP address for a cluster DNS server. If set, kubelet will configure all containers to use this for DNS resolution in addition to the host's DNS servers")
@@ -318,6 +321,7 @@ func (s *KubeletServer) Run(_ []string) error {
 		MinimumGCAge:            s.MinimumGCAge,
 		MaxPerPodContainerCount: s.MaxPerPodContainerCount,
 		MaxContainerCount:       s.MaxContainerCount,
+		RegisterNode:            s.RegisterNode,
 		ClusterDomain:           s.ClusterDomain,
 		ClusterDNS:              s.ClusterDNS,
 		Runonce:                 s.RunOnce,
@@ -493,6 +497,7 @@ func SimpleKubelet(client *client.Client,
 		MinimumGCAge:            10 * time.Second,
 		MaxPerPodContainerCount: 2,
 		MaxContainerCount:       100,
+		RegisterNode:            true,
 		MasterServiceNamespace:  masterServiceNamespace,
 		VolumePlugins:           volumePlugins,
 		TLSOptions:              tlsOptions,
@@ -618,6 +623,7 @@ type KubeletConfig struct {
 	MinimumGCAge            time.Duration
 	MaxPerPodContainerCount int
 	MaxContainerCount       int
+	RegisterNode            bool
 	ClusterDomain           string
 	ClusterDNS              util.IP
 	EnableServer            bool
@@ -675,6 +681,7 @@ func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
 		kc.RegistryBurst,
 		gcPolicy,
 		pc.SeenAllSources,
+		kc.RegisterNode,
 		kc.ClusterDomain,
 		net.IP(kc.ClusterDNS),
 		kc.MasterServiceNamespace,
@@ -123,7 +123,7 @@ func runScheduler(cl *client.Client) {
 }
 
 // RunControllerManager starts a controller
-func runControllerManager(machineList []string, cl *client.Client, nodeMilliCPU, nodeMemory int64) {
+func runControllerManager(cl *client.Client, nodeMilliCPU, nodeMemory int64) {
 	nodeResources := &api.NodeResources{
 		Capacity: api.ResourceList{
 			api.ResourceCPU: *resource.NewMilliQuantity(nodeMilliCPU, resource.DecimalSI),
@@ -133,7 +133,7 @@ func runControllerManager(machineList []string, cl *client.Client, nodeMilliCPU,
 
 	const nodeSyncPeriod = 10 * time.Second
 	nodeController := nodecontroller.NewNodeController(
-		nil, "", machineList, nodeResources, cl, 10, 5*time.Minute, util.NewTokenBucketRateLimiter(*deletingPodsQps, *deletingPodsBurst),
+		nil, "", nodeResources, cl, 10, 5*time.Minute, util.NewTokenBucketRateLimiter(*deletingPodsQps, *deletingPodsBurst),
 		40*time.Second, 60*time.Second, 5*time.Second, nil, false)
 	nodeController.Run(nodeSyncPeriod, true)
@@ -150,18 +150,16 @@ func runControllerManager(machineList []string, cl *client.Client, nodeMilliCPU,
 }
 
 func startComponents(etcdClient tools.EtcdClient, cl *client.Client, addr net.IP, port int) {
-	machineList := []string{"localhost"}
-
 	runApiServer(etcdClient, addr, port, *masterServiceNamespace)
 	runScheduler(cl)
-	runControllerManager(machineList, cl, *nodeMilliCPU, *nodeMemory)
+	runControllerManager(cl, *nodeMilliCPU, *nodeMemory)
 
 	dockerClient := dockertools.ConnectToDockerOrDie(*dockerEndpoint)
 	cadvisorInterface, err := cadvisor.New(0)
 	if err != nil {
 		glog.Fatalf("Failed to create cAdvisor: %v", err)
 	}
-	kcfg := kubeletapp.SimpleKubelet(cl, dockerClient, machineList[0], "/tmp/kubernetes", "", "127.0.0.1", 10250, *masterServiceNamespace, kubeletapp.ProbeVolumePlugins(), nil, cadvisorInterface, "", nil, kubecontainer.RealOS{})
+	kcfg := kubeletapp.SimpleKubelet(cl, dockerClient, "localhost", "/tmp/kubernetes", "", "127.0.0.1", 10250, *masterServiceNamespace, kubeletapp.ProbeVolumePlugins(), nil, cadvisorInterface, "", nil, kubecontainer.RealOS{})
 	kubeletapp.RunKubelet(kcfg, nil)
 
 }
@@ -1241,6 +1241,8 @@ func ValidateNodeUpdate(oldNode *api.Node, node *api.Node) errs.ValidationErrorL
 	oldNode.ObjectMeta = node.ObjectMeta
 	// Allow users to update capacity
 	oldNode.Status.Capacity = node.Status.Capacity
+	// Allow the controller manager to assign a CIDR to a node.
+	oldNode.Spec.PodCIDR = node.Spec.PodCIDR
 	// Allow users to unschedule node
 	oldNode.Spec.Unschedulable = node.Spec.Unschedulable
 	// Clear status
@@ -273,6 +273,11 @@ func (aws *AWSCloud) Zones() (cloudprovider.Zones, bool) {
 	return aws, true
 }
 
+// Routes returns an implementation of Routes for Amazon Web Services.
+func (aws *AWSCloud) Routes() (cloudprovider.Routes, bool) {
+	return nil, false
+}
+
 // NodeAddresses is an implementation of Instances.NodeAddresses.
 func (aws *AWSCloud) NodeAddresses(name string) ([]api.NodeAddress, error) {
 	instance, err := aws.getInstancesByDnsName(name)
@@ -973,11 +978,3 @@ func (aws *AWSCloud) DeleteVolume(volumeName string) error {
 	}
 	return awsDisk.delete()
 }
-
-func (v *AWSCloud) Configure(name string, spec *api.NodeSpec) error {
-	return nil
-}
-
-func (v *AWSCloud) Release(name string) error {
-	return nil
-}
@@ -17,6 +17,7 @@ limitations under the License.
 
 package cloudprovider
 
 import (
 	"errors"
+	"net"
 	"strings"
@@ -33,6 +34,8 @@ type Interface interface {
 	Zones() (Zones, bool)
 	// Clusters returns a clusters interface. Also returns true if the interface is supported, false otherwise.
 	Clusters() (Clusters, bool)
+	// Routes returns a routes interface along with whether the interface is supported.
+	Routes() (Routes, bool)
 }
 
 // Clusters is an abstract, pluggable interface for clusters of containers.
@@ -85,12 +88,34 @@ type Instances interface {
 	List(filter string) ([]string, error)
 	// GetNodeResources gets the resources for a particular node
 	GetNodeResources(name string) (*api.NodeResources, error)
-	// Configure the specified instance using the spec
-	Configure(name string, spec *api.NodeSpec) error
-	// Delete all the configuration related to the instance, including other cloud resources
-	Release(name string) error
 }
 
+// Route is a representation of an advanced routing rule.
+type Route struct {
+	// Name is the name of the routing rule in the cloud-provider.
+	Name string
+	// TargetInstance is the name of the instance as specified in routing rules
+	// for the cloud-provider (in gce: the Instance Name).
+	TargetInstance string
+	// DestinationCIDR is the CIDR format IP range that this routing rule
+	// applies to.
+	DestinationCIDR string
+	// Description is a free-form string. It can be useful for tagging Routes.
+	Description string
+}
+
+// Routes is an abstract, pluggable interface for advanced routing rules.
+type Routes interface {
+	// List all routes that match the filter
+	ListRoutes(filter string) ([]*Route, error)
+	// Create the described route
+	CreateRoute(route *Route) error
+	// Delete the specified route
+	DeleteRoute(name string) error
+}
+
 var InstanceNotFound = errors.New("instance not found")
 
 // Zone represents the location of a particular machine.
 type Zone struct {
 	FailureDomain string
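The two-value form of Routes() mirrors the existing Instances()/Zones() accessors: providers that cannot program routes (AWS and Mesos later in this diff) return (nil, false), and callers must check the boolean before use. A minimal illustrative consumer, not part of the commit, showing the expected pattern:

// Illustrative only: the same capability check the controller-manager
// performs before starting the route controller.
func ensureRoute(cloud Interface, route *Route) error {
	routes, ok := cloud.Routes()
	if !ok {
		return errors.New("cloud provider does not support routes")
	}
	return routes.CreateRoute(route)
}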
@@ -17,8 +17,10 @@ limitations under the License.
 package fake_cloud
 
 import (
+	"fmt"
 	"net"
+	"regexp"
 	"sync"
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
@@ -39,7 +41,7 @@ type FakeUpdateBalancerCall struct {
 	Hosts []string
 }
 
-// FakeCloud is a test-double implementation of Interface, TCPLoadBalancer and Instances. It is useful for testing.
+// FakeCloud is a test-double implementation of Interface, TCPLoadBalancer, Instances, and Routes. It is useful for testing.
 type FakeCloud struct {
 	Exists bool
 	Err    error
@@ -53,6 +55,8 @@ type FakeCloud struct {
 	ExternalIP  net.IP
 	Balancers   []FakeBalancer
 	UpdateCalls []FakeUpdateBalancerCall
+	RouteMap    map[string]*cloudprovider.Route
+	Lock        sync.Mutex
 	cloudprovider.Zone
 }
@@ -94,6 +98,10 @@ func (f *FakeCloud) Zones() (cloudprovider.Zones, bool) {
 	return f, true
 }
 
+func (f *FakeCloud) Routes() (cloudprovider.Routes, bool) {
+	return f, true
+}
+
 // GetTCPLoadBalancer is a stub implementation of TCPLoadBalancer.GetTCPLoadBalancer.
 func (f *FakeCloud) GetTCPLoadBalancer(name, region string) (endpoint string, exists bool, err error) {
 	return f.ExternalIP.String(), f.Exists, f.Err
@@ -160,12 +168,39 @@ func (f *FakeCloud) GetNodeResources(name string) (*api.NodeResources, error) {
 	return f.NodeResources, f.Err
 }
 
-func (f *FakeCloud) Configure(name string, spec *api.NodeSpec) error {
-	f.addCall("configure")
-	return f.Err
+func (f *FakeCloud) ListRoutes(filter string) ([]*cloudprovider.Route, error) {
+	f.Lock.Lock()
+	defer f.Lock.Unlock()
+	f.addCall("list-routes")
+	var routes []*cloudprovider.Route
+	for _, route := range f.RouteMap {
+		if match, _ := regexp.MatchString(filter, route.Name); match {
+			routes = append(routes, route)
+		}
+	}
+	return routes, f.Err
 }
 
-func (f *FakeCloud) Release(name string) error {
-	f.addCall("release")
-	return f.Err
+func (f *FakeCloud) CreateRoute(route *cloudprovider.Route) error {
+	f.Lock.Lock()
+	defer f.Lock.Unlock()
+	f.addCall("create-route")
+	if _, exists := f.RouteMap[route.Name]; exists {
+		f.Err = fmt.Errorf("route with name %q already exists", route.Name)
+		return f.Err
+	}
+	f.RouteMap[route.Name] = route
+	return nil
+}
+
+func (f *FakeCloud) DeleteRoute(name string) error {
+	f.Lock.Lock()
+	defer f.Lock.Unlock()
+	f.addCall("delete-route")
+	if _, exists := f.RouteMap[name]; !exists {
+		f.Err = fmt.Errorf("no route found with name %q", name)
+		return f.Err
+	}
+	delete(f.RouteMap, name)
+	return nil
 }
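A hypothetical test exercising the fake (not part of this diff) would look like the following. Note that RouteMap must be initialized by the caller, since CreateRoute writes into the map directly and Go panics on writes to a nil map:

// Sketch only; assumes the standard testing package plus the
// cloudprovider and fake_cloud imports shown earlier in this diff.
func TestFakeRoutes(t *testing.T) {
	f := &fake_cloud.FakeCloud{RouteMap: make(map[string]*cloudprovider.Route)}
	routes, ok := f.Routes()
	if !ok {
		t.Fatal("FakeCloud should support Routes")
	}
	route := &cloudprovider.Route{Name: "kubernetes-node0", TargetInstance: "node0", DestinationCIDR: "10.244.0.0/24"}
	if err := routes.CreateRoute(route); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	got, err := routes.ListRoutes("kubernetes-.*")
	if err != nil || len(got) != 1 {
		t.Fatalf("expected 1 route, got %v (err: %v)", got, err)
	}
	if err := routes.DeleteRoute("kubernetes-node0"); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}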
@@ -195,6 +195,11 @@ func (gce *GCECloud) Zones() (cloudprovider.Zones, bool) {
 	return gce, true
 }
 
+// Routes returns an implementation of Routes for Google Compute Engine.
+func (gce *GCECloud) Routes() (cloudprovider.Routes, bool) {
+	return gce, true
+}
+
 func makeHostLink(projectID, zone, host string) string {
 	host = canonicalizeInstanceName(host)
 	return fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/%s/zones/%s/instances/%s",
@@ -445,7 +450,10 @@ func (gce *GCECloud) getInstanceByName(name string) (*compute.Instance, error) {
 	name = canonicalizeInstanceName(name)
 	res, err := gce.service.Instances.Get(gce.projectID, gce.zone, name).Do()
 	if err != nil {
-		glog.Errorf("Failed to retrieve TargetInstance resource for instance:%s", name)
+		glog.Errorf("Failed to retrieve TargetInstance resource for instance: %s", name)
+		if apiErr, ok := err.(*googleapi.Error); ok && apiErr.Code == http.StatusNotFound {
+			return nil, cloudprovider.InstanceNotFound
+		}
 		return nil, err
 	}
 	return res, nil
@@ -556,31 +564,45 @@ func getMetadataValue(metadata *compute.Metadata, key string) (string, bool) {
 	return "", false
 }
 
-func (gce *GCECloud) Configure(name string, spec *api.NodeSpec) error {
-	instanceName := canonicalizeInstanceName(name)
+func (gce *GCECloud) ListRoutes(filter string) ([]*cloudprovider.Route, error) {
+	listCall := gce.service.Routes.List(gce.projectID)
+	if len(filter) > 0 {
+		listCall = listCall.Filter("name eq " + filter)
+	}
+	res, err := listCall.Do()
+	if err != nil {
+		return nil, err
+	}
+	var routes []*cloudprovider.Route
+	for _, r := range res.Items {
+		if path.Base(r.Network) != gce.networkName {
+			continue
+		}
+		target := path.Base(r.NextHopInstance)
+		routes = append(routes, &cloudprovider.Route{r.Name, target, r.DestRange, r.Description})
+	}
+	return routes, nil
+}
+
+func (gce *GCECloud) CreateRoute(route *cloudprovider.Route) error {
+	instanceName := canonicalizeInstanceName(route.TargetInstance)
 	insertOp, err := gce.service.Routes.Insert(gce.projectID, &compute.Route{
-		Name:            instanceName,
-		DestRange:       spec.PodCIDR,
+		Name:            route.Name,
+		DestRange:       route.DestinationCIDR,
 		NextHopInstance: fmt.Sprintf("zones/%s/instances/%s", gce.zone, instanceName),
 		Network:         fmt.Sprintf("global/networks/%s", gce.networkName),
 		Priority:        1000,
+		Description:     route.Description,
 	}).Do()
 	if err != nil {
 		return err
 	}
-	if err := gce.waitForGlobalOp(insertOp); err != nil {
-		if gapiErr, ok := err.(*googleapi.Error); ok && gapiErr.Code == http.StatusConflict {
-			// TODO (cjcullen): Make this actually check the route is correct.
-			return nil
-		}
-	}
-	return err
+	return gce.waitForGlobalOp(insertOp)
 }
 
-func (gce *GCECloud) Release(name string) error {
+func (gce *GCECloud) DeleteRoute(name string) error {
 	instanceName := canonicalizeInstanceName(name)
-	deleteCall := gce.service.Routes.Delete(gce.projectID, instanceName)
-	deleteOp, err := deleteCall.Do()
+	deleteOp, err := gce.service.Routes.Delete(gce.projectID, instanceName).Do()
 	if err != nil {
 		return err
 	}
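One stylistic note on ListRoutes above: the cloudprovider.Route literal is positional. A keyed literal is behaviorally identical but survives any future reordering of the struct's fields; a possible alternative spelling of the same line:

// Equivalent, keyed form of the positional literal used in ListRoutes above.
routes = append(routes, &cloudprovider.Route{
	Name:            r.Name,
	TargetInstance:  target,
	DestinationCIDR: r.DestRange,
	Description:     r.Description,
})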
@@ -105,6 +105,11 @@ func (c *MesosCloud) Clusters() (cloudprovider.Clusters, bool) {
 	return c, true
 }
 
+// Routes always returns nil, false in this implementation.
+func (c *MesosCloud) Routes() (cloudprovider.Routes, bool) {
+	return nil, false
+}
+
 // ListClusters lists the names of the available Mesos clusters.
 func (c *MesosCloud) ListClusters() ([]string, error) {
 	// Always returns a single cluster (this one!)
@@ -224,15 +229,3 @@ func (c *MesosCloud) NodeAddresses(name string) ([]api.NodeAddress, error) {
 	}
 	return []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: ip.String()}}, nil
 }
-
-// Configure the specified instance using the spec.
-// This implementation is a no-op.
-func (c *MesosCloud) Configure(name string, spec *api.NodeSpec) error {
-	return nil
-}
-
-// Release deletes all the configuration related to the instance, including other cloud resources.
-// This implementation is a no-op.
-func (c *MesosCloud) Release(name string) error {
-	return nil
-}
@@ -20,14 +20,9 @@ import (
 	"errors"
 	"fmt"
 	"net"
-	"strings"
-	"sync"
-	"sync/atomic"
 	"time"
 
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
-	apierrors "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
-	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
@@ -57,7 +52,6 @@ type NodeController struct {
 	cloud              cloudprovider.Interface
 	matchRE            string
 	staticResources    *api.NodeResources
-	nodes              []string
 	kubeClient         client.Interface
 	recorder           record.EventRecorder
 	registerRetryCount int
|
||||
func NewNodeController(
|
||||
cloud cloudprovider.Interface,
|
||||
matchRE string,
|
||||
nodes []string,
|
||||
staticResources *api.NodeResources,
|
||||
kubeClient client.Interface,
|
||||
registerRetryCount int,
|
||||
@ -125,7 +118,6 @@ func NewNodeController(
|
||||
return &NodeController{
|
||||
cloud: cloud,
|
||||
matchRE: matchRE,
|
||||
nodes: nodes,
|
||||
staticResources: staticResources,
|
||||
kubeClient: kubeClient,
|
||||
recorder: recorder,
|
||||
@ -144,9 +136,9 @@ func NewNodeController(
|
||||
}
|
||||
|
||||
// Generates num pod CIDRs that could be assigned to nodes.
|
||||
func (nc *NodeController) generateCIDRs(num int) util.StringSet {
|
||||
func generateCIDRs(clusterCIDR *net.IPNet, num int) util.StringSet {
|
||||
res := util.NewStringSet()
|
||||
cidrIP := nc.clusterCIDR.IP.To4()
|
||||
cidrIP := clusterCIDR.IP.To4()
|
||||
for i := 0; i < num; i++ {
|
||||
// TODO: Make the CIDRs configurable.
|
||||
b1 := byte(i >> 8)
|
||||
@ -156,98 +148,38 @@ func (nc *NodeController) generateCIDRs(num int) util.StringSet {
|
||||
return res
|
||||
}
|
||||
|
||||
// For each node from newNodes, finds its current spec in registeredNodes.
|
||||
// If it is not there, it gets a new valid CIDR assigned.
|
||||
func (nc *NodeController) reconcilePodCIDRs(newNodes, registeredNodes *api.NodeList) *api.NodeList {
|
||||
registeredCIDRs := make(map[string]string)
|
||||
availableCIDRs := nc.generateCIDRs(len(newNodes.Items) + len(registeredNodes.Items))
|
||||
for _, node := range registeredNodes.Items {
|
||||
registeredCIDRs[node.Name] = node.Spec.PodCIDR
|
||||
availableCIDRs.Delete(node.Spec.PodCIDR)
|
||||
}
|
||||
for i, node := range newNodes.Items {
|
||||
podCIDR, registered := registeredCIDRs[node.Name]
|
||||
if !registered {
|
||||
podCIDR, _ = availableCIDRs.PopAny()
|
||||
// reconcileNodeCIDRs looks at each node and assigns it a valid CIDR
|
||||
// if it doesn't currently have one.
|
||||
func (nc *NodeController) reconcileNodeCIDRs(nodes *api.NodeList) {
|
||||
glog.V(4).Infof("Reconciling cidrs for %d nodes", len(nodes.Items))
|
||||
// TODO(roberthbailey): This seems inefficient. Why re-calculate CIDRs
|
||||
// on each sync period?
|
||||
availableCIDRs := generateCIDRs(nc.clusterCIDR, len(nodes.Items))
|
||||
for _, node := range nodes.Items {
|
||||
if node.Spec.PodCIDR != "" {
|
||||
glog.V(4).Infof("CIDR %s is already being used by node %s", node.Spec.PodCIDR, node.Name)
|
||||
availableCIDRs.Delete(node.Spec.PodCIDR)
|
||||
}
|
||||
newNodes.Items[i].Spec.PodCIDR = podCIDR
|
||||
}
|
||||
return newNodes
|
||||
}
|
||||
|
||||
func (nc *NodeController) configureNodeCIDR(node *api.Node) {
|
||||
instances, ok := nc.cloud.Instances()
|
||||
if !ok {
|
||||
glog.Errorf("Error configuring node %s: CloudProvider does not support Instances()", node.Name)
|
||||
return
|
||||
}
|
||||
err := instances.Configure(node.Name, &node.Spec)
|
||||
if err != nil {
|
||||
glog.Errorf("Error configuring node %s: %s", node.Name, err)
|
||||
// The newly assigned CIDR was not properly configured, so don't save it in the API server.
|
||||
node.Spec.PodCIDR = ""
|
||||
for _, node := range nodes.Items {
|
||||
if node.Spec.PodCIDR == "" {
|
||||
podCIDR, found := availableCIDRs.PopAny()
|
||||
if !found {
|
||||
glog.Errorf("No available CIDR for node %s", node.Name)
|
||||
continue
|
||||
}
|
||||
glog.V(4).Infof("Assigning node %s CIDR %s", node.Name, podCIDR)
|
||||
node.Spec.PodCIDR = podCIDR
|
||||
if _, err := nc.kubeClient.Nodes().Update(&node); err != nil {
|
||||
glog.Errorf("Unable to assign node %s CIDR %s: %v", node.Name, podCIDR, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (nc *NodeController) unassignNodeCIDR(nodeName string) {
|
||||
instances, ok := nc.cloud.Instances()
|
||||
if !ok {
|
||||
glog.Errorf("Error deconfiguring node %s: CloudProvider does not support Instances()", nodeName)
|
||||
return
|
||||
}
|
||||
err := instances.Release(nodeName)
|
||||
if err != nil {
|
||||
glog.Errorf("Error deconfiguring node %s: %s", nodeName, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Run creates initial node list and start syncing instances from cloudprovider, if any.
|
||||
// It also starts syncing or monitoring cluster node status.
|
||||
// 1. registerNodes() is called only once to register all initial nodes (from cloudprovider
|
||||
// or from command line flag). To make cluster bootstrap faster, node controller populates
|
||||
// node addresses.
|
||||
// 2. syncCloudNodes() is called periodically (if enabled) to sync instances from cloudprovider.
|
||||
// Node created here will only have specs.
|
||||
// 3. monitorNodeStatus() is called periodically to incorporate the results of node status
|
||||
// pushed from kubelet to master.
|
||||
// Run starts an asynchronous loop that monitors the status of cluster nodes.
|
||||
func (nc *NodeController) Run(period time.Duration, syncNodeList bool) {
|
||||
// Register intial set of nodes with their status set.
|
||||
var nodes *api.NodeList
|
||||
var err error
|
||||
if nc.isRunningCloudProvider() {
|
||||
if syncNodeList {
|
||||
if nodes, err = nc.getCloudNodesWithSpec(); err != nil {
|
||||
glog.Errorf("Error loading initial node from cloudprovider: %v", err)
|
||||
}
|
||||
} else {
|
||||
nodes = &api.NodeList{}
|
||||
}
|
||||
} else {
|
||||
if nodes, err = nc.getStaticNodesWithSpec(); err != nil {
|
||||
glog.Errorf("Error loading initial static nodes: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
if nodes, err = nc.populateAddresses(nodes); err != nil {
|
||||
glog.Errorf("Error getting nodes ips: %v", err)
|
||||
}
|
||||
if nc.isRunningCloudProvider() && nc.allocateNodeCIDRs {
|
||||
nc.reconcilePodCIDRs(nodes, &api.NodeList{})
|
||||
}
|
||||
if err := nc.registerNodes(nodes, nc.registerRetryCount, period); err != nil {
|
||||
glog.Errorf("Error registering node list %+v: %v", nodes, err)
|
||||
}
|
||||
|
||||
// Start syncing node list from cloudprovider.
|
||||
if syncNodeList && nc.isRunningCloudProvider() {
|
||||
go util.Forever(func() {
|
||||
if err := nc.syncCloudNodes(); err != nil {
|
||||
glog.Errorf("Error syncing cloud: %v", err)
|
||||
}
|
||||
}, period)
|
||||
}
|
||||
|
||||
// Start monitoring node status.
|
||||
// Incorporate the results of node status pushed from kubelet to master.
|
||||
go util.Forever(func() {
|
||||
if err := nc.monitorNodeStatus(); err != nil {
|
||||
glog.Errorf("Error monitoring node status: %v", err)
|
||||
@@ -255,165 +187,6 @@ func (nc *NodeController) Run(period time.Duration, syncNodeList bool) {
 	}, nc.nodeMonitorPeriod)
 }
 
-// registerNodes registers the given list of nodes, it keeps retrying for `retryCount` times.
-func (nc *NodeController) registerNodes(nodes *api.NodeList, retryCount int, retryInterval time.Duration) error {
-	if len(nodes.Items) == 0 {
-		return nil
-	}
-	nodes = nc.canonicalizeName(nodes)
-	toRegister := util.NewStringSet()
-	var wg sync.WaitGroup
-	var successfullyRegistered int32 = 0
-	for i := range nodes.Items {
-		node := &nodes.Items[i]
-		if !toRegister.Has(node.Name) {
-			wg.Add(1)
-			toRegister.Insert(node.Name)
-			go func(n *api.Node) {
-				defer wg.Done()
-				for i := 0; i < retryCount; i++ {
-					if nc.isRunningCloudProvider() && nc.allocateNodeCIDRs {
-						nc.configureNodeCIDR(n)
-					}
-					_, err := nc.kubeClient.Nodes().Create(n)
-					if err == nil || apierrors.IsAlreadyExists(err) {
-						glog.Infof("Registered node in registry: %v", n.Name)
-						atomic.AddInt32(&successfullyRegistered, 1)
-						return
-					} else {
-						glog.Errorf("Error registering node %v (retries left: %v): %v", n.Name, retryCount-i-1, err)
-					}
-					time.Sleep(retryInterval)
-				}
-				glog.Errorf("Unable to register node %v", n.Name)
-			}(node)
-		}
-	}
-	wg.Wait()
-	if int32(toRegister.Len()) != atomic.LoadInt32(&successfullyRegistered) {
-		return ErrRegistration
-	} else {
-		return nil
-	}
-}
-
-// syncCloudNodes synchronizes the list of instances from cloudprovider to master server.
-func (nc *NodeController) syncCloudNodes() error {
-	matches, err := nc.getCloudNodesWithSpec()
-	if err != nil {
-		return err
-	}
-	nodes, err := nc.kubeClient.Nodes().List(labels.Everything(), fields.Everything())
-	if err != nil {
-		return err
-	}
-	nodeMap := make(map[string]*api.Node)
-	nodeMapLock := sync.Mutex{}
-	for i := range nodes.Items {
-		node := nodes.Items[i]
-		nodeMapLock.Lock()
-		nodeMap[node.Name] = &node
-		nodeMapLock.Unlock()
-	}
-	if nc.allocateNodeCIDRs {
-		nc.reconcilePodCIDRs(matches, nodes)
-	}
-	var wg sync.WaitGroup
-	wg.Add(len(matches.Items))
-	// Create nodes which have been created in cloud, but not in kubernetes cluster
-	// Skip nodes if we hit an error while trying to get their addresses.
-	for i := range matches.Items {
-		go func(node *api.Node) {
-			defer wg.Done()
-			nodeMapLock.Lock()
-			_, ok := nodeMap[node.Name]
-			nodeMapLock.Unlock()
-			if !ok {
-				glog.V(3).Infof("Querying addresses for new node: %s", node.Name)
-				nodeList := &api.NodeList{}
-				nodeList.Items = []api.Node{*node}
-				_, err = nc.populateAddresses(nodeList)
-				if err != nil {
-					glog.Errorf("Error fetching addresses for new node %s: %v", node.Name, err)
-					return
-				}
-				node.Status.Addresses = nodeList.Items[0].Status.Addresses
-				if nc.allocateNodeCIDRs {
-					nc.configureNodeCIDR(node)
-				}
-				glog.Infof("Create node in registry: %s", node.Name)
-				_, err = nc.kubeClient.Nodes().Create(node)
-				if err != nil {
-					glog.Errorf("Create node %s error: %v", node.Name, err)
-				}
-			}
-			nodeMapLock.Lock()
-			delete(nodeMap, node.Name)
-			nodeMapLock.Unlock()
-		}(&matches.Items[i])
-	}
-	wg.Wait()
-
-	wg.Add(len(nodeMap))
-	// Delete nodes which have been deleted from cloud, but not from kubernetes cluster.
-	for nodeID := range nodeMap {
-		go func(nodeID string) {
-			defer wg.Done()
-			if nc.allocateNodeCIDRs {
-				nc.unassignNodeCIDR(nodeID)
-			}
-			glog.Infof("Delete node from registry: %s", nodeID)
-			err = nc.kubeClient.Nodes().Delete(nodeID)
-			if err != nil {
-				glog.Errorf("Delete node %s error: %v", nodeID, err)
-			}
-			nc.deletePods(nodeID)
-		}(nodeID)
-	}
-	wg.Wait()
-
-	return nil
-}
-
-// populateAddresses queries Address for given list of nodes.
-func (nc *NodeController) populateAddresses(nodes *api.NodeList) (*api.NodeList, error) {
-	if nc.isRunningCloudProvider() {
-		instances, ok := nc.cloud.Instances()
-		if !ok {
-			return nodes, ErrCloudInstance
-		}
-		for i := range nodes.Items {
-			node := &nodes.Items[i]
-			nodeAddresses, err := instances.NodeAddresses(node.Name)
-			if err != nil {
-				glog.Errorf("error getting instance addresses for %s: %v", node.Name, err)
-			} else {
-				node.Status.Addresses = nodeAddresses
-			}
-		}
-	} else {
-		for i := range nodes.Items {
-			node := &nodes.Items[i]
-			addr := net.ParseIP(node.Name)
-			if addr != nil {
-				address := api.NodeAddress{Type: api.NodeLegacyHostIP, Address: addr.String()}
-				node.Status.Addresses = []api.NodeAddress{address}
-			} else {
-				addrs, err := nc.lookupIP(node.Name)
-				if err != nil {
-					glog.Errorf("Can't get ip address of node %s: %v", node.Name, err)
-				} else if len(addrs) == 0 {
-					glog.Errorf("No ip address for node %v", node.Name)
-				} else {
-					address := api.NodeAddress{Type: api.NodeLegacyHostIP, Address: addrs[0].String()}
-					node.Status.Addresses = []api.NodeAddress{address}
-				}
-			}
-		}
-	}
-	return nodes, nil
-}
-
 func (nc *NodeController) recordNodeEvent(node *api.Node, event string) {
 	ref := &api.ObjectReference{
 		Kind: "Node",
@@ -567,6 +340,11 @@ func (nc *NodeController) monitorNodeStatus() error {
 	if err != nil {
 		return err
 	}
+	if nc.allocateNodeCIDRs {
+		// TODO (cjcullen): Use pkg/controller/framework to watch nodes and
+		// reduce lists/decouple this from monitoring status.
+		nc.reconcileNodeCIDRs(nodes)
+	}
 	for i := range nodes.Items {
 		var gracePeriod time.Duration
 		var lastReadyCondition api.NodeCondition
@@ -595,10 +373,12 @@ func (nc *NodeController) monitorNodeStatus() error {
 			if lastReadyCondition.Status == api.ConditionFalse &&
 				nc.now().After(nc.nodeStatusMap[node.Name].readyTransitionTimestamp.Add(nc.podEvictionTimeout)) {
 				// Node stays in not ready for at least 'podEvictionTimeout' - evict all pods on the unhealthy node.
-				// Makes sure we are not removing pods from to many nodes in the same time.
+				// Makes sure we are not removing pods from too many nodes in the same time.
 				glog.Infof("Evicting pods: %v is later than %v + %v", nc.now(), nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout)
 				if nc.deletingPodsRateLimiter.CanAccept() {
-					nc.deletePods(node.Name)
+					if err := nc.deletePods(node.Name); err != nil {
+						glog.Errorf("Unable to delete pods from node %s: %v", node.Name, err)
+					}
 				}
 			}
 			if lastReadyCondition.Status == api.ConditionUnknown &&
@@ -607,7 +387,9 @@ func (nc *NodeController) monitorNodeStatus() error {
 				// need to subtract monitoring grace period in order to get the real 'podEvictionTimeout'.
 				glog.Infof("Evicting pods2: %v is later than %v + %v", nc.now(), nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout-gracePeriod)
 				if nc.deletingPodsRateLimiter.CanAccept() {
-					nc.deletePods(node.Name)
+					if err := nc.deletePods(node.Name); err != nil {
+						glog.Errorf("Unable to delete pods from node %s: %v", node.Name, err)
+					}
 				}
 			}
@@ -615,71 +397,30 @@ func (nc *NodeController) monitorNodeStatus() error {
 			if readyCondition.Status != api.ConditionTrue && lastReadyCondition.Status == api.ConditionTrue {
 				nc.recordNodeEvent(node, "NodeNotReady")
 			}
+
+			// Check with the cloud provider to see if the node still exists. If it
+			// doesn't, delete the node and all pods scheduled on the node.
+			if readyCondition.Status != api.ConditionTrue && nc.cloud != nil {
+				instances, ok := nc.cloud.Instances()
+				if !ok {
+					glog.Errorf("%v", ErrCloudInstance)
+					continue
+				}
+				if _, err := instances.ExternalID(node.Name); err != nil && err == cloudprovider.InstanceNotFound {
+					if err := nc.kubeClient.Nodes().Delete(node.Name); err != nil {
+						glog.Errorf("Unable to delete node %s: %v", node.Name, err)
+						continue
+					}
+					if err := nc.deletePods(node.Name); err != nil {
+						glog.Errorf("Unable to delete pods from node %s: %v", node.Name, err)
+					}
+				}
+			}
 		}
 	}
 	return nil
 }
 
-// getStaticNodesWithSpec constructs and returns api.NodeList for static nodes. If error
-// occurs, an empty NodeList will be returned with a non-nil error info. The method only
-// constructs spec fields for nodes.
-func (nc *NodeController) getStaticNodesWithSpec() (*api.NodeList, error) {
-	result := &api.NodeList{}
-	for _, nodeID := range nc.nodes {
-		node := api.Node{
-			ObjectMeta: api.ObjectMeta{Name: nodeID},
-			Spec: api.NodeSpec{
-				ExternalID: nodeID,
-			},
-			Status: api.NodeStatus{
-				Capacity: nc.staticResources.Capacity,
-			},
-		}
-		result.Items = append(result.Items, node)
-	}
-	return result, nil
-}
-
-// getCloudNodesWithSpec constructs and returns api.NodeList from cloudprovider. If error
-// occurs, an empty NodeList will be returned with a non-nil error info. The method only
-// constructs spec fields for nodes.
-func (nc *NodeController) getCloudNodesWithSpec() (*api.NodeList, error) {
-	result := &api.NodeList{}
-	instances, ok := nc.cloud.Instances()
-	if !ok {
-		return result, ErrCloudInstance
-	}
-	matches, err := instances.List(nc.matchRE)
-	if err != nil {
-		return result, err
-	}
-	for i := range matches {
-		node := api.Node{}
-		node.Name = matches[i]
-		resources, err := instances.GetNodeResources(matches[i])
-		if err != nil {
-			return nil, err
-		}
-		if resources == nil {
-			resources = nc.staticResources
-		}
-		if resources != nil {
-			node.Status.Capacity = resources.Capacity
-			if node.Status.Capacity != nil {
-				node.Status.Capacity[api.ResourcePods] = *resource.NewQuantity(0, resource.DecimalSI)
-			}
-		}
-		instanceID, err := instances.ExternalID(node.Name)
-		if err != nil {
-			glog.Errorf("Error getting instance id for %s: %v", node.Name, err)
-		} else {
-			node.Spec.ExternalID = instanceID
-		}
-		result.Items = append(result.Items, node)
-	}
-	return result, nil
-}
-
 // deletePods will delete all pods from master running on given node.
 func (nc *NodeController) deletePods(nodeID string) error {
 	glog.V(2).Infof("Delete all pods from %v", nodeID)
@@ -702,19 +443,6 @@ func (nc *NodeController) deletePods(nodeID string) error {
 	return nil
 }
 
-// isRunningCloudProvider checks if cluster is running with cloud provider.
-func (nc *NodeController) isRunningCloudProvider() bool {
-	return nc.cloud != nil && len(nc.matchRE) > 0
-}
-
-// canonicalizeName takes a node list and lowercases all nodes' name.
-func (nc *NodeController) canonicalizeName(nodes *api.NodeList) *api.NodeList {
-	for i := range nodes.Items {
-		nodes.Items[i].Name = strings.ToLower(nodes.Items[i].Name)
-	}
-	return nodes
-}
-
 // getCondition returns a condition object for the specific condition
 // type, nil if the condition is not set.
 func (nc *NodeController) getCondition(status *api.NodeStatus, conditionType api.NodeConditionType) *api.NodeCondition {
|
@ -19,7 +19,6 @@ package nodecontroller
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
"sync"
|
||||
"testing"
|
||||
@@ -30,7 +29,6 @@
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/client/testclient"
-	fake_cloud "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/fake"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
 	"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
@ -142,506 +140,6 @@ func (m *FakeNodeHandler) Watch(label labels.Selector, field fields.Selector, re
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func TestRegisterNodes(t *testing.T) {
|
||||
table := []struct {
|
||||
fakeNodeHandler *FakeNodeHandler
|
||||
machines []string
|
||||
retryCount int
|
||||
expectedRequestCount int
|
||||
expectedCreateCount int
|
||||
expectedFail bool
|
||||
}{
|
||||
{
|
||||
// Register two nodes normally.
|
||||
machines: []string{"node0", "node1"},
|
||||
fakeNodeHandler: &FakeNodeHandler{
|
||||
CreateHook: func(fake *FakeNodeHandler, node *api.Node) bool { return true },
|
||||
},
|
||||
retryCount: 1,
|
||||
expectedRequestCount: 2,
|
||||
expectedCreateCount: 2,
|
||||
expectedFail: false,
|
||||
},
|
||||
{
|
||||
// Canonicalize node names.
|
||||
machines: []string{"NODE0", "node1"},
|
||||
fakeNodeHandler: &FakeNodeHandler{
|
||||
CreateHook: func(fake *FakeNodeHandler, node *api.Node) bool {
|
||||
if node.Name == "NODE0" {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
},
|
||||
},
|
||||
retryCount: 1,
|
||||
expectedRequestCount: 2,
|
||||
expectedCreateCount: 2,
|
||||
expectedFail: false,
|
||||
},
|
||||
{
|
||||
// No machine to register.
|
||||
machines: []string{},
|
||||
fakeNodeHandler: &FakeNodeHandler{
|
||||
CreateHook: func(fake *FakeNodeHandler, node *api.Node) bool { return true },
|
||||
},
|
||||
retryCount: 1,
|
||||
expectedRequestCount: 0,
|
||||
expectedCreateCount: 0,
|
||||
expectedFail: false,
|
||||
},
|
||||
{
|
||||
// Fail the first two requests.
|
||||
machines: []string{"node0", "node1"},
|
||||
fakeNodeHandler: &FakeNodeHandler{
|
||||
CreateHook: func(fake *FakeNodeHandler, node *api.Node) bool {
|
||||
if fake.RequestCount == 0 || fake.RequestCount == 1 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
},
|
||||
},
|
||||
retryCount: 10,
|
||||
expectedRequestCount: 4,
|
||||
expectedCreateCount: 2,
|
||||
expectedFail: false,
|
||||
},
|
||||
{
|
||||
// One node already exists
|
||||
machines: []string{"node0", "node1"},
|
||||
fakeNodeHandler: &FakeNodeHandler{
|
||||
Existing: []*api.Node{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: "node1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
retryCount: 10,
|
||||
expectedRequestCount: 2,
|
||||
expectedCreateCount: 1,
|
||||
expectedFail: false,
|
||||
},
|
||||
{
|
||||
// The first node always fails.
|
||||
machines: []string{"node0", "node1"},
|
||||
fakeNodeHandler: &FakeNodeHandler{
|
||||
CreateHook: func(fake *FakeNodeHandler, node *api.Node) bool {
|
||||
if node.Name == "node0" {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
},
|
||||
},
|
||||
retryCount: 2,
|
||||
expectedRequestCount: 3, // 2 for node0, 1 for node1
|
||||
expectedCreateCount: 1,
|
||||
expectedFail: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, item := range table {
|
||||
nodes := api.NodeList{}
|
||||
for _, machine := range item.machines {
|
||||
nodes.Items = append(nodes.Items, *newNode(machine))
|
||||
}
|
||||
nodeController := NewNodeController(nil, "", item.machines, &api.NodeResources{}, item.fakeNodeHandler, 10, time.Minute,
|
||||
util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false)
|
||||
err := nodeController.registerNodes(&nodes, item.retryCount, time.Millisecond)
|
||||
if !item.expectedFail && err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if item.expectedFail && err == nil {
|
||||
t.Errorf("unexpected non-error")
|
||||
}
|
||||
if item.fakeNodeHandler.RequestCount != item.expectedRequestCount {
|
||||
t.Errorf("expected %v calls, but got %v.", item.expectedRequestCount, item.fakeNodeHandler.RequestCount)
|
||||
}
|
||||
if len(item.fakeNodeHandler.CreatedNodes) != item.expectedCreateCount {
|
||||
t.Errorf("expected %v nodes, but got %v.", item.expectedCreateCount, item.fakeNodeHandler.CreatedNodes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateGetStaticNodesWithSpec(t *testing.T) {
|
||||
table := []struct {
|
||||
machines []string
|
||||
expectedNodes *api.NodeList
|
||||
}{
|
||||
{
|
||||
machines: []string{},
|
||||
expectedNodes: &api.NodeList{},
|
||||
},
|
||||
{
|
||||
machines: []string{"node0"},
|
||||
expectedNodes: &api.NodeList{
|
||||
Items: []api.Node{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "node0"},
|
||||
Spec: api.NodeSpec{
|
||||
ExternalID: "node0",
|
||||
},
|
||||
Status: api.NodeStatus{
|
||||
Capacity: api.ResourceList{
|
||||
api.ResourceName(api.ResourceCPU): resource.MustParse("10"),
|
||||
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
machines: []string{"node0", "node1"},
|
||||
expectedNodes: &api.NodeList{
|
||||
Items: []api.Node{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "node0"},
|
||||
Spec: api.NodeSpec{
|
||||
ExternalID: "node0",
|
||||
},
|
||||
Status: api.NodeStatus{
|
||||
Capacity: api.ResourceList{
|
||||
api.ResourceName(api.ResourceCPU): resource.MustParse("10"),
|
||||
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "node1"},
|
||||
Spec: api.NodeSpec{
|
||||
ExternalID: "node1",
|
||||
},
|
||||
Status: api.NodeStatus{
|
||||
Capacity: api.ResourceList{
|
||||
api.ResourceName(api.ResourceCPU): resource.MustParse("10"),
|
||||
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resources := api.NodeResources{
|
||||
Capacity: api.ResourceList{
|
||||
api.ResourceName(api.ResourceCPU): resource.MustParse("10"),
|
||||
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
|
||||
},
|
||||
}
|
||||
for _, item := range table {
|
||||
nodeController := NewNodeController(nil, "", item.machines, &resources, nil, 10, time.Minute,
|
||||
util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false)
|
||||
nodes, err := nodeController.getStaticNodesWithSpec()
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(item.expectedNodes, nodes) {
|
||||
t.Errorf("expected node list %+v, got %+v", item.expectedNodes, nodes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCreateGetCloudNodesWithSpec(t *testing.T) {
|
||||
resourceList := api.ResourceList{
|
||||
api.ResourceCPU: *resource.NewMilliQuantity(1000, resource.DecimalSI),
|
||||
api.ResourceMemory: *resource.NewQuantity(3000, resource.DecimalSI),
|
||||
}
|
||||
|
||||
table := []struct {
|
||||
fakeCloud *fake_cloud.FakeCloud
|
||||
machines []string
|
||||
expectedNodes *api.NodeList
|
||||
}{
|
||||
{
|
||||
fakeCloud: &fake_cloud.FakeCloud{},
|
||||
expectedNodes: &api.NodeList{},
|
||||
},
|
||||
{
|
||||
fakeCloud: &fake_cloud.FakeCloud{
|
||||
Machines: []string{"node0"},
|
||||
NodeResources: &api.NodeResources{Capacity: resourceList},
|
||||
},
|
||||
expectedNodes: &api.NodeList{
|
||||
Items: []api.Node{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "node0"},
|
||||
Status: api.NodeStatus{Capacity: resourceList},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
fakeCloud: &fake_cloud.FakeCloud{
|
||||
Machines: []string{"node0", "node1"},
|
||||
NodeResources: &api.NodeResources{Capacity: resourceList},
|
||||
},
|
||||
expectedNodes: &api.NodeList{
|
||||
Items: []api.Node{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "node0"},
|
||||
Status: api.NodeStatus{Capacity: resourceList},
|
||||
},
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "node1"},
|
||||
Status: api.NodeStatus{Capacity: resourceList},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, item := range table {
|
||||
nodeController := NewNodeController(item.fakeCloud, ".*", nil, &api.NodeResources{}, nil, 10, time.Minute,
|
||||
util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false)
|
||||
nodes, err := nodeController.getCloudNodesWithSpec()
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %v", err)
|
||||
}
|
||||
if !reflect.DeepEqual(item.expectedNodes, nodes) {
|
||||
t.Errorf("expected node list %+v, got %+v", item.expectedNodes, nodes)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSyncCloudNodes(t *testing.T) {
|
||||
table := []struct {
|
||||
fakeNodeHandler *FakeNodeHandler
|
||||
fakeCloud *fake_cloud.FakeCloud
|
||||
matchRE string
|
||||
expectedRequestCount int
|
||||
expectedNameCreated []string
|
||||
expectedExtIDCreated []string
|
||||
expectedAddrsCreated []string
|
||||
expectedDeleted []string
|
||||
}{
|
||||
{
|
||||
// 1 existing node, 1 cloud nodes: do nothing.
|
||||
fakeNodeHandler: &FakeNodeHandler{
|
||||
Existing: []*api.Node{newNode("node0")},
|
||||
},
|
||||
fakeCloud: &fake_cloud.FakeCloud{
|
||||
Machines: []string{"node0"},
|
||||
ExtID: map[string]string{
|
||||
"node0": "ext-node0",
|
||||
"node1": "ext-node1",
|
||||
},
|
||||
Addresses: []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: "1.2.3.4"}},
|
||||
},
|
||||
matchRE: ".*",
|
||||
expectedRequestCount: 1, // List
|
||||
expectedNameCreated: []string{},
expectedExtIDCreated: []string{},
expectedAddrsCreated: []string{},
expectedDeleted: []string{},
},
{
// 1 existing node, 2 cloud nodes: create 1.
fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{newNode("node0")},
},
fakeCloud: &fake_cloud.FakeCloud{
Machines: []string{"node0", "node1"},
ExtID: map[string]string{
"node0": "ext-node0",
"node1": "ext-node1",
},
Addresses: []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: "1.2.3.4"}},
},
matchRE: ".*",
expectedRequestCount: 2, // List + Create
expectedNameCreated: []string{"node1"},
expectedExtIDCreated: []string{"ext-node1"},
expectedAddrsCreated: []string{"1.2.3.4"},
expectedDeleted: []string{},
},
{
// 2 existing nodes, 1 cloud node: delete 1.
fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{newNode("node0"), newNode("node1")},
},
fakeCloud: &fake_cloud.FakeCloud{
Machines: []string{"node0"},
ExtID: map[string]string{
"node0": "ext-node0",
"node1": "ext-node1",
},
Addresses: []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: "1.2.3.4"}},
},
matchRE: ".*",
expectedRequestCount: 2, // List + Delete
expectedNameCreated: []string{},
expectedExtIDCreated: []string{},
expectedAddrsCreated: []string{},
expectedDeleted: []string{"node1"},
},
{
// 1 existing node, 3 cloud nodes but only 2 match regex: create 1.
fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{newNode("node0")},
},
fakeCloud: &fake_cloud.FakeCloud{
Machines: []string{"node0", "node1", "fake"},
ExtID: map[string]string{
"node0": "ext-node0",
"node1": "ext-node1",
"fake": "ext-fake",
},
Addresses: []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: "1.2.3.4"}},
},
matchRE: "node[0-9]+",
expectedRequestCount: 2, // List + Create
expectedNameCreated: []string{"node1"},
expectedExtIDCreated: []string{"ext-node1"},
expectedAddrsCreated: []string{"1.2.3.4"},
expectedDeleted: []string{},
},
}

for _, item := range table {
if item.fakeNodeHandler.Fake == nil {
item.fakeNodeHandler.Fake = testclient.NewSimpleFake()
}
nodeController := NewNodeController(item.fakeCloud, item.matchRE, nil, &api.NodeResources{}, item.fakeNodeHandler, 10, time.Minute,
util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false)
if err := nodeController.syncCloudNodes(); err != nil {
t.Errorf("unexpected error: %v", err)
}
if item.fakeNodeHandler.RequestCount != item.expectedRequestCount {
t.Errorf("expected %v call, but got %v.", item.expectedRequestCount, item.fakeNodeHandler.RequestCount)
}
nodes := sortedNodeNames(item.fakeNodeHandler.CreatedNodes)
if !reflect.DeepEqual(item.expectedNameCreated, nodes) {
t.Errorf("expected node list %+v, got %+v", item.expectedNameCreated, nodes)
}
nodeExtIDs := sortedNodeExternalIDs(item.fakeNodeHandler.CreatedNodes)
if !reflect.DeepEqual(item.expectedExtIDCreated, nodeExtIDs) {
t.Errorf("expected node external id list %+v, got %+v", item.expectedExtIDCreated, nodeExtIDs)
}
nodeAddrs := sortedNodeAddresses(item.fakeNodeHandler.CreatedNodes)
if !reflect.DeepEqual(item.expectedAddrsCreated, nodeAddrs) {
t.Errorf("expected node address list %+v, got %+v", item.expectedAddrsCreated, nodeAddrs)
}
nodes = sortedNodeNames(item.fakeNodeHandler.DeletedNodes)
if !reflect.DeepEqual(item.expectedDeleted, nodes) {
t.Errorf("expected node list %+v, got %+v", item.expectedDeleted, nodes)
}
}
}
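These table-driven cases pair a FakeNodeHandler (the nodes already registered) with a fake_cloud.FakeCloud (the machines the provider reports) and assert which nodes syncCloudNodes creates and deletes. A self-contained sketch of the same diff-the-two-sets idea; all names here are illustrative, not part of this package:

// Minimal sketch of the table-driven reconcile test pattern used above.
// reconcile is a stand-in, not the controller's real syncCloudNodes.
package main

import (
	"fmt"
	"reflect"
	"sort"
)

// reconcile returns which names must be created and which deleted so that
// the registered set comes to match the discovered set.
func reconcile(registered, discovered []string) (create, del []string) {
	have := map[string]bool{}
	for _, r := range registered {
		have[r] = true
	}
	want := map[string]bool{}
	for _, d := range discovered {
		want[d] = true
		if !have[d] {
			create = append(create, d)
		}
	}
	for _, r := range registered {
		if !want[r] {
			del = append(del, r)
		}
	}
	sort.Strings(create)
	sort.Strings(del)
	return create, del
}

func main() {
	table := []struct {
		registered, discovered, wantCreate, wantDelete []string
	}{
		{[]string{"node0"}, []string{"node0", "node1"}, []string{"node1"}, nil},
		{[]string{"node0", "node1"}, []string{"node0"}, nil, []string{"node1"}},
	}
	for i, item := range table {
		create, del := reconcile(item.registered, item.discovered)
		if !reflect.DeepEqual(create, item.wantCreate) || !reflect.DeepEqual(del, item.wantDelete) {
			fmt.Printf("%d. got create=%v delete=%v\n", i, create, del)
		}
	}
}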
func TestSyncCloudNodesEvictPods(t *testing.T) {
table := []struct {
fakeNodeHandler *FakeNodeHandler
fakeCloud *fake_cloud.FakeCloud
matchRE string
expectedRequestCount int
expectedDeleted []string
expectedActions []testclient.FakeAction
}{
{
// No node to delete: do nothing.
fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{newNode("node0"), newNode("node1")},
Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0"), *newPod("pod1", "node1")}}),
},
fakeCloud: &fake_cloud.FakeCloud{
Machines: []string{"node0", "node1"},
},
matchRE: ".*",
expectedRequestCount: 1, // List
expectedDeleted: []string{},
expectedActions: nil,
},
{
// Delete node1, and pod0 is running on it.
fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{newNode("node0"), newNode("node1")},
Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node1")}}),
},
fakeCloud: &fake_cloud.FakeCloud{
Machines: []string{"node0"},
},
matchRE: ".*",
expectedRequestCount: 2, // List + Delete
expectedDeleted: []string{"node1"},
expectedActions: []testclient.FakeAction{{Action: "list-pods"}, {Action: "delete-pod", Value: "pod0"}},
},
{
// Delete node1, but pod0 is running on node0.
fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{newNode("node0"), newNode("node1")},
Fake: testclient.NewSimpleFake(&api.PodList{Items: []api.Pod{*newPod("pod0", "node0")}}),
},
fakeCloud: &fake_cloud.FakeCloud{
Machines: []string{"node0"},
},
matchRE: ".*",
expectedRequestCount: 2, // List + Delete
expectedDeleted: []string{"node1"},
expectedActions: []testclient.FakeAction{{Action: "list-pods"}},
},
}

for _, item := range table {
if item.fakeNodeHandler.Fake == nil {
item.fakeNodeHandler.Fake = testclient.NewSimpleFake()
}
nodeController := NewNodeController(item.fakeCloud, item.matchRE, nil, &api.NodeResources{}, item.fakeNodeHandler, 10, time.Minute,
util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false)
if err := nodeController.syncCloudNodes(); err != nil {
t.Errorf("unexpected error: %v", err)
}
if item.fakeNodeHandler.RequestCount != item.expectedRequestCount {
t.Errorf("expected %v call, but got %v.", item.expectedRequestCount, item.fakeNodeHandler.RequestCount)
}
nodes := sortedNodeNames(item.fakeNodeHandler.DeletedNodes)
if !reflect.DeepEqual(item.expectedDeleted, nodes) {
t.Errorf("expected node list %+v, got %+v", item.expectedDeleted, nodes)
}
if !reflect.DeepEqual(item.expectedActions, item.fakeNodeHandler.Actions) {
t.Errorf("time out waiting for deleting pods, expected %+v, got %+v", item.expectedActions, item.fakeNodeHandler.Actions)
}
}
}
func TestPopulateNodeAddresses(t *testing.T) {
table := []struct {
nodes *api.NodeList
fakeCloud *fake_cloud.FakeCloud
expectedFail bool
expectedAddresses []api.NodeAddress
}{
{
nodes: &api.NodeList{Items: []api.Node{*newNode("node0"), *newNode("node1")}},
fakeCloud: &fake_cloud.FakeCloud{Addresses: []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: "1.2.3.4"}}},
expectedAddresses: []api.NodeAddress{
{Type: api.NodeLegacyHostIP, Address: "1.2.3.4"},
},
},
{
nodes: &api.NodeList{Items: []api.Node{*newNode("node0"), *newNode("node1")}},
fakeCloud: &fake_cloud.FakeCloud{Err: ErrQueryIPAddress},
expectedAddresses: nil,
},
}

for _, item := range table {
nodeController := NewNodeController(item.fakeCloud, ".*", nil, nil, nil, 10, time.Minute,
util.NewFakeRateLimiter(), testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false)
result, err := nodeController.populateAddresses(item.nodes)
// In case of IP querying error, we should continue.
if err != nil {
t.Errorf("unexpected error: %v", err)
}
for _, node := range result.Items {
if !reflect.DeepEqual(item.expectedAddresses, node.Status.Addresses) {
t.Errorf("expect HostIP %s, got %s", item.expectedAddresses, node.Status.Addresses)
}
}
}
}
func TestMonitorNodeStatusEvictPods(t *testing.T) {
fakeNow := util.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC)
evictionTimeout := 10 * time.Minute
@ -826,7 +324,7 @@ func TestMonitorNodeStatusEvictPods(t *testing.T) {
}

for _, item := range table {
nodeController := NewNodeController(nil, "", []string{"node0"}, nil, item.fakeNodeHandler, 10,
nodeController := NewNodeController(nil, "", nil, item.fakeNodeHandler, 10,
evictionTimeout, util.NewFakeRateLimiter(), testNodeMonitorGracePeriod,
testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false)
nodeController.now = func() util.Time { return fakeNow }
@ -1029,7 +527,7 @@ func TestMonitorNodeStatusUpdateStatus(t *testing.T) {
}

for _, item := range table {
nodeController := NewNodeController(nil, "", []string{"node0"}, nil, item.fakeNodeHandler, 10, 5*time.Minute, util.NewFakeRateLimiter(),
nodeController := NewNodeController(nil, "", nil, item.fakeNodeHandler, 10, 5*time.Minute, util.NewFakeRateLimiter(),
testNodeMonitorGracePeriod, testNodeStartupGracePeriod, testNodeMonitorPeriod, nil, false)
nodeController.now = func() util.Time { return fakeNow }
if err := nodeController.monitorNodeStatus(); err != nil {
|
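Note the constructor change running through these test hunks: the old calls passed a static machine list (for example []string{"node0"}) as NewNodeController's third argument, while the new calls drop it, because the controller now discovers nodes from the cloud provider filtered by matchRE. A hedged sketch of where the machine list comes from after this change; Instances() and List(filter) are methods visible elsewhere in this diff, but the helper itself is hypothetical:

// Illustrative only: derive the machine list from the cloud provider
// instead of a static slice.
func listMachines(cloud cloudprovider.Interface, matchRE string) ([]string, error) {
	instances, ok := cloud.Instances()
	if !ok {
		return nil, fmt.Errorf("cloud provider does not support instances")
	}
	// List filters machines by a regular expression, e.g. "node[0-9]+".
	return instances.List(matchRE)
}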
@ -390,14 +390,6 @@ func (i *Instances) GetNodeResources(name string) (*api.NodeResources, error) {
return rsrc, nil
}

func (i *Instances) Configure(name string, spec *api.NodeSpec) error {
return nil
}

func (i *Instances) Release(name string) error {
return nil
}

func (os *OpenStack) Clusters() (cloudprovider.Clusters, bool) {
return nil, false
}
@ -688,3 +680,7 @@ func (os *OpenStack) GetZone() (cloudprovider.Zone, error) {

return cloudprovider.Zone{Region: os.region}, nil
}

func (os *OpenStack) Routes() (cloudprovider.Routes, bool) {
return nil, false
}
|
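Each cloud provider now answers a Routes() capability query; providers without managed routes, like OpenStack here, return (nil, false), and the same pattern repeats for oVirt, Rackspace, and Vagrant below. A hedged caller-side sketch of how that capability gate would be consumed; the wiring around it is illustrative, not part of this commit:

// Illustrative wiring: start the route controller only when the provider
// supports routes. clusterName, clusterCIDR, and syncPeriod are
// hypothetical configuration values.
func maybeStartRouteController(cloud cloudprovider.Interface, kubeClient client.Interface, clusterName string, clusterCIDR *net.IPNet, syncPeriod time.Duration) {
	routes, ok := cloud.Routes()
	if !ok {
		glog.Infof("Cloud provider does not support routes; not starting route controller")
		return
	}
	routecontroller.New(routes, kubeClient, clusterName, clusterCIDR).Run(syncPeriod)
}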
@ -130,6 +130,11 @@ func (v *OVirtCloud) Zones() (cloudprovider.Zones, bool) {
return nil, false
}

// Routes returns an implementation of Routes for oVirt cloud
func (v *OVirtCloud) Routes() (cloudprovider.Routes, bool) {
return nil, false
}

// NodeAddresses returns the NodeAddresses of a particular machine instance
func (v *OVirtCloud) NodeAddresses(name string) ([]api.NodeAddress, error) {
instance, err := v.fetchInstance(name)
@ -250,11 +255,3 @@ func (v *OVirtCloud) List(filter string) ([]string, error) {
func (v *OVirtCloud) GetNodeResources(name string) (*api.NodeResources, error) {
return nil, nil
}

func (v *OVirtCloud) Configure(name string, spec *api.NodeSpec) error {
return nil
}

func (v *OVirtCloud) Release(name string) error {
return nil
}
|
@ -395,14 +395,6 @@ func (i *Instances) GetNodeResources(name string) (*api.NodeResources, error) {
return rsrc, nil
}

func (i *Instances) Configure(name string, spec *api.NodeSpec) error {
return nil
}

func (i *Instances) Release(name string) error {
return nil
}

func (os *Rackspace) Clusters() (cloudprovider.Clusters, bool) {
return nil, false
}
@ -416,6 +408,11 @@ func (os *Rackspace) Zones() (cloudprovider.Zones, bool) {

return os, true
}

func (os *Rackspace) Routes() (cloudprovider.Routes, bool) {
return nil, false
}

func (os *Rackspace) GetZone() (cloudprovider.Zone, error) {
glog.V(1).Infof("Current zone is %v", os.region)

|
19 pkg/cloudprovider/routecontroller/doc.go Normal file
@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package routecontroller contains code for syncing cloud routing rules with
// the list of registered nodes.
package routecontroller
|
147 pkg/cloudprovider/routecontroller/routecontroller.go Normal file
@ -0,0 +1,147 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package routecontroller

import (
"fmt"
"net"
"strings"
"time"

"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
"github.com/GoogleCloudPlatform/kubernetes/pkg/fields"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog"
)

type RouteController struct {
routes cloudprovider.Routes
kubeClient client.Interface
clusterName string
clusterCIDR *net.IPNet
}

const k8sNodeRouteTag = "k8s-node-route"

func New(routes cloudprovider.Routes, kubeClient client.Interface, clusterName string, clusterCIDR *net.IPNet) *RouteController {
return &RouteController{
routes: routes,
kubeClient: kubeClient,
clusterName: clusterName,
clusterCIDR: clusterCIDR,
}
}

func (rc *RouteController) Run(syncPeriod time.Duration) {
go util.Forever(func() {
if err := rc.reconcileNodeRoutes(); err != nil {
glog.Errorf("Couldn't reconcile node routes: %v", err)
}
}, syncPeriod)
}

func (rc *RouteController) reconcileNodeRoutes() error {
routeList, err := rc.routes.ListRoutes(rc.truncatedClusterName() + "-.*")
if err != nil {
return fmt.Errorf("error listing routes: %v", err)
}
// TODO (cjcullen): use pkg/controller/framework.NewInformer to watch this
// and reduce the number of lists needed.
nodeList, err := rc.kubeClient.Nodes().List(labels.Everything(), fields.Everything())
if err != nil {
return fmt.Errorf("error listing nodes: %v", err)
}
return rc.reconcile(nodeList.Items, routeList)
}

func (rc *RouteController) reconcile(nodes []api.Node, routes []*cloudprovider.Route) error {
// nodeCIDRs maps nodeName->nodeCIDR
nodeCIDRs := make(map[string]string)
// routeMap maps routeTargetInstance->route
routeMap := make(map[string]*cloudprovider.Route)
for _, route := range routes {
routeMap[route.TargetInstance] = route
}
for _, node := range nodes {
// Check if we have a route for this node w/ the correct CIDR.
r := routeMap[node.Name]
if r == nil || r.DestinationCIDR != node.Spec.PodCIDR {
// If not, create the route.
route := &cloudprovider.Route{
Name: rc.truncatedClusterName() + "-" + string(node.UID),
TargetInstance: node.Name,
DestinationCIDR: node.Spec.PodCIDR,
Description: k8sNodeRouteTag,
}
go func(route *cloudprovider.Route) {
if err := rc.routes.CreateRoute(route); err != nil {
glog.Errorf("Could not create route %s: %v", route.Name, err)
}
}(route)
}
nodeCIDRs[node.Name] = node.Spec.PodCIDR
}
for _, route := range routes {
if rc.isResponsibleForRoute(route) {
// Check if this route applies to a node we know about & has correct CIDR.
if nodeCIDRs[route.TargetInstance] != route.DestinationCIDR {
// Delete the route.
go func(routeName string) {
if err := rc.routes.DeleteRoute(routeName); err != nil {
glog.Errorf("Could not delete route %s: %v", routeName, err)
}
}(route.Name)
}
}
}
return nil
}

func (rc *RouteController) truncatedClusterName() string {
if len(rc.clusterName) > 26 {
return rc.clusterName[:26]
}
return rc.clusterName
}

func (rc *RouteController) isResponsibleForRoute(route *cloudprovider.Route) bool {
_, cidr, err := net.ParseCIDR(route.DestinationCIDR)
if err != nil {
glog.Errorf("Ignoring route %s, unparsable CIDR: %v", route.Name, err)
return false
}
// Not responsible if this route's CIDR is not within our clusterCIDR
lastIP := make([]byte, len(cidr.IP))
for i := range lastIP {
lastIP[i] = cidr.IP[i] | ^cidr.Mask[i]
}
if !rc.clusterCIDR.Contains(cidr.IP) || !rc.clusterCIDR.Contains(lastIP) {
return false
}
// Not responsible if route name doesn't start with <clusterName>
if !strings.HasPrefix(route.Name, rc.clusterName) {
return false
}
// Not responsible if route description != "k8s-node-route"
if route.Description != k8sNodeRouteTag {
return false
}
return true
}
|
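isResponsibleForRoute claims a route only when its whole destination CIDR sits inside the cluster CIDR, its name carries the cluster prefix, and its description is the k8s-node-route tag. The bitwise step computes the last address of the route's CIDR by ORing the network address with the inverted mask. A standalone illustration of that containment test:

// Self-contained illustration of the containment check: a route CIDR
// belongs to the cluster only if both its first and last addresses fall
// inside the cluster CIDR.
package main

import (
	"fmt"
	"net"
)

func contains(clusterCIDR *net.IPNet, routeCIDR string) bool {
	_, cidr, err := net.ParseCIDR(routeCIDR)
	if err != nil {
		return false
	}
	lastIP := make(net.IP, len(cidr.IP))
	for i := range lastIP {
		// Network address ORed with the inverted mask gives the last address.
		lastIP[i] = cidr.IP[i] | ^cidr.Mask[i]
	}
	return clusterCIDR.Contains(cidr.IP) && clusterCIDR.Contains(lastIP)
}

func main() {
	_, clusterCIDR, _ := net.ParseCIDR("10.244.0.0/16")
	fmt.Println(contains(clusterCIDR, "10.244.10.0/24")) // true: entirely inside the /16
	fmt.Println(contains(clusterCIDR, "10.244.0.0/15"))  // false: last address 10.245.255.255 is outside
}

As a side note, the 26-character cap in truncatedClusterName() is presumably sized so that the route name, cluster prefix plus hyphen plus a 36-character node UID (as in the test names below), stays within GCE's 63-character limit on resource names: 26 + 1 + 36 = 63.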
213 pkg/cloudprovider/routecontroller/routecontroller_test.go Normal file
@ -0,0 +1,213 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package routecontroller

import (
"net"
"testing"
"time"

"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/fake"
)

func TestIsResponsibleForRoute(t *testing.T) {
myClusterName := "my-awesome-cluster"
myClusterRoute := "my-awesome-cluster-12345678-90ab-cdef-1234-567890abcdef"
testCases := []struct {
clusterCIDR string
routeName string
routeCIDR string
routeDescription string
expectedResponsible bool
}{
// Routes that belong to this cluster
{"10.244.0.0/16", myClusterRoute, "10.244.0.0/24", "k8s-node-route", true},
{"10.244.0.0/16", myClusterRoute, "10.244.10.0/24", "k8s-node-route", true},
{"10.244.0.0/16", myClusterRoute, "10.244.255.0/24", "k8s-node-route", true},
{"10.244.0.0/14", myClusterRoute, "10.244.0.0/24", "k8s-node-route", true},
{"10.244.0.0/14", myClusterRoute, "10.247.255.0/24", "k8s-node-route", true},
// Routes inside our cidr, but not named how we would have named them
{"10.244.0.0/16", "background-cluster-route", "10.244.0.0/16", "k8s-node-route", false},
{"10.244.0.0/16", "special-single-route", "10.244.12.34/32", "k8s-node-route", false},
// Routes inside our cidr, but not tagged how we would have tagged them in the description
{"10.244.0.0/16", "my-awesome-cluster-background", "10.244.0.0/16", "", false},
{"10.244.0.0/16", "my-awesome-cluster-single-route", "10.244.12.34/32", "this is a route", false},
// Routes that match our naming/tagging scheme, but are outside our cidr
{"10.244.0.0/16", myClusterRoute, "10.224.0.0/24", "k8s-node-route", false},
{"10.244.0.0/16", myClusterRoute, "10.0.10.0/24", "k8s-node-route", false},
{"10.244.0.0/16", myClusterRoute, "10.255.255.0/24", "k8s-node-route", false},
{"10.244.0.0/14", myClusterRoute, "10.248.0.0/24", "k8s-node-route", false},
{"10.244.0.0/14", myClusterRoute, "10.243.255.0/24", "k8s-node-route", false},
}
for i, testCase := range testCases {
_, cidr, err := net.ParseCIDR(testCase.clusterCIDR)
if err != nil {
t.Errorf("%d. Error in test case: unparsable cidr %q", i, testCase.clusterCIDR)
}
rc := New(nil, nil, myClusterName, cidr)
route := &cloudprovider.Route{
Name: testCase.routeName,
TargetInstance: "doesnt-matter-for-this-test",
DestinationCIDR: testCase.routeCIDR,
Description: testCase.routeDescription,
}
if resp := rc.isResponsibleForRoute(route); resp != testCase.expectedResponsible {
t.Errorf("%d. isResponsibleForRoute() = %t; want %t", i, resp, testCase.expectedResponsible)
}
}
}

func TestReconcile(t *testing.T) {
cluster := "my-k8s"
testCases := []struct {
nodes []api.Node
initialRoutes []*cloudprovider.Route
expectedRoutes []*cloudprovider.Route
}{
// 2 nodes, routes already there
{
nodes: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: "node-1", UID: "01"}, Spec: api.NodeSpec{PodCIDR: "10.120.0.0/24"}},
{ObjectMeta: api.ObjectMeta{Name: "node-2", UID: "02"}, Spec: api.NodeSpec{PodCIDR: "10.120.1.0/24"}},
},
initialRoutes: []*cloudprovider.Route{
{cluster + "-01", "node-1", "10.120.0.0/24", "k8s-node-route"},
{cluster + "-02", "node-2", "10.120.1.0/24", "k8s-node-route"},
},
expectedRoutes: []*cloudprovider.Route{
{cluster + "-01", "node-1", "10.120.0.0/24", "k8s-node-route"},
{cluster + "-02", "node-2", "10.120.1.0/24", "k8s-node-route"},
},
},
// 2 nodes, one route already there
{
nodes: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: "node-1", UID: "01"}, Spec: api.NodeSpec{PodCIDR: "10.120.0.0/24"}},
{ObjectMeta: api.ObjectMeta{Name: "node-2", UID: "02"}, Spec: api.NodeSpec{PodCIDR: "10.120.1.0/24"}},
},
initialRoutes: []*cloudprovider.Route{
{cluster + "-01", "node-1", "10.120.0.0/24", "k8s-node-route"},
},
expectedRoutes: []*cloudprovider.Route{
{cluster + "-01", "node-1", "10.120.0.0/24", "k8s-node-route"},
{cluster + "-02", "node-2", "10.120.1.0/24", "k8s-node-route"},
},
},
// 2 nodes, no routes yet
{
nodes: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: "node-1", UID: "01"}, Spec: api.NodeSpec{PodCIDR: "10.120.0.0/24"}},
{ObjectMeta: api.ObjectMeta{Name: "node-2", UID: "02"}, Spec: api.NodeSpec{PodCIDR: "10.120.1.0/24"}},
},
initialRoutes: []*cloudprovider.Route{},
expectedRoutes: []*cloudprovider.Route{
{cluster + "-01", "node-1", "10.120.0.0/24", "k8s-node-route"},
{cluster + "-02", "node-2", "10.120.1.0/24", "k8s-node-route"},
},
},
// 2 nodes, a few too many routes
{
nodes: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: "node-1", UID: "01"}, Spec: api.NodeSpec{PodCIDR: "10.120.0.0/24"}},
{ObjectMeta: api.ObjectMeta{Name: "node-2", UID: "02"}, Spec: api.NodeSpec{PodCIDR: "10.120.1.0/24"}},
},
initialRoutes: []*cloudprovider.Route{
{cluster + "-01", "node-1", "10.120.0.0/24", "k8s-node-route"},
{cluster + "-02", "node-2", "10.120.1.0/24", "k8s-node-route"},
{cluster + "-03", "node-3", "10.120.2.0/24", "k8s-node-route"},
{cluster + "-04", "node-4", "10.120.3.0/24", "k8s-node-route"},
},
expectedRoutes: []*cloudprovider.Route{
{cluster + "-01", "node-1", "10.120.0.0/24", "k8s-node-route"},
{cluster + "-02", "node-2", "10.120.1.0/24", "k8s-node-route"},
},
},
// 2 nodes, 2 routes, but only 1 is right
{
nodes: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: "node-1", UID: "01"}, Spec: api.NodeSpec{PodCIDR: "10.120.0.0/24"}},
{ObjectMeta: api.ObjectMeta{Name: "node-2", UID: "02"}, Spec: api.NodeSpec{PodCIDR: "10.120.1.0/24"}},
},
initialRoutes: []*cloudprovider.Route{
{cluster + "-01", "node-1", "10.120.0.0/24", "k8s-node-route"},
{cluster + "-03", "node-3", "10.120.2.0/24", "k8s-node-route"},
},
expectedRoutes: []*cloudprovider.Route{
{cluster + "-01", "node-1", "10.120.0.0/24", "k8s-node-route"},
{cluster + "-02", "node-2", "10.120.1.0/24", "k8s-node-route"},
},
},
}
for i, testCase := range testCases {
cloud := &fake_cloud.FakeCloud{RouteMap: make(map[string]*cloudprovider.Route)}
for _, route := range testCase.initialRoutes {
cloud.RouteMap[route.Name] = route
}
routes, ok := cloud.Routes()
if !ok {
t.Error("Error in test: fake_cloud doesn't support Routes()")
}
_, cidr, _ := net.ParseCIDR("10.120.0.0/16")
rc := New(routes, nil, cluster, cidr)
if err := rc.reconcile(testCase.nodes, testCase.initialRoutes); err != nil {
t.Errorf("%d. Error from rc.reconcile(): %v", i, err)
}
var finalRoutes []*cloudprovider.Route
var err error
timeoutChan := time.After(50 * time.Millisecond)
tick := time.NewTicker(10 * time.Millisecond)
defer tick.Stop()
poll:
for {
select {
case <-tick.C:
if finalRoutes, err = routes.ListRoutes(""); err == nil && routeListEqual(finalRoutes, testCase.expectedRoutes) {
break poll
}
case <-timeoutChan:
t.Errorf("%d. rc.reconcile() = %v, routes:\n%v\nexpected: nil, routes:\n%v\n", i, err, flatten(finalRoutes), flatten(testCase.expectedRoutes))
break poll
}
}
}
}

func routeListEqual(list1, list2 []*cloudprovider.Route) bool {
if len(list1) != len(list2) {
return false
}
routeMap1 := make(map[string]*cloudprovider.Route)
for _, route1 := range list1 {
routeMap1[route1.Name] = route1
}
for _, route2 := range list2 {
if route1, exists := routeMap1[route2.Name]; !exists || *route1 != *route2 {
return false
}
}
return true
}

func flatten(list []*cloudprovider.Route) []cloudprovider.Route {
var structList []cloudprovider.Route
for _, route := range list {
structList = append(structList, *route)
}
return structList
}
|
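Because reconcile fires CreateRoute and DeleteRoute in goroutines, TestReconcile cannot assert immediately after it returns; it polls the fake cloud on a 10ms ticker until the route set converges or a 50ms timeout fires. The pattern distilled into a hypothetical helper, not part of this commit (assumes import "time"):

// waitFor polls cond on a ticker and returns whether it became true before
// the timeout; cond is any predicate over asynchronously updated state.
func waitFor(cond func() bool, tickEvery, timeout time.Duration) bool {
	tick := time.NewTicker(tickEvery)
	defer tick.Stop()
	deadline := time.After(timeout)
	for {
		select {
		case <-tick.C:
			if cond() {
				return true
			}
		case <-deadline:
			return false
		}
	}
}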
@ -99,6 +99,11 @@ func (v *VagrantCloud) Zones() (cloudprovider.Zones, bool) {
return nil, false
}

// Routes returns an implementation of Routes for Vagrant cloud.
func (v *VagrantCloud) Routes() (cloudprovider.Routes, bool) {
return nil, false
}

// getInstanceByAddress returns
func (v *VagrantCloud) getInstanceByAddress(address string) (*SaltMinion, error) {
token, err := v.saltLogin()
@ -239,11 +244,3 @@ func (v *VagrantCloud) List(filter string) ([]string, error) {
func (v *VagrantCloud) GetNodeResources(name string) (*api.NodeResources, error) {
return nil, nil
}

func (v *VagrantCloud) Configure(name string, spec *api.NodeSpec) error {
return nil
}

func (v *VagrantCloud) Release(name string) error {
return nil
}
|
@ -31,6 +31,7 @@ import (
"time"

"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
apierrors "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
@ -66,11 +67,6 @@ const (
// Max amount of time to wait for the container runtime to come up.
maxWaitForContainerRuntime = 5 * time.Minute

// Initial node status update frequency and incremental frequency, for faster cluster startup.
// The update frequency will be incremented linearly, until it reaches status_update_frequency.
initialNodeStatusUpdateFrequency = 100 * time.Millisecond
nodeStatusUpdateFrequencyInc = 500 * time.Millisecond

// nodeStatusUpdateRetry specifies how many times kubelet retries when posting node status failed.
nodeStatusUpdateRetry = 5

@ -122,6 +118,7 @@ func NewMainKubelet(
pullBurst int,
containerGCPolicy ContainerGCPolicy,
sourcesReady SourcesReadyFn,
registerNode bool,
clusterDomain string,
clusterDNS net.IP,
masterServiceNamespace string,
@ -224,6 +221,7 @@ func NewMainKubelet(
readinessManager: readinessManager,
httpClient: &http.Client{},
sourcesReady: sourcesReady,
registerNode: registerNode,
clusterDomain: clusterDomain,
clusterDNS: clusterDNS,
serviceLister: serviceLister,
@ -373,6 +371,9 @@ type Kubelet struct {
// cAdvisor used for container information.
cadvisor cadvisor.Interface

// Set to true to have the node register itself with the apiserver.
registerNode bool

// If non-empty, use this for container DNS search.
clusterDomain string

@ -657,26 +658,22 @@ func (kl *Kubelet) Run(updates <-chan PodUpdate) {
glog.Infof("Running in container %q", kl.resourceContainer)
}

err := kl.imageManager.Start()
if err != nil {
if err := kl.imageManager.Start(); err != nil {
kl.recorder.Eventf(kl.nodeRef, "kubeletSetupFailed", "Failed to start ImageManager %v", err)
glog.Errorf("Failed to start ImageManager, images may not be garbage collected: %v", err)
}

err = kl.cadvisor.Start()
if err != nil {
if err := kl.cadvisor.Start(); err != nil {
kl.recorder.Eventf(kl.nodeRef, "kubeletSetupFailed", "Failed to start CAdvisor %v", err)
glog.Errorf("Failed to start CAdvisor, system may not be properly monitored: %v", err)
}

err = kl.containerManager.Start()
if err != nil {
if err := kl.containerManager.Start(); err != nil {
kl.recorder.Eventf(kl.nodeRef, "kubeletSetupFailed", "Failed to start ContainerManager %v", err)
glog.Errorf("Failed to start ContainerManager, system may not be properly isolated: %v", err)
}

err = kl.oomWatcher.Start(kl.nodeRef)
if err != nil {
if err := kl.oomWatcher.Start(kl.nodeRef); err != nil {
kl.recorder.Eventf(kl.nodeRef, "kubeletSetupFailed", "Failed to start OOM watcher %v", err)
glog.Errorf("Failed to start OOM watching: %v", err)
}
|
||||
kl.syncLoop(updates, kl)
|
||||
}
|
||||
|
||||
func (kl *Kubelet) initialNodeStatus() (*api.Node, error) {
|
||||
node := &api.Node{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Name: kl.hostname,
|
||||
Labels: map[string]string{"kubernetes.io/hostname": kl.hostname},
|
||||
},
|
||||
}
|
||||
if kl.cloud != nil {
|
||||
instances, ok := kl.cloud.Instances()
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("failed to get instances from cloud provider")
|
||||
}
|
||||
// TODO(roberthbailey): Can we do this without having credentials to talk
|
||||
// to the cloud provider?
|
||||
instanceID, err := instances.ExternalID(kl.hostname)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get instance ID from cloud provider: %v", err)
|
||||
}
|
||||
node.Spec.ExternalID = instanceID
|
||||
} else {
|
||||
node.Spec.ExternalID = kl.hostname
|
||||
}
|
||||
if err := kl.setNodeStatus(node); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return node, nil
|
||||
}
|
||||
|
||||
// registerWithApiserver registers the node with the cluster master.
|
||||
func (kl *Kubelet) registerWithApiserver() {
|
||||
step := 100 * time.Millisecond
|
||||
for {
|
||||
time.Sleep(step)
|
||||
step = step * 2
|
||||
if step >= 7*time.Second {
|
||||
step = 7 * time.Second
|
||||
}
|
||||
|
||||
node, err := kl.initialNodeStatus()
|
||||
if err != nil {
|
||||
glog.Errorf("Unable to construct api.Node object for kubelet: %v", err)
|
||||
continue
|
||||
}
|
||||
glog.V(2).Infof("Attempting to register node %s", node.Name)
|
||||
if _, err := kl.kubeClient.Nodes().Create(node); err != nil {
|
||||
if apierrors.IsAlreadyExists(err) {
|
||||
currentNode, err := kl.kubeClient.Nodes().Get(kl.hostname)
|
||||
if err != nil {
|
||||
glog.Errorf("error getting node %q: %v", kl.hostname, err)
|
||||
continue
|
||||
}
|
||||
if currentNode == nil {
|
||||
glog.Errorf("no node instance returned for %q", kl.hostname)
|
||||
continue
|
||||
}
|
||||
if currentNode.Spec.ExternalID == node.Spec.ExternalID {
|
||||
glog.Infof("Node %s was previously registered", node.Name)
|
||||
return
|
||||
}
|
||||
}
|
||||
glog.V(2).Infof("Unable to register %s with the apiserver: %v", node.Name, err)
|
||||
continue
|
||||
}
|
||||
glog.Infof("Successfully registered node %s", node.Name)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// syncNodeStatus periodically synchronizes node status to master.
|
||||
func (kl *Kubelet) syncNodeStatus() {
|
||||
if kl.kubeClient == nil {
|
||||
return
|
||||
}
|
||||
glog.Infof("Starting node status updates")
|
||||
for feq := initialNodeStatusUpdateFrequency; feq < kl.nodeStatusUpdateFrequency; feq += nodeStatusUpdateFrequencyInc {
|
||||
select {
|
||||
case <-time.After(feq):
|
||||
if err := kl.updateNodeStatus(); err != nil {
|
||||
glog.Errorf("Unable to update node status: %v", err)
|
||||
}
|
||||
}
|
||||
if kl.registerNode {
|
||||
kl.registerWithApiserver()
|
||||
}
|
||||
glog.Infof("Starting node status updates")
|
||||
for {
|
||||
select {
|
||||
case <-time.After(kl.nodeStatusUpdateFrequency):
|
||||
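registerWithApiserver retries forever, sleeping before each attempt with a doubling delay capped at 7 seconds, and rebuilds the api.Node object on every pass so a cloud lookup that initially failed can still succeed later. The resulting backoff schedule, as a standalone sketch:

// Standalone illustration of the backoff schedule in registerWithApiserver:
// the delay doubles per attempt and is capped at 7s.
package main

import (
	"fmt"
	"time"
)

func main() {
	step := 100 * time.Millisecond
	for i := 0; i < 10; i++ {
		fmt.Printf("attempt %d: sleep %v\n", i+1, step)
		step *= 2
		if step >= 7*time.Second {
			step = 7 * time.Second
		}
	}
	// Prints 100ms, 200ms, 400ms, ..., 6.4s, then 7s thereafter.
}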
@ -1706,14 +1766,13 @@ func (kl *Kubelet) reconcileCBR0(podCIDR string) error {
// updateNodeStatus updates node status to master with retries.
func (kl *Kubelet) updateNodeStatus() error {
for i := 0; i < nodeStatusUpdateRetry; i++ {
err := kl.tryUpdateNodeStatus()
if err != nil {
glog.Errorf("error updating node status, will retry: %v", err)
if err := kl.tryUpdateNodeStatus(); err != nil {
glog.Errorf("Error updating node status, will retry: %v", err)
} else {
return nil
}
}
return fmt.Errorf("Update node status exceeds retry count")
return fmt.Errorf("update node status exceeds retry count")
}

func (kl *Kubelet) recordNodeStatusEvent(event string) {
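updateNodeStatus bounds its retries at nodeStatusUpdateRetry (5) and, in the same hunk, the returned error string is lowercased, matching the Go convention that error strings should not be capitalized. The retry shape in isolation; a sketch where try stands in for tryUpdateNodeStatus (assumes import "fmt"):

// retry runs try up to attempts times, returning nil on the first success.
func retry(attempts int, try func() error) error {
	for i := 0; i < attempts; i++ {
		if err := try(); err != nil {
			continue // the kubelet logs the error and retries here
		}
		return nil
	}
	return fmt.Errorf("exceeded retry count (%d)", attempts)
}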
@ -1726,15 +1785,36 @@ func (kl *Kubelet) recordNodeStatusEvent(event string) {
// Maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
var oldNodeUnschedulable bool

// tryUpdateNodeStatus tries to update node status to master. If ReconcileCBR0
// is set, this function will also confirm that cbr0 is configured correctly.
func (kl *Kubelet) tryUpdateNodeStatus() error {
node, err := kl.kubeClient.Nodes().Get(kl.hostname)
if err != nil {
return fmt.Errorf("error getting node %q: %v", kl.hostname, err)
}
if node == nil {
return fmt.Errorf("no node instance returned for %q", kl.hostname)
// setNodeStatus fills in the Status fields of the given Node, overwriting
// any fields that are currently set.
func (kl *Kubelet) setNodeStatus(node *api.Node) error {
// Set addresses for the node.
if kl.cloud != nil {
instances, ok := kl.cloud.Instances()
if !ok {
return fmt.Errorf("failed to get instances from cloud provider")
}
// TODO(roberthbailey): Can we do this without having credentials to talk
// to the cloud provider?
nodeAddresses, err := instances.NodeAddresses(kl.hostname)
if err != nil {
return fmt.Errorf("failed to get node address from cloud provider: %v", err)
}
node.Status.Addresses = nodeAddresses
} else {
addr := net.ParseIP(kl.hostname)
if addr != nil {
node.Status.Addresses = []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: addr.String()}}
} else {
addrs, err := net.LookupIP(node.Name)
if err != nil {
return fmt.Errorf("can't get ip address of node %s: %v", node.Name, err)
} else if len(addrs) == 0 {
return fmt.Errorf("no ip address for node %v", node.Name)
} else {
node.Status.Addresses = []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: addrs[0].String()}}
}
}
}

networkConfigured := true
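setNodeStatus resolves node addresses in a fixed order: ask the cloud provider's Instances().NodeAddresses() first; otherwise treat the hostname as a literal IP; otherwise fall back to a DNS lookup and take the first address. The non-cloud fallback as a runnable sketch:

// Standalone sketch of the non-cloud fallback in setNodeStatus: treat the
// hostname as a literal IP if it parses, else resolve it via DNS.
package main

import (
	"fmt"
	"net"
)

func hostAddress(hostname string) (string, error) {
	if ip := net.ParseIP(hostname); ip != nil {
		return ip.String(), nil
	}
	addrs, err := net.LookupIP(hostname)
	if err != nil {
		return "", fmt.Errorf("can't get ip address of %s: %v", hostname, err)
	}
	if len(addrs) == 0 {
		return "", fmt.Errorf("no ip address for %s", hostname)
	}
	return addrs[0].String(), nil
}

func main() {
	addr, err := hostAddress("127.0.0.1")
	fmt.Println(addr, err) // 127.0.0.1 <nil>: a literal IP short-circuits DNS
}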
@ -1749,7 +1829,13 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
// cAdvisor locally, e.g. for test-cmd.sh, and in integration test.
info, err := kl.GetCachedMachineInfo()
if err != nil {
glog.Errorf("error getting machine info: %v", err)
// TODO(roberthbailey): This is required for test-cmd.sh to pass.
// See if the test should be updated instead.
node.Status.Capacity = api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(0, resource.DecimalSI),
api.ResourceMemory: resource.MustParse("0Gi"),
}
glog.Errorf("Error getting machine info: %v", err)
} else {
node.Status.NodeInfo.MachineID = info.MachineID
node.Status.NodeInfo.SystemUUID = info.SystemUUID
@ -1768,7 +1854,7 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {

verinfo, err := kl.cadvisor.VersionInfo()
if err != nil {
glog.Errorf("error getting version info: %v", err)
glog.Errorf("Error getting version info: %v", err)
} else {
node.Status.NodeInfo.KernelVersion = verinfo.KernelVersion
node.Status.NodeInfo.OsImage = verinfo.ContainerOsVersion
@ -1844,7 +1930,22 @@ func (kl *Kubelet) tryUpdateNodeStatus() error {
}
oldNodeUnschedulable = node.Spec.Unschedulable
}
return nil
}

// tryUpdateNodeStatus tries to update node status to master. If ReconcileCBR0
// is set, this function will also confirm that cbr0 is configured correctly.
func (kl *Kubelet) tryUpdateNodeStatus() error {
node, err := kl.kubeClient.Nodes().Get(kl.hostname)
if err != nil {
return fmt.Errorf("error getting node %q: %v", kl.hostname, err)
}
if node == nil {
return fmt.Errorf("no node instance returned for %q", kl.hostname)
}
if err := kl.setNodeStatus(node); err != nil {
return err
}
// Update the current status on the API server
_, err = kl.kubeClient.Nodes().UpdateStatus(node)
return err
|
@ -34,6 +34,7 @@ import (
"time"

"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
apierrors "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/capabilities"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
@ -44,6 +45,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/metrics"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/network"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/version"
@ -3239,7 +3241,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
kubelet := testKubelet.kubelet
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactFn = testclient.NewSimpleFake(&api.NodeList{Items: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: "testnode"}},
{ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"}},
}}).ReactFn
machineInfo := &cadvisorApi.MachineInfo{
MachineID: "123",
@ -3257,7 +3259,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
expectedNode := &api.Node{
ObjectMeta: api.ObjectMeta{Name: "testnode"},
ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
@ -3284,6 +3286,7 @@ func TestUpdateNewNodeStatus(t *testing.T) {
api.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
Addresses: []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"}},
},
}

@ -3317,7 +3320,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactFn = testclient.NewSimpleFake(&api.NodeList{Items: []api.Node{
{
ObjectMeta: api.ObjectMeta{Name: "testnode"},
ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
@ -3353,7 +3356,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
expectedNode := &api.Node{
ObjectMeta: api.ObjectMeta{Name: "testnode"},
ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
@ -3380,6 +3383,7 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
api.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
Addresses: []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"}},
},
}

@ -3419,7 +3423,7 @@ func TestUpdateNodeStatusWithoutContainerRuntime(t *testing.T) {
fakeDocker.VersionInfo = []string{}

kubeClient.ReactFn = testclient.NewSimpleFake(&api.NodeList{Items: []api.Node{
{ObjectMeta: api.ObjectMeta{Name: "testnode"}},
{ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"}},
}}).ReactFn
mockCadvisor := testKubelet.fakeCadvisor
machineInfo := &cadvisorApi.MachineInfo{
@ -3438,7 +3442,7 @@ func TestUpdateNodeStatusWithoutContainerRuntime(t *testing.T) {
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)

expectedNode := &api.Node{
ObjectMeta: api.ObjectMeta{Name: "testnode"},
ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"},
Spec: api.NodeSpec{},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
@ -3465,6 +3469,7 @@ func TestUpdateNodeStatusWithoutContainerRuntime(t *testing.T) {
api.ResourceMemory: *resource.NewQuantity(1024, resource.BinarySI),
api.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
},
Addresses: []api.NodeAddress{{Type: api.NodeLegacyHostIP, Address: "127.0.0.1"}},
},
}

@ -4402,6 +4407,62 @@ func TestFilterOutTerminatedPods(t *testing.T) {
}
}

func TestRegisterExistingNodeWithApiserver(t *testing.T) {
testKubelet := newTestKubelet(t)
kubelet := testKubelet.kubelet
kubelet.hostname = "127.0.0.1"
kubeClient := testKubelet.fakeKubeClient
kubeClient.ReactFn = func(action testclient.FakeAction) (runtime.Object, error) {
segments := strings.Split(action.Action, "-")
if len(segments) < 2 {
return nil, fmt.Errorf("unrecognized action, need two or three segments <verb>-<resource> or <verb>-<subresource>-<resource>: %s", action.Action)
}
verb := segments[0]
switch verb {
case "create":
// Return an error on create.
return &api.Node{}, &apierrors.StatusError{
ErrStatus: api.Status{Reason: api.StatusReasonAlreadyExists},
}
case "get":
// Return an existing (matching) node on get.
return &api.Node{
ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"},
Spec: api.NodeSpec{ExternalID: "127.0.0.1"},
}, nil
default:
return nil, fmt.Errorf("no reaction implemented for %s", action.Action)
}
}
machineInfo := &cadvisorApi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 1024,
}
mockCadvisor := testKubelet.fakeCadvisor
mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
versionInfo := &cadvisorApi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
DockerVersion: "1.5.0",
}
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)

done := make(chan struct{})
go func() {
kubelet.registerWithApiserver()
done <- struct{}{}
}()
select {
case <-time.After(5 * time.Second):
t.Errorf("timed out waiting for registration")
case <-done:
return
}
}
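This test drives registerWithApiserver end to end through a ReactFn fake: "create" answers with StatusReasonAlreadyExists, steering the kubelet to "get", whose ExternalID matches, so registration returns without re-creating the node. Since the register loop would otherwise retry forever, the test bounds it by racing a done channel against a timeout; the pattern in isolation, where blockingCall is a hypothetical stand-in for kubelet.registerWithApiserver:

// Bound a potentially non-terminating call in a test: run it in a goroutine
// and select on completion versus a deadline.
done := make(chan struct{})
go func() {
	blockingCall() // hypothetical stand-in
	close(done)
}()
select {
case <-done:
case <-time.After(5 * time.Second):
	t.Fatal("timed out")
}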
func TestMakePortMappings(t *testing.T) {
tests := []struct {
container *api.Container