Revert "Allow nodename to be != hostname, use AWS instance ID on AWS"

Satnam Singh 2015-06-18 11:27:55 -07:00
parent fb07b34cb8
commit e4f5529a2d
17 changed files with 113 additions and 166 deletions

View File

@ -565,30 +565,8 @@ func SimpleKubelet(client *client.Client,
// Eventually, #2 will be replaced with instances of #3
func RunKubelet(kcfg *KubeletConfig, builder KubeletBuilder) error {
kcfg.Hostname = nodeutil.GetHostname(kcfg.HostnameOverride)
if kcfg.NodeName == "" {
// Query the cloud provider for our node name, default to Hostname
nodeName := kcfg.Hostname
if kcfg.Cloud != nil {
var err error
instances, ok := kcfg.Cloud.Instances()
if !ok {
return fmt.Errorf("failed to get instances from cloud provider")
}
nodeName, err = instances.CurrentNodeName(kcfg.Hostname)
if err != nil {
return fmt.Errorf("error fetching current instance name from cloud provider: %v", err)
}
glog.V(2).Infof("cloud provider determined current node name to be %s", nodeName)
}
kcfg.NodeName = nodeName
}
eventBroadcaster := record.NewBroadcaster()
kcfg.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet", Host: kcfg.NodeName})
kcfg.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: "kubelet", Host: kcfg.Hostname})
eventBroadcaster.StartLogging(glog.Infof)
if kcfg.KubeClient != nil {
glog.V(4).Infof("Sending events to api server.")
@ -647,17 +625,17 @@ func makePodSourceConfig(kc *KubeletConfig) *config.PodConfig {
// define file config source
if kc.ConfigFile != "" {
glog.Infof("Adding manifest file: %v", kc.ConfigFile)
config.NewSourceFile(kc.ConfigFile, kc.NodeName, kc.FileCheckFrequency, cfg.Channel(kubelet.FileSource))
config.NewSourceFile(kc.ConfigFile, kc.Hostname, kc.FileCheckFrequency, cfg.Channel(kubelet.FileSource))
}
// define url config source
if kc.ManifestURL != "" {
glog.Infof("Adding manifest url: %v", kc.ManifestURL)
config.NewSourceURL(kc.ManifestURL, kc.NodeName, kc.HTTPCheckFrequency, cfg.Channel(kubelet.HTTPSource))
config.NewSourceURL(kc.ManifestURL, kc.Hostname, kc.HTTPCheckFrequency, cfg.Channel(kubelet.HTTPSource))
}
if kc.KubeClient != nil {
glog.Infof("Watching apiserver")
config.NewSourceApiserver(kc.KubeClient, kc.NodeName, cfg.Channel(kubelet.ApiserverSource))
config.NewSourceApiserver(kc.KubeClient, kc.Hostname, cfg.Channel(kubelet.ApiserverSource))
}
return cfg
}
@ -678,7 +656,6 @@ type KubeletConfig struct {
FileCheckFrequency time.Duration
HTTPCheckFrequency time.Duration
Hostname string
NodeName string
PodInfraContainerImage string
SyncFrequency time.Duration
RegistryPullQPS float64
@ -738,7 +715,6 @@ func createAndInitKubelet(kc *KubeletConfig) (k KubeletBootstrap, pc *config.Pod
pc = makePodSourceConfig(kc)
k, err = kubelet.NewMainKubelet(
kc.Hostname,
kc.NodeName,
kc.DockerClient,
kubeClient,
kc.RootDirectory,

View File

@ -259,8 +259,6 @@ func (s *KubeletExecutorServer) Run(hks hyperkube.Interface, _ []string) error {
DockerExecHandler: dockerExecHandler,
}
kcfg.NodeName = kcfg.Hostname
err = app.RunKubelet(&kcfg, app.KubeletBuilder(func(kc *app.KubeletConfig) (app.KubeletBootstrap, *kconfig.PodConfig, error) {
return s.createAndInitKubelet(kc, hks, clientConfig, shutdownCloser)
}))
@ -321,7 +319,6 @@ func (ks *KubeletExecutorServer) createAndInitKubelet(
klet, err := kubelet.NewMainKubelet(
kc.Hostname,
kc.NodeName,
kc.DockerClient,
kubeClient,
kc.RootDirectory,

View File

@ -234,14 +234,6 @@ func (self *AWSCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error
return errors.New("unimplemented")
}
func (a *AWSCloud) CurrentNodeName(hostname string) (string, error) {
selfInstance, err := a.getSelfAWSInstance()
if err != nil {
return "", err
}
return selfInstance.awsID, nil
}
// Implementation of EC2.Instances
func (self *awsSdkEC2) DescribeInstances(request *ec2.DescribeInstancesInput) ([]*ec2.Instance, error) {
// Instances are paged
@ -555,7 +547,7 @@ func (aws *AWSCloud) Routes() (cloudprovider.Routes, bool) {
// NodeAddresses is an implementation of Instances.NodeAddresses.
func (aws *AWSCloud) NodeAddresses(name string) ([]api.NodeAddress, error) {
instance, err := aws.getInstanceById(name)
instance, err := aws.getInstanceByDnsName(name)
if err != nil {
return nil, err
}
@ -589,8 +581,7 @@ func (aws *AWSCloud) NodeAddresses(name string) ([]api.NodeAddress, error) {
// ExternalID returns the cloud provider ID of the specified instance (deprecated).
func (aws *AWSCloud) ExternalID(name string) (string, error) {
// TODO: Do we need to verify it exists, or can we just return name
inst, err := aws.getInstanceById(name)
inst, err := aws.getInstanceByDnsName(name)
if err != nil {
return "", err
}
@ -599,8 +590,7 @@ func (aws *AWSCloud) ExternalID(name string) (string, error) {
// InstanceID returns the cloud provider ID of the specified instance.
func (aws *AWSCloud) InstanceID(name string) (string, error) {
// TODO: Do we need to verify it exists, or can we just construct it knowing our AZ (or via caching?)
inst, err := aws.getInstanceById(name)
inst, err := aws.getInstanceByDnsName(name)
if err != nil {
return "", err
}
@ -609,6 +599,46 @@ func (aws *AWSCloud) InstanceID(name string) (string, error) {
return "/" + orEmpty(inst.Placement.AvailabilityZone) + "/" + orEmpty(inst.InstanceID), nil
}
// Return the instances matching the relevant private dns name.
func (s *AWSCloud) getInstanceByDnsName(name string) (*ec2.Instance, error) {
filters := []*ec2.Filter{
newEc2Filter("private-dns-name", name),
}
filters = s.addFilters(filters)
request := &ec2.DescribeInstancesInput{
Filters: filters,
}
instances, err := s.ec2.DescribeInstances(request)
if err != nil {
return nil, err
}
matchingInstances := []*ec2.Instance{}
for _, instance := range instances {
// TODO: Push running logic down into filter?
if !isAlive(instance) {
continue
}
if orEmpty(instance.PrivateDNSName) != name {
// TODO: Should we warn here? - the filter should have caught this
// (this will happen in the tests if they don't fully mock the EC2 API)
continue
}
matchingInstances = append(matchingInstances, instance)
}
if len(matchingInstances) == 0 {
return nil, fmt.Errorf("no instances found for host: %s", name)
}
if len(matchingInstances) > 1 {
return nil, fmt.Errorf("multiple instances found for host: %s", name)
}
return matchingInstances[0], nil
}
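
Two helpers used by getInstanceByDnsName above, orEmpty and newEc2Filter, sit outside this hunk. A minimal sketch of what they presumably look like (same package, so the aws and ec2 imports are already in scope; the real bodies in aws.go may differ in logging and error handling):

// orEmpty dereferences an aws-sdk-go *string, treating nil as "".
func orEmpty(s *string) string {
	if s == nil {
		return ""
	}
	return *s
}

// newEc2Filter builds a single-valued DescribeInstances filter.
func newEc2Filter(name string, value string) *ec2.Filter {
	return &ec2.Filter{
		Name:   aws.String(name),
		Values: []*string{aws.String(value)},
	}
}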
// Check if the instance is alive (running or pending)
// We typically ignore instances that are not alive
func isAlive(instance *ec2.Instance) bool {
@ -668,9 +698,16 @@ func (s *AWSCloud) getInstancesByRegex(regex string) ([]string, error) {
continue
}
privateDNSName := orEmpty(instance.PrivateDNSName)
if privateDNSName == "" {
glog.V(2).Infof("skipping EC2 instance (no PrivateDNSName): %s",
orEmpty(instance.InstanceID))
continue
}
for _, tag := range instance.Tags {
if orEmpty(tag.Key) == "Name" && re.MatchString(orEmpty(tag.Value)) {
matchingInstances = append(matchingInstances, orEmpty(instance.InstanceID))
matchingInstances = append(matchingInstances, privateDNSName)
break
}
}
@ -687,7 +724,7 @@ func (aws *AWSCloud) List(filter string) ([]string, error) {
// GetNodeResources implements Instances.GetNodeResources
func (aws *AWSCloud) GetNodeResources(name string) (*api.NodeResources, error) {
instance, err := aws.getInstanceById(name)
instance, err := aws.getInstanceByDnsName(name)
if err != nil {
return nil, err
}
@ -1160,7 +1197,7 @@ func (aws *AWSCloud) getAwsInstance(instanceName string) (*awsInstance, error) {
return nil, fmt.Errorf("error getting self-instance: %v", err)
}
} else {
instance, err := aws.getInstanceById(instanceName)
instance, err := aws.getInstanceByDnsName(instanceName)
if err != nil {
return nil, fmt.Errorf("error finding instance: %v", err)
}
@ -1627,7 +1664,7 @@ func (s *AWSCloud) CreateTCPLoadBalancer(name, region string, publicIP net.IP, p
return nil, fmt.Errorf("publicIP cannot be specified for AWS ELB")
}
instances, err := s.getInstancesByIds(hosts)
instances, err := s.getInstancesByDnsNames(hosts)
if err != nil {
return nil, err
}
@ -2039,7 +2076,7 @@ func (s *AWSCloud) EnsureTCPLoadBalancerDeleted(name, region string) error {
// UpdateTCPLoadBalancer implements TCPLoadBalancer.UpdateTCPLoadBalancer
func (s *AWSCloud) UpdateTCPLoadBalancer(name, region string, hosts []string) error {
instances, err := s.getInstancesByIds(hosts)
instances, err := s.getInstancesByDnsNames(hosts)
if err != nil {
return err
}
@ -2114,40 +2151,21 @@ func (s *AWSCloud) UpdateTCPLoadBalancer(name, region string, hosts []string) er
}
// TODO: Make efficient
func (a *AWSCloud) getInstancesByIds(ids []string) ([]*ec2.Instance, error) {
func (a *AWSCloud) getInstancesByDnsNames(names []string) ([]*ec2.Instance, error) {
instances := []*ec2.Instance{}
for _, id := range ids {
instance, err := a.getInstanceById(id)
for _, name := range names {
instance, err := a.getInstanceByDnsName(name)
if err != nil {
return nil, err
}
if instance == nil {
return nil, fmt.Errorf("unable to find instance " + id)
return nil, fmt.Errorf("unable to find instance " + name)
}
instances = append(instances, instance)
}
return instances, nil
}
// Returns the instance with the specified ID
func (a *AWSCloud) getInstanceById(instanceID string) (*ec2.Instance, error) {
request := &ec2.DescribeInstancesInput{
InstanceIDs: []*string{&instanceID},
}
instances, err := a.ec2.DescribeInstances(request)
if err != nil {
return nil, err
}
if len(instances) == 0 {
return nil, fmt.Errorf("no instances found for instance: %s", instanceID)
}
if len(instances) > 1 {
return nil, fmt.Errorf("multiple instances found for instance: %s", instanceID)
}
return instances[0], nil
}
// Add additional filters, to match on our tags
// This lets us run multiple k8s clusters in a single EC2 AZ
func (s *AWSCloud) addFilters(filters []*ec2.Filter) []*ec2.Filter {
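
The body of addFilters is likewise not part of this hunk. A hedged sketch of the idea the comment above describes; the filterTags field and the tag-key scheme here are assumptions, not taken from this diff:

// Sketch only: append one filter per cluster-identifying tag so that lookups
// such as getInstanceByDnsName never match instances from another cluster
// sharing the same account and availability zone.
func (s *AWSCloud) addFilters(filters []*ec2.Filter) []*ec2.Filter {
	for k, v := range s.filterTags { // assumed map[string]string of cluster tags
		filters = append(filters, newEc2Filter("tag:"+k, v))
	}
	return filters
}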

View File

@ -412,7 +412,7 @@ func TestList(t *testing.T) {
Value: aws.String("foo"),
}
instance0.Tags = []*ec2.Tag{&tag0}
instance0.InstanceID = aws.String("instance0")
instance0.PrivateDNSName = aws.String("instance1")
state0 := ec2.InstanceState{
Name: aws.String("running"),
}
@ -424,7 +424,7 @@ func TestList(t *testing.T) {
Value: aws.String("bar"),
}
instance1.Tags = []*ec2.Tag{&tag1}
instance1.InstanceID = aws.String("instance1")
instance1.PrivateDNSName = aws.String("instance2")
state1 := ec2.InstanceState{
Name: aws.String("running"),
}
@ -436,7 +436,7 @@ func TestList(t *testing.T) {
Value: aws.String("baz"),
}
instance2.Tags = []*ec2.Tag{&tag2}
instance2.InstanceID = aws.String("instance2")
instance2.PrivateDNSName = aws.String("instance3")
state2 := ec2.InstanceState{
Name: aws.String("running"),
}
@ -448,7 +448,7 @@ func TestList(t *testing.T) {
Value: aws.String("quux"),
}
instance3.Tags = []*ec2.Tag{&tag3}
instance3.InstanceID = aws.String("instance3")
instance3.PrivateDNSName = aws.String("instance4")
state3 := ec2.InstanceState{
Name: aws.String("running"),
}
@ -462,8 +462,8 @@ func TestList(t *testing.T) {
expect []string
}{
{"blahonga", []string{}},
{"quux", []string{"instance3"}},
{"a", []string{"instance1", "instance2"}},
{"quux", []string{"instance4"}},
{"a", []string{"instance2", "instance3"}},
}
for _, item := range table {
@ -493,7 +493,7 @@ func TestNodeAddresses(t *testing.T) {
var instance1 ec2.Instance
//0
instance0.InstanceID = aws.String("instance-same")
instance0.PrivateDNSName = aws.String("instance1")
instance0.PrivateIPAddress = aws.String("192.168.0.1")
instance0.PublicIPAddress = aws.String("1.2.3.4")
instance0.InstanceType = aws.String("c3.large")
@ -503,7 +503,7 @@ func TestNodeAddresses(t *testing.T) {
instance0.State = &state0
//1
instance1.InstanceID = aws.String("instance-same")
instance1.PrivateDNSName = aws.String("instance1")
instance1.PrivateIPAddress = aws.String("192.168.0.2")
instance1.InstanceType = aws.String("c3.large")
state1 := ec2.InstanceState{
@ -514,19 +514,19 @@ func TestNodeAddresses(t *testing.T) {
instances := []*ec2.Instance{&instance0, &instance1}
aws1 := mockInstancesResp([]*ec2.Instance{})
_, err1 := aws1.NodeAddresses("instance-mismatch")
_, err1 := aws1.NodeAddresses("instance")
if err1 == nil {
t.Errorf("Should error when no instance found")
}
aws2 := mockInstancesResp(instances)
_, err2 := aws2.NodeAddresses("instance-same")
_, err2 := aws2.NodeAddresses("instance1")
if err2 == nil {
t.Errorf("Should error when multiple instances found")
}
aws3 := mockInstancesResp(instances[0:1])
addrs3, err3 := aws3.NodeAddresses("instance-same")
addrs3, err3 := aws3.NodeAddresses("instance1")
if err3 != nil {
t.Errorf("Should not error when instance found")
}
@ -562,7 +562,7 @@ func TestGetResources(t *testing.T) {
var instance2 ec2.Instance
//0
instance0.InstanceID = aws.String("m3.medium")
instance0.PrivateDNSName = aws.String("m3.medium")
instance0.InstanceType = aws.String("m3.medium")
state0 := ec2.InstanceState{
Name: aws.String("running"),
@ -570,7 +570,7 @@ func TestGetResources(t *testing.T) {
instance0.State = &state0
//1
instance1.InstanceID = aws.String("r3.8xlarge")
instance1.PrivateDNSName = aws.String("r3.8xlarge")
instance1.InstanceType = aws.String("r3.8xlarge")
state1 := ec2.InstanceState{
Name: aws.String("running"),
@ -578,7 +578,7 @@ func TestGetResources(t *testing.T) {
instance1.State = &state1
//2
instance2.InstanceID = aws.String("unknown.type")
instance2.PrivateDNSName = aws.String("unknown.type")
instance2.InstanceType = aws.String("unknown.type")
state2 := ec2.InstanceState{
Name: aws.String("running"),

View File

@ -111,9 +111,6 @@ type Instances interface {
// AddSSHKeyToAllInstances adds an SSH public key as a legal identity for all instances
// expected format for the key is standard ssh-keygen format: <protocol> <blob>
AddSSHKeyToAllInstances(user string, keyData []byte) error
// Returns the name of the node we are currently running on
// On most clouds (e.g. GCE) this is the hostname, so we provide the hostname
CurrentNodeName(hostname string) (string, error)
}
// Route is a representation of an advanced routing rule.

View File

@ -149,11 +149,6 @@ func (f *FakeCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
return errors.New("unimplemented")
}
// Implementation of Instances.CurrentNodeName
func (f *FakeCloud) CurrentNodeName(hostname string) (string, error) {
return hostname, nil
}
// NodeAddresses is a test-spy implementation of Instances.NodeAddresses.
// It adds an entry "node-addresses" into the internal method call record.
func (f *FakeCloud) NodeAddresses(instance string) ([]api.NodeAddress, error) {

View File

@ -483,11 +483,6 @@ func (gce *GCECloud) getInstanceByName(name string) (*compute.Instance, error) {
return res, nil
}
// Implementation of Instances.CurrentNodeName
func (gce *GCECloud) CurrentNodeName(hostname string) (string, error) {
return hostname, nil
}
func (gce *GCECloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
return wait.Poll(2*time.Second, 30*time.Second, func() (bool, error) {
project, err := gce.service.Projects.Get(gce.projectID).Do()

View File

@ -78,11 +78,6 @@ func newMesosCloud(configReader io.Reader) (*MesosCloud, error) {
}
}
// Implementation of Instances.CurrentNodeName
func (c *MesosCloud) CurrentNodeName(hostname string) (string, error) {
return hostname, nil
}
func (c *MesosCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
return errors.New("unimplemented")
}

View File

@ -317,11 +317,6 @@ func getAddressByName(api *gophercloud.ServiceClient, name string) (string, erro
return s, nil
}
// Implementation of Instances.CurrentNodeName
func (i *Instances) CurrentNodeName(hostname string) (string, error) {
return hostname, nil
}
func (i *Instances) AddSSHKeyToAllInstances(user string, keyData []byte) error {
return errors.New("unimplemented")
}

View File

@ -275,11 +275,6 @@ func (v *OVirtCloud) GetNodeResources(name string) (*api.NodeResources, error) {
return nil, nil
}
// Implementation of Instances.CurrentNodeName
func (v *OVirtCloud) CurrentNodeName(hostname string) (string, error) {
return hostname, nil
}
func (v *OVirtCloud) AddSSHKeyToAllInstances(user string, keyData []byte) error {
return errors.New("unimplemented")
}

View File

@ -380,11 +380,6 @@ func (i *Instances) AddSSHKeyToAllInstances(user string, keyData []byte) error {
return errors.New("unimplemented")
}
// Implementation of Instances.CurrentNodeName
func (i *Instances) CurrentNodeName(hostname string) (string, error) {
return hostname, nil
}
func (i *Instances) GetNodeResources(name string) (*api.NodeResources, error) {
glog.V(2).Infof("GetNodeResources(%v) called", name)

View File

@ -135,11 +135,6 @@ func (v *VagrantCloud) AddSSHKeyToAllInstances(user string, keyData []byte) erro
return errors.New("unimplemented")
}
// Implementation of Instances.CurrentNodeName
func (v *VagrantCloud) CurrentNodeName(hostname string) (string, error) {
return hostname, nil
}
// NodeAddresses returns the NodeAddresses of a particular machine instance.
func (v *VagrantCloud) NodeAddresses(instance string) ([]api.NodeAddress, error) {
// Due to vagrant not running with a dedicated DNS setup, we return the IP address of a minion as its hostname at this time

View File

@ -26,8 +26,8 @@ import (
)
// NewSourceApiserver creates a config source that watches and pulls from the apiserver.
func NewSourceApiserver(c *client.Client, nodeName string, updates chan<- interface{}) {
lw := cache.NewListWatchFromClient(c, "pods", api.NamespaceAll, fields.OneTermEqualSelector(client.PodHost, nodeName))
func NewSourceApiserver(c *client.Client, hostname string, updates chan<- interface{}) {
lw := cache.NewListWatchFromClient(c, "pods", api.NamespaceAll, fields.OneTermEqualSelector(client.PodHost, hostname))
newSourceApiserverFromLW(lw, updates)
}

View File

@ -33,16 +33,16 @@ import (
"github.com/golang/glog"
)
// Generate a pod name that is unique among nodes by appending the nodeName.
func generatePodName(name, nodeName string) string {
return fmt.Sprintf("%s-%s", name, nodeName)
// Generate a pod name that is unique among nodes by appending the hostname.
func generatePodName(name, hostname string) string {
return fmt.Sprintf("%s-%s", name, hostname)
}
func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName string) error {
func applyDefaults(pod *api.Pod, source string, isFile bool, hostname string) error {
if len(pod.UID) == 0 {
hasher := md5.New()
if isFile {
fmt.Fprintf(hasher, "host:%s", nodeName)
fmt.Fprintf(hasher, "host:%s", hostname)
fmt.Fprintf(hasher, "file:%s", source)
} else {
fmt.Fprintf(hasher, "url:%s", source)
@ -57,7 +57,7 @@ func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName string) er
if len(pod.Name) == 0 {
pod.Name = string(pod.UID)
}
pod.Name = generatePodName(pod.Name, nodeName)
pod.Name = generatePodName(pod.Name, hostname)
glog.V(5).Infof("Generated Name %q for UID %q from URL %s", pod.Name, pod.UID, source)
if pod.Namespace == "" {
@ -66,7 +66,7 @@ func applyDefaults(pod *api.Pod, source string, isFile bool, nodeName string) er
glog.V(5).Infof("Using namespace %q for pod %q from %s", pod.Namespace, pod.Name, source)
// Set the Host field to indicate this pod is scheduled on the current node.
pod.Spec.NodeName = nodeName
pod.Spec.NodeName = hostname
pod.ObjectMeta.SelfLink = getSelfLink(pod.Name, pod.Namespace)
return nil

View File

@ -34,14 +34,14 @@ import (
type sourceFile struct {
path string
nodeName string
hostname string
updates chan<- interface{}
}
func NewSourceFile(path string, nodeName string, period time.Duration, updates chan<- interface{}) {
func NewSourceFile(path string, hostname string, period time.Duration, updates chan<- interface{}) {
config := &sourceFile{
path: path,
nodeName: nodeName,
hostname: hostname,
updates: updates,
}
glog.V(1).Infof("Watching path %q", path)
@ -55,7 +55,7 @@ func (s *sourceFile) run() {
}
func (s *sourceFile) applyDefaults(pod *api.Pod, source string) error {
return applyDefaults(pod, source, true, s.nodeName)
return applyDefaults(pod, source, true, s.hostname)
}
func (s *sourceFile) extractFromPath() error {

View File

@ -33,15 +33,15 @@ import (
type sourceURL struct {
url string
nodeName string
hostname string
updates chan<- interface{}
data []byte
}
func NewSourceURL(url, nodeName string, period time.Duration, updates chan<- interface{}) {
func NewSourceURL(url, hostname string, period time.Duration, updates chan<- interface{}) {
config := &sourceURL{
url: url,
nodeName: nodeName,
hostname: hostname,
updates: updates,
data: nil,
}
@ -56,7 +56,7 @@ func (s *sourceURL) run() {
}
func (s *sourceURL) applyDefaults(pod *api.Pod) error {
return applyDefaults(pod, s.url, false, s.nodeName)
return applyDefaults(pod, s.url, false, s.hostname)
}
func (s *sourceURL) extractFromURL() error {

View File

@ -114,7 +114,6 @@ func waitUntilRuntimeIsUp(cr kubecontainer.Runtime, timeout time.Duration) error
// New creates a new Kubelet for use in main
func NewMainKubelet(
hostname string,
nodeName string,
dockerClient dockertools.DockerInterface,
kubeClient client.Interface,
rootDirectory string,
@ -180,7 +179,7 @@ func NewMainKubelet(
if kubeClient != nil {
// TODO: cache.NewListWatchFromClient is limited as it takes a client implementation rather
// than an interface. There is no way to construct a list+watcher using resource name.
fieldSelector := fields.Set{client.ObjectNameField: nodeName}.AsSelector()
fieldSelector := fields.Set{client.ObjectNameField: hostname}.AsSelector()
listWatch := &cache.ListWatch{
ListFunc: func() (runtime.Object, error) {
return kubeClient.Nodes().List(labels.Everything(), fieldSelector)
@ -198,8 +197,8 @@ func NewMainKubelet(
// TODO: what is namespace for node?
nodeRef := &api.ObjectReference{
Kind: "Node",
Name: nodeName,
UID: types.UID(nodeName),
Name: hostname,
UID: types.UID(hostname),
Namespace: "",
}
@ -225,7 +224,6 @@ func NewMainKubelet(
klet := &Kubelet{
hostname: hostname,
nodeName: nodeName,
dockerClient: dockerClient,
kubeClient: kubeClient,
rootDirectory: rootDirectory,
@ -364,7 +362,6 @@ type nodeLister interface {
// Kubelet is the main kubelet implementation.
type Kubelet struct {
hostname string
nodeName string
dockerClient dockertools.DockerInterface
runtimeCache kubecontainer.RuntimeCache
kubeClient client.Interface
@ -640,13 +637,13 @@ func (kl *Kubelet) GetNode() (*api.Node, error) {
if err != nil {
return nil, errors.New("cannot list nodes")
}
nodeName := kl.nodeName
host := kl.GetHostname()
for _, n := range l.Items {
if n.Name == nodeName {
if n.Name == host {
return &n, nil
}
}
return nil, fmt.Errorf("node %v not found", nodeName)
return nil, fmt.Errorf("node %v not found", host)
}
// Starts garbage collection threads.
@ -712,7 +709,7 @@ func (kl *Kubelet) Run(updates <-chan PodUpdate) {
func (kl *Kubelet) initialNodeStatus() (*api.Node, error) {
node := &api.Node{
ObjectMeta: api.ObjectMeta{
Name: kl.nodeName,
Name: kl.hostname,
Labels: map[string]string{"kubernetes.io/hostname": kl.hostname},
},
}
@ -721,20 +718,18 @@ func (kl *Kubelet) initialNodeStatus() (*api.Node, error) {
if !ok {
return nil, fmt.Errorf("failed to get instances from cloud provider")
}
// TODO(roberthbailey): Can we do this without having credentials to talk
// to the cloud provider?
// TODO: ExternalID is deprecated, we'll have to drop this code
externalID, err := instances.ExternalID(kl.nodeName)
externalID, err := instances.ExternalID(kl.hostname)
if err != nil {
return nil, fmt.Errorf("failed to get external ID from cloud provider: %v", err)
}
node.Spec.ExternalID = externalID
// TODO: We can't assume that the node has credentials to talk to the
// cloudprovider from arbitrary nodes. At most, we should talk to a
// local metadata server here.
node.Spec.ProviderID, err = cloudprovider.GetInstanceProviderID(kl.cloud, kl.nodeName)
node.Spec.ProviderID, err = cloudprovider.GetInstanceProviderID(kl.cloud, kl.hostname)
if err != nil {
return nil, err
}
@ -765,13 +760,13 @@ func (kl *Kubelet) registerWithApiserver() {
glog.V(2).Infof("Attempting to register node %s", node.Name)
if _, err := kl.kubeClient.Nodes().Create(node); err != nil {
if apierrors.IsAlreadyExists(err) {
currentNode, err := kl.kubeClient.Nodes().Get(kl.nodeName)
currentNode, err := kl.kubeClient.Nodes().Get(kl.hostname)
if err != nil {
glog.Errorf("error getting node %q: %v", kl.nodeName, err)
glog.Errorf("error getting node %q: %v", kl.hostname, err)
continue
}
if currentNode == nil {
glog.Errorf("no node instance returned for %q", kl.nodeName)
glog.Errorf("no node instance returned for %q", kl.hostname)
continue
}
if currentNode.Spec.ExternalID == node.Spec.ExternalID {
@ -1835,10 +1830,10 @@ func (kl *Kubelet) updateNodeStatus() error {
}
func (kl *Kubelet) recordNodeStatusEvent(event string) {
glog.V(2).Infof("Recording %s event message for node %s", event, kl.nodeName)
glog.V(2).Infof("Recording %s event message for node %s", event, kl.hostname)
// TODO: This requires a transaction, either both node status is updated
// and event is recorded or neither should happen, see issue #6055.
kl.recorder.Eventf(kl.nodeRef, event, "Node %s status is now: %s", kl.nodeName, event)
kl.recorder.Eventf(kl.nodeRef, event, "Node %s status is now: %s", kl.hostname, event)
}
// Maintains Node.Spec.Unschedulable value from previous run of tryUpdateNodeStatus()
@ -1855,8 +1850,7 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
}
// TODO(roberthbailey): Can we do this without having credentials to talk
// to the cloud provider?
// TODO(justinsb): We can if CurrentNodeName() was actually CurrentNode() and returned an interface
nodeAddresses, err := instances.NodeAddresses(kl.nodeName)
nodeAddresses, err := instances.NodeAddresses(kl.hostname)
if err != nil {
return fmt.Errorf("failed to get node address from cloud provider: %v", err)
}
@ -1910,7 +1904,7 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
// TODO: This requires a transaction, either both node status is updated
// and event is recorded or neither should happen, see issue #6055.
kl.recorder.Eventf(kl.nodeRef, "rebooted",
"Node %s has been rebooted, boot id: %s", kl.nodeName, info.BootID)
"Node %s has been rebooted, boot id: %s", kl.hostname, info.BootID)
}
node.Status.NodeInfo.BootID = info.BootID
}
@ -1999,12 +1993,12 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
// tryUpdateNodeStatus tries to update node status to master. If ReconcileCBR0
// is set, this function will also confirm that cbr0 is configured correctly.
func (kl *Kubelet) tryUpdateNodeStatus() error {
node, err := kl.kubeClient.Nodes().Get(kl.nodeName)
node, err := kl.kubeClient.Nodes().Get(kl.hostname)
if err != nil {
return fmt.Errorf("error getting node %q: %v", kl.nodeName, err)
return fmt.Errorf("error getting node %q: %v", kl.hostname, err)
}
if node == nil {
return fmt.Errorf("no node instance returned for %q", kl.nodeName)
return fmt.Errorf("no node instance returned for %q", kl.hostname)
}
if err := kl.setNodeStatus(node); err != nil {
return err