diff --git a/cluster/gce/configure-vm.sh b/cluster/gce/configure-vm.sh
index 9ac82ace2df..c73dfb8921d 100644
--- a/cluster/gce/configure-vm.sh
+++ b/cluster/gce/configure-vm.sh
@@ -501,7 +501,7 @@ EOF
     cat <<EOF >>/etc/salt/minion.d/grains.conf
   cloud_config: /etc/gce.conf
   advertise_address: '${EXTERNAL_IP}'
-  proxy_ssh_user: '${INSTANCE_PREFIX}'
+  proxy_ssh_user: '${PROXY_SSH_USER}'
 EOF
   fi
 }
diff --git a/pkg/cloudprovider/gce/gce.go b/pkg/cloudprovider/gce/gce.go
index 3ecf3dfb23c..80649a682da 100644
--- a/pkg/cloudprovider/gce/gce.go
+++ b/pkg/cloudprovider/gce/gce.go
@@ -22,7 +22,6 @@ import (
 	"io/ioutil"
 	"net"
 	"net/http"
-	"os"
 	"path"
 	"strconv"
 	"strings"
@@ -490,12 +489,7 @@ func (gce *GCECloud) AddSSHKeyToAllInstances(user string, keyData []byte) error
 			glog.Errorf("Could not get project: %v", err)
 			return false, nil
 		}
-		hostname, err := os.Hostname()
-		if err != nil {
-			glog.Errorf("Could not get hostname: %v", err)
-			return false, nil
-		}
-		keyString := fmt.Sprintf("%s:%s %s@%s", user, strings.TrimSpace(string(keyData)), user, hostname)
+		keyString := fmt.Sprintf("%s:%s %s@%s", user, strings.TrimSpace(string(keyData)), user, user)
 		found := false
 		for _, item := range project.CommonInstanceMetadata.Items {
 			if item.Key == "sshKeys" {
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index 5d422690494..6ef3f473806 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -1663,6 +1663,11 @@ func (kl *Kubelet) admitPods(allPods []*api.Pod, podSyncTypes map[types.UID]Sync
 func (kl *Kubelet) syncLoop(updates <-chan PodUpdate, handler SyncHandler) {
 	glog.Info("Starting kubelet main sync loop.")
 	for {
+		if !kl.containerRuntimeUp() {
+			time.Sleep(5 * time.Second)
+			glog.Infof("Skipping pod synchronization, container runtime is not up.")
+			continue
+		}
 		unsyncedPod := false
 		podSyncTypes := make(map[types.UID]SyncPodType)
 		select {
@@ -1923,11 +1928,7 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
 	}
 
 	// Check whether container runtime can be reported as up.
-	containerRuntimeUp := func() bool {
-		kl.runtimeMutex.Lock()
-		defer kl.runtimeMutex.Unlock()
-		return kl.lastTimestampRuntimeUp.Add(kl.runtimeUpThreshold).After(time.Now())
-	}()
+	containerRuntimeUp := kl.containerRuntimeUp()
 
 	currentTime := util.Now()
 	var newNodeReadyCondition api.NodeCondition
@@ -1990,6 +1991,12 @@ func (kl *Kubelet) setNodeStatus(node *api.Node) error {
 	return nil
 }
 
+func (kl *Kubelet) containerRuntimeUp() bool {
+	kl.runtimeMutex.Lock()
+	defer kl.runtimeMutex.Unlock()
+	return kl.lastTimestampRuntimeUp.Add(kl.runtimeUpThreshold).After(time.Now())
+}
+
 // tryUpdateNodeStatus tries to update node status to master. If ReconcileCBR0
 // is set, this function will also confirm that cbr0 is configured correctly.
 func (kl *Kubelet) tryUpdateNodeStatus() error {