From 6d29cfc0cc3253c905d6b888cde5095157dc2042 Mon Sep 17 00:00:00 2001
From: Klaus Ma
Date: Sat, 11 Mar 2017 09:42:44 +0800
Subject: [PATCH] Registered node before other initialization.

---
 pkg/kubelet/cm/BUILD                      |  1 +
 pkg/kubelet/cm/node_container_manager.go  | 23 +++++++++++++++++------
 pkg/kubelet/dockertools/docker_manager.go |  1 -
 pkg/kubelet/kubelet.go                    |  1 +
 4 files changed, 19 insertions(+), 7 deletions(-)

diff --git a/pkg/kubelet/cm/BUILD b/pkg/kubelet/cm/BUILD
index 8d91a13f147..367fd7c86e1 100644
--- a/pkg/kubelet/cm/BUILD
+++ b/pkg/kubelet/cm/BUILD
@@ -49,6 +49,7 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library",
         "//vendor/k8s.io/client-go/tools/record:go_default_library",
     ],
 )
diff --git a/pkg/kubelet/cm/node_container_manager.go b/pkg/kubelet/cm/node_container_manager.go
index 29b2fd1122e..f0ffb12879f 100644
--- a/pkg/kubelet/cm/node_container_manager.go
+++ b/pkg/kubelet/cm/node_container_manager.go
@@ -26,6 +26,8 @@ import (
 	"github.com/golang/glog"
 
 	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/types"
+	clientv1 "k8s.io/client-go/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/kubelet/events"
 	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
@@ -69,6 +71,15 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 		Name:               CgroupName(cm.cgroupRoot),
 		ResourceParameters: getCgroupConfig(nodeAllocatable),
 	}
+
+	// Using ObjectReference for events as the node may not be cached; refer to #42701 for detail.
+	nodeRef := &clientv1.ObjectReference{
+		Kind:      "Node",
+		Name:      cm.nodeInfo.Name,
+		UID:       types.UID(cm.nodeInfo.Name),
+		Namespace: "",
+	}
+
 	// If Node Allocatable is enforced on a node that has not been drained or is updated on an existing node to a lower value,
 	// existing memory usage across pods might be higher than current Node Allocatable Memory Limits.
 	// Pod Evictions are expected to bring down memory usage to below Node Allocatable limits.
@@ -79,11 +90,11 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 		for {
 			err := cm.cgroupManager.Update(cgroupConfig)
 			if err == nil {
-				cm.recorder.Event(cm.nodeInfo, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated Node Allocatable limit across pods")
+				cm.recorder.Event(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated Node Allocatable limit across pods")
 				return
 			}
 			message := fmt.Sprintf("Failed to update Node Allocatable Limits %q: %v", cm.cgroupRoot, err)
-			cm.recorder.Event(cm.nodeInfo, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
+			cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
 			time.Sleep(time.Minute)
 		}
 	}()
@@ -93,19 +104,19 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 		glog.V(2).Infof("Enforcing System reserved on cgroup %q with limits: %+v", nc.SystemReservedCgroupName, nc.SystemReserved)
 		if err := enforceExistingCgroup(cm.cgroupManager, nc.SystemReservedCgroupName, nc.SystemReserved); err != nil {
 			message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err)
-			cm.recorder.Event(cm.nodeInfo, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
+			cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
 			return fmt.Errorf(message)
 		}
-		cm.recorder.Eventf(cm.nodeInfo, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
+		cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
 	}
 	if nc.EnforceNodeAllocatable.Has(KubeReservedEnforcementKey) {
 		glog.V(2).Infof("Enforcing kube reserved on cgroup %q with limits: %+v", nc.KubeReservedCgroupName, nc.KubeReserved)
 		if err := enforceExistingCgroup(cm.cgroupManager, nc.KubeReservedCgroupName, nc.KubeReserved); err != nil {
 			message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err)
-			cm.recorder.Event(cm.nodeInfo, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
+			cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
 			return fmt.Errorf(message)
 		}
-		cm.recorder.Eventf(cm.nodeInfo, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName)
+		cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName)
 	}
 	return nil
 }
diff --git a/pkg/kubelet/dockertools/docker_manager.go b/pkg/kubelet/dockertools/docker_manager.go
index 0073988c5c3..a6d8489c26b 100644
--- a/pkg/kubelet/dockertools/docker_manager.go
+++ b/pkg/kubelet/dockertools/docker_manager.go
@@ -1887,7 +1887,6 @@ type versionInfo struct {
 // -1: older than expected version
 // 0 : same version
 func (dm *DockerManager) checkDockerAPIVersion(expectedVersion string) (int, error) {
-
 	value, err := dm.versionCache.Get(dm.machineInfo.MachineID)
 	if err != nil {
 		return 0, err
diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go
index 80ba8a1acad..e4c3e062b81 100644
--- a/pkg/kubelet/kubelet.go
+++ b/pkg/kubelet/kubelet.go
@@ -1251,6 +1251,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
 	if kl.kubeClient == nil {
 		glog.Warning("No api server defined - no node status update will be sent.")
 	}
+
 	if err := kl.initializeModules(); err != nil {
 		kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.KubeletSetupFailed, err.Error())
 		glog.Error(err)
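
Note on the pattern this patch adopts: the event calls in node_container_manager.go
switch from passing the cached *v1.Node (cm.nodeInfo) to a minimal
clientv1.ObjectReference, so events can be recorded even before the node object
has been registered and cached (see #42701). Below is a minimal sketch of that
construction, assuming the client-go import paths of this era
(k8s.io/client-go/pkg/api/v1); the helper name makeNodeRef and the node name
"worker-0" are illustrative and not part of the patch:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/types"
        clientv1 "k8s.io/client-go/pkg/api/v1"
    )

    // makeNodeRef builds the same minimal reference the patch constructs
    // inline: only Kind, Name, and UID are needed for the recorder to
    // attach events, so the full Node object need not exist in the
    // apiserver cache yet.
    func makeNodeRef(nodeName string) *clientv1.ObjectReference {
        return &clientv1.ObjectReference{
            Kind: "Node",
            Name: nodeName,
            // As in the patch, the node name doubles as the UID for
            // self-referential node events.
            UID:       types.UID(nodeName),
            Namespace: "",
        }
    }

    func main() {
        ref := makeNodeRef("worker-0") // hypothetical node name
        fmt.Printf("event target: %s %s (uid=%s)\n", ref.Kind, ref.Name, ref.UID)
    }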