Merge pull request #42939 from k82cn/k8s_42701

Automatic merge from submit-queue

Use an ObjectReference when recording node events, since the node object may not yet be cached when the events are emitted.

fixes #42701

```release-note
None
```
Merged by Kubernetes Submit Queue on 2017-04-17 23:26:06 -07:00, committed by GitHub
commit c20e63bfb9
4 changed files with 19 additions and 7 deletions
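The gist of the change: instead of handing the event recorder the node object itself (which may not be cached yet during early kubelet startup), the container manager builds an ObjectReference by hand and records events against that. Below is a minimal sketch of the pattern, assuming today's client-go import paths (`k8s.io/api/core/v1`, `k8s.io/client-go/tools/record`) rather than the 2017 vendored ones in this diff; `clientset` and `nodeName` are placeholders:

```go
package nodeevents

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// newNodeRecorder wires an event recorder to the API server and returns it
// alongside a hand-built reference to the node. Because the reference is
// constructed directly, recording never depends on the node object being
// present in a cache.
func newNodeRecorder(clientset kubernetes.Interface, nodeName string) (record.EventRecorder, *corev1.ObjectReference) {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
		Interface: clientset.CoreV1().Events(""),
	})
	recorder := broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "kubelet"})

	// Mirrors the diff below: nodes are cluster-scoped, so Namespace stays
	// empty, and the node name doubles as the UID.
	nodeRef := &corev1.ObjectReference{
		Kind: "Node",
		Name: nodeName,
		UID:  types.UID(nodeName),
	}
	return recorder, nodeRef
}
```

A call like `recorder.Event(nodeRef, corev1.EventTypeNormal, reason, message)` then behaves as if the full node object had been passed, minus the cache dependency.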

pkg/kubelet/cm/BUILD

@@ -49,6 +49,7 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library",
         "//vendor/k8s.io/client-go/tools/record:go_default_library",
     ],
 )

pkg/kubelet/cm/node_container_manager.go

@@ -26,6 +26,8 @@ import (
     "github.com/golang/glog"
     "k8s.io/apimachinery/pkg/api/resource"
+    "k8s.io/apimachinery/pkg/types"
+    clientv1 "k8s.io/client-go/pkg/api/v1"
     "k8s.io/kubernetes/pkg/api/v1"
     "k8s.io/kubernetes/pkg/kubelet/events"
     evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
@@ -69,6 +71,15 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
         Name:               CgroupName(cm.cgroupRoot),
         ResourceParameters: getCgroupConfig(nodeAllocatable),
     }
+    // Use an ObjectReference for events, since the node may not be cached; see #42701 for details.
+    nodeRef := &clientv1.ObjectReference{
+        Kind:      "Node",
+        Name:      cm.nodeInfo.Name,
+        UID:       types.UID(cm.nodeInfo.Name),
+        Namespace: "",
+    }
     // If Node Allocatable is enforced on a node that has not been drained, or is updated on an existing node to a lower value,
     // existing memory usage across pods might be higher than the current Node Allocatable memory limits.
     // Pod evictions are expected to bring memory usage back down below the Node Allocatable limits.
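For context on why the hand-built reference matters: when the recorder is handed a full object, it first resolves it to an ObjectReference through the scheme, and if that resolution fails client-go logs "Could not construct reference" and never records the event. A sketch of that resolution step, using the present-day `k8s.io/client-go/tools/reference` helper (the 2017 vendor layout differed, but the behavior is equivalent):

```go
package nodeevents

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/reference"
)

// nodeReference performs the lookup the event recorder runs internally when
// given a full object: the scheme must supply the object's Kind and
// APIVersion. An unregistered or incompletely populated object makes this
// error out, and the event is dropped. The hand-built nodeRef in the diff
// above skips this step entirely.
func nodeReference(node *corev1.Node) (*corev1.ObjectReference, error) {
	return reference.GetReference(scheme.Scheme, node)
}
```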
@@ -79,11 +90,11 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
         for {
             err := cm.cgroupManager.Update(cgroupConfig)
             if err == nil {
-                cm.recorder.Event(cm.nodeInfo, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated Node Allocatable limit across pods")
+                cm.recorder.Event(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated Node Allocatable limit across pods")
                 return
             }
             message := fmt.Sprintf("Failed to update Node Allocatable Limits %q: %v", cm.cgroupRoot, err)
-            cm.recorder.Event(cm.nodeInfo, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
+            cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
             time.Sleep(time.Minute)
         }
     }()
@@ -93,19 +104,19 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
         glog.V(2).Infof("Enforcing System reserved on cgroup %q with limits: %+v", nc.SystemReservedCgroupName, nc.SystemReserved)
         if err := enforceExistingCgroup(cm.cgroupManager, nc.SystemReservedCgroupName, nc.SystemReserved); err != nil {
             message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err)
-            cm.recorder.Event(cm.nodeInfo, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
+            cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
             return fmt.Errorf(message)
         }
-        cm.recorder.Eventf(cm.nodeInfo, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
+        cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
     }
     if nc.EnforceNodeAllocatable.Has(KubeReservedEnforcementKey) {
         glog.V(2).Infof("Enforcing kube reserved on cgroup %q with limits: %+v", nc.KubeReservedCgroupName, nc.KubeReserved)
         if err := enforceExistingCgroup(cm.cgroupManager, nc.KubeReservedCgroupName, nc.KubeReserved); err != nil {
             message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err)
-            cm.recorder.Event(cm.nodeInfo, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
+            cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
             return fmt.Errorf(message)
         }
-        cm.recorder.Eventf(cm.nodeInfo, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName)
+        cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName)
     }
     return nil
 }
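Because the recorder now receives a plain reference, the behavior is straightforward to exercise in isolation with client-go's `record.FakeRecorder`, which renders each event to a string on a channel. A hypothetical test-style sketch (the reason string is illustrative, not necessarily the kubelet's exact constant):

```go
package nodeevents

import (
	"testing"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/tools/record"
)

func TestEventViaObjectReference(t *testing.T) {
	rec := record.NewFakeRecorder(1) // buffered channel of rendered events
	nodeRef := &corev1.ObjectReference{
		Kind: "Node",
		Name: "node-1",
		UID:  types.UID("node-1"),
	}

	rec.Event(nodeRef, corev1.EventTypeNormal, "NodeAllocatableEnforced",
		"Updated Node Allocatable limit across pods")

	// FakeRecorder renders events as "<type> <reason> <message>".
	got := <-rec.Events
	want := "Normal NodeAllocatableEnforced Updated Node Allocatable limit across pods"
	if got != want {
		t.Fatalf("got %q, want %q", got, want)
	}
}
```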

pkg/kubelet/dockertools/docker_manager.go

@@ -1887,7 +1887,6 @@ type versionInfo struct {
 // -1: older than expected version
 // 0 : same version
 func (dm *DockerManager) checkDockerAPIVersion(expectedVersion string) (int, error) {
-
     value, err := dm.versionCache.Get(dm.machineInfo.MachineID)
     if err != nil {
         return 0, err

pkg/kubelet/kubelet.go

@@ -1251,6 +1251,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
     if kl.kubeClient == nil {
         glog.Warning("No api server defined - no node status update will be sent.")
     }
+
     if err := kl.initializeModules(); err != nil {
         kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.KubeletSetupFailed, err.Error())
         glog.Error(err)