Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-23 11:50:44 +00:00
Merge pull request #42939 from k82cn/k8s_42701

Automatic merge from submit-queue

Used ObjectReference for events.

Fixes #42701

```release-note
None
```
This commit is contained in: c20e63bfb9
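The change relies on the fact that an event's involved object can be a bare `ObjectReference` rather than a cached API object. Below is a minimal sketch, not part of the diff, that exercises the same pattern in isolation: it uses client-go's `FakeRecorder` as a stand-in for the kubelet's real recorder, a hypothetical node name `node-1`, and an illustrative reason string; package paths follow the vendored client-go layout used at the time of this commit.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	clientv1 "k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/tools/record"
)

func main() {
	// FakeRecorder buffers formatted events on a channel; the kubelet wires a
	// real recorder to an EventBroadcaster instead.
	recorder := record.NewFakeRecorder(10)

	// The reference itself carries Kind, Name, and UID, so recording an event
	// needs no lookup of the Node object in a local cache (the failure mode
	// behind #42701).
	nodeRef := &clientv1.ObjectReference{
		Kind: "Node",
		Name: "node-1", // hypothetical node name
		UID:  types.UID("node-1"),
	}

	// "NodeAllocatableEnforced" is an illustrative reason string.
	recorder.Event(nodeRef, clientv1.EventTypeNormal, "NodeAllocatableEnforced",
		"Updated Node Allocatable limit across pods")
	fmt.Println(<-recorder.Events) // e.g. "Normal NodeAllocatableEnforced Updated Node Allocatable limit across pods"
}
```

With a real recorder the same call yields an event whose involvedObject is the node, so it still shows up in the node's event stream.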
@@ -49,6 +49,7 @@ go_library(
         "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
+        "//vendor/k8s.io/client-go/pkg/api/v1:go_default_library",
         "//vendor/k8s.io/client-go/tools/record:go_default_library",
     ],
 )
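The new vendored dependency above corresponds to the `clientv1 "k8s.io/client-go/pkg/api/v1"` import added in the next hunk; Bazel BUILD files must list every vendored package a library imports.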
@@ -26,6 +26,8 @@ import (
 	"github.com/golang/glog"

 	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/types"
+	clientv1 "k8s.io/client-go/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/kubelet/events"
 	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
@@ -69,6 +71,15 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 		Name:               CgroupName(cm.cgroupRoot),
 		ResourceParameters: getCgroupConfig(nodeAllocatable),
 	}
+
+	// Using ObjectReference for events as the node may not be cached; refer to #42701 for details.
+	nodeRef := &clientv1.ObjectReference{
+		Kind:      "Node",
+		Name:      cm.nodeInfo.Name,
+		UID:       types.UID(cm.nodeInfo.Name),
+		Namespace: "",
+	}
+
 	// If Node Allocatable is enforced on a node that has not been drained or is updated on an existing node to a lower value,
 	// existing memory usage across pods might be higher than current Node Allocatable Memory Limits.
 	// Pod Evictions are expected to bring down memory usage to below Node Allocatable limits.
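A note on the fields above: `UID` is deliberately filled with the node's name rather than the API object's real UID, and `Namespace` is empty because nodes are cluster-scoped. This mirrors how the kubelet builds the `nodeRef` it uses elsewhere (see the `kl.nodeRef` usage in the last hunk), and it is what keeps the reference usable even when the Node object, and hence its real UID, is not available locally.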
@@ -79,11 +90,11 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 			for {
 				err := cm.cgroupManager.Update(cgroupConfig)
 				if err == nil {
-					cm.recorder.Event(cm.nodeInfo, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated Node Allocatable limit across pods")
+					cm.recorder.Event(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated Node Allocatable limit across pods")
 					return
 				}
 				message := fmt.Sprintf("Failed to update Node Allocatable Limits %q: %v", cm.cgroupRoot, err)
-				cm.recorder.Event(cm.nodeInfo, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
+				cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
 				time.Sleep(time.Minute)
 			}
 		}()
@@ -93,19 +104,19 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 		glog.V(2).Infof("Enforcing System reserved on cgroup %q with limits: %+v", nc.SystemReservedCgroupName, nc.SystemReserved)
 		if err := enforceExistingCgroup(cm.cgroupManager, nc.SystemReservedCgroupName, nc.SystemReserved); err != nil {
 			message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err)
-			cm.recorder.Event(cm.nodeInfo, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
+			cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
 			return fmt.Errorf(message)
 		}
-		cm.recorder.Eventf(cm.nodeInfo, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
+		cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
 	}
 	if nc.EnforceNodeAllocatable.Has(KubeReservedEnforcementKey) {
 		glog.V(2).Infof("Enforcing kube reserved on cgroup %q with limits: %+v", nc.KubeReservedCgroupName, nc.KubeReserved)
 		if err := enforceExistingCgroup(cm.cgroupManager, nc.KubeReservedCgroupName, nc.KubeReserved); err != nil {
 			message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err)
-			cm.recorder.Event(cm.nodeInfo, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
+			cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
 			return fmt.Errorf(message)
 		}
-		cm.recorder.Eventf(cm.nodeInfo, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName)
+		cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName)
 	}
 	return nil
 }
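A small API note on the hunk above: `Eventf` is the Sprintf variant of `Event`, building the message from a format string and arguments with otherwise identical recorder semantics, which is what lets the success paths interpolate the reserved-cgroup names directly.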
@@ -1887,7 +1887,6 @@ type versionInfo struct {
 // -1: older than expected version
 // 0 : same version
 func (dm *DockerManager) checkDockerAPIVersion(expectedVersion string) (int, error) {
-
 	value, err := dm.versionCache.Get(dm.machineInfo.MachineID)
 	if err != nil {
 		return 0, err
@@ -1251,6 +1251,7 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
 	if kl.kubeClient == nil {
 		glog.Warning("No api server defined - no node status update will be sent.")
 	}
+
 	if err := kl.initializeModules(); err != nil {
 		kl.recorder.Eventf(kl.nodeRef, v1.EventTypeWarning, events.KubeletSetupFailed, err.Error())
 		glog.Error(err)