Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #64860 from wgliang/master.kubelet-check-limit
Automatic merge from submit-queue (batch tested with PRs 65290, 65326, 65289, 65334, 64860). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

checkLimitsForResolvConf for the pod create and update events instead of checking periodically

**What this PR does / why we need it**:
- Run checkLimitsForResolvConf at pod create and update events instead of checking continuously every 30 seconds.
- Increase the logging level to 4 or higher, since the event is not catastrophic to cluster health.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #64849

**Special notes for your reviewer**:
@ravisantoshgudimetla

**Release note**:
```release-note
checkLimitsForResolvConf for the pod create and update events instead of checking periodically
```
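For context, the change described above is event-driven rather than periodic: the resolv.conf limit check runs when pods are added or updated instead of on a 30-second ticker. Below is a rough, self-contained Go sketch of that shape; the function and handler names are illustrative placeholders, not the actual kubelet symbols.

```go
package main

import "log"

// checkLimitsForResolvConf stands in for the kubelet's resolv.conf check; the
// real code parses the node's resolv.conf and warns when nameserver or
// search-domain limits are exceeded. Per this PR, that warning is logged at a
// higher verbosity because it is not catastrophic to cluster health.
func checkLimitsForResolvConf() {
	log.Println("checking resolv.conf limits")
}

// handlePodAdditions and handlePodUpdates mirror the pod lifecycle hooks where,
// per this PR, the check now runs (previously it ran in a polling loop every
// 30 seconds).
func handlePodAdditions(pods []string) {
	checkLimitsForResolvConf()
	for _, p := range pods {
		log.Printf("admitting pod %s", p)
	}
}

func handlePodUpdates(pods []string) {
	checkLimitsForResolvConf()
	for _, p := range pods {
		log.Printf("updating pod %s", p)
	}
}

func main() {
	handlePodAdditions([]string{"newpod"})
	handlePodUpdates([]string{"newpod"})
}
```

Tying the check to pod create/update keeps the warning close to the workloads it affects and avoids re-logging the same condition every 30 seconds on an otherwise idle node.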
@@ -50,6 +50,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/images"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/logs"
+	"k8s.io/kubernetes/pkg/kubelet/network/dns"
 	"k8s.io/kubernetes/pkg/kubelet/pleg"
 	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
 	podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
@@ -472,6 +473,16 @@ func TestHandlePortConflicts(t *testing.T) {
 		},
 	}}
 
+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
+
 	spec := v1.PodSpec{NodeName: string(kl.nodeName), Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}
 	pods := []*v1.Pod{
 		podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
@@ -509,6 +520,16 @@ func TestHandleHostNameConflicts(t *testing.T) {
 		},
 	}}
 
+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
+
 	// default NodeName in test is 127.0.0.1
 	pods := []*v1.Pod{
 		podWithUIDNameNsSpec("123456789", "notfittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.2"}),
@@ -542,6 +563,17 @@ func TestHandleNodeSelector(t *testing.T) {
 		},
 	}
 	kl.nodeInfo = testNodeInfo{nodes: nodes}
+
+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
+
 	pods := []*v1.Pod{
 		podWithUIDNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
 		podWithUIDNameNsSpec("987654321", "podB", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
@@ -573,6 +605,16 @@ func TestHandleMemExceeded(t *testing.T) {
 	}
 	kl.nodeInfo = testNodeInfo{nodes: nodes}
 
+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
+
 	spec := v1.PodSpec{NodeName: string(kl.nodeName),
 		Containers: []v1.Container{{Resources: v1.ResourceRequirements{
 			Requests: v1.ResourceList{
@@ -659,6 +701,16 @@ func TestHandlePluginResources(t *testing.T) {
 	kl.admitHandlers = lifecycle.PodAdmitHandlers{}
 	kl.admitHandlers.AddPodAdmitHandler(lifecycle.NewPredicateAdmitHandler(kl.getNodeAnyWay, lifecycle.NewAdmissionFailureHandlerStub(), updatePluginResourcesFunc))
 
+	recorder := record.NewFakeRecorder(20)
+	nodeRef := &v1.ObjectReference{
+		Kind:      "Node",
+		Name:      string("testNode"),
+		UID:       types.UID("testNode"),
+		Namespace: "",
+	}
+	testClusterDNSDomain := "TEST"
+	kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
+
 	// pod requiring adjustedResource can be successfully allocated because updatePluginResourcesFunc
 	// adjusts node.allocatableResource for this resource to a sufficient value.
 	fittingPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),