Merge pull request #64860 from wgliang/master.kubelet-check-limit

Automatic merge from submit-queue (batch tested with PRs 65290, 65326, 65289, 65334, 64860). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

checkLimitsForResolvConf on pod create and update events instead of checking periodically

**What this PR does / why we need it**:

- Check the resolv.conf limits at pod create and update events instead of checking continuously every 30 seconds (see the sketch below).
- Increase the logging verbosity to 4 or higher, since the event is not catastrophic to cluster health.
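
For context, a minimal, self-contained sketch of the pattern (not the actual kubelet code; `dnsConfigurer`, `kubelet`, and `handlePodAddOrUpdate` below are simplified stand-ins used only for illustration): the 30-second polling goroutine goes away and the check runs from the pod add/update handlers instead.

```go
package main

import (
	"fmt"
	"time"
)

// dnsConfigurer is a simplified stand-in for the kubelet's DNS configurer.
type dnsConfigurer struct {
	ResolverConfig string
}

// CheckLimitsForResolvConf stands in for the real resolv.conf limit check.
func (c *dnsConfigurer) CheckLimitsForResolvConf() {
	fmt.Printf("checking %s against search-domain limits\n", c.ResolverConfig)
}

type kubelet struct {
	dns *dnsConfigurer
}

// Old pattern: poll every 30 seconds regardless of pod activity.
func (kl *kubelet) runPeriodicCheck(stop <-chan struct{}) {
	ticker := time.NewTicker(30 * time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			kl.dns.CheckLimitsForResolvConf()
		case <-stop:
			return
		}
	}
}

// New pattern: run the check only when a pod is added or updated.
func (kl *kubelet) handlePodAddOrUpdate(podName string) {
	if kl.dns != nil && kl.dns.ResolverConfig != "" {
		kl.dns.CheckLimitsForResolvConf()
	}
	fmt.Println("handling pod", podName)
}

func main() {
	kl := &kubelet{dns: &dnsConfigurer{ResolverConfig: "/etc/resolv.conf"}}
	kl.handlePodAddOrUpdate("nginx-abc123")
}
```

Tying the check to pod lifecycle events surfaces the warning when it is actually relevant (a pod was created or changed) without keeping a polling goroutine alive.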


**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #64849

**Special notes for your reviewer**:
@ravisantoshgudimetla 

**Release note**:

```release-note
checkLimitsForResolvConf on pod create and update events instead of checking periodically
```
Kubernetes Submit Queue 2018-06-22 04:43:16 -07:00 committed by GitHub
commit 1ca851baec
4 changed files with 65 additions and 9 deletions


@@ -182,6 +182,7 @@ go_test(
"//pkg/kubelet/images:go_default_library",
"//pkg/kubelet/lifecycle:go_default_library",
"//pkg/kubelet/logs:go_default_library",
"//pkg/kubelet/network/dns:go_default_library",
"//pkg/kubelet/pleg:go_default_library",
"//pkg/kubelet/pod:go_default_library",
"//pkg/kubelet/pod/testing:go_default_library",


@@ -1401,11 +1401,6 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
// handled by pod workers).
go wait.Until(kl.podKiller, 1*time.Second, wait.NeverStop)
// Start goroutine responsible for checking limits in resolv.conf
if kl.dnsConfigurer.ResolverConfig != "" {
go wait.Until(func() { kl.dnsConfigurer.CheckLimitsForResolvConf() }, 30*time.Second, wait.NeverStop)
}
// Start component sync loops.
kl.statusManager.Start()
kl.probeManager.Start()
@@ -1995,6 +1990,10 @@ func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) {
start := kl.clock.Now()
sort.Sort(sliceutils.PodsByCreationTime(pods))
for _, pod := range pods {
// Responsible for checking limits in resolv.conf
if kl.dnsConfigurer != nil && kl.dnsConfigurer.ResolverConfig != "" {
kl.dnsConfigurer.CheckLimitsForResolvConf()
}
existingPods := kl.podManager.GetPods()
// Always add the pod to the pod manager. Kubelet relies on the pod
// manager as the source of truth for the desired state. If a pod does
@@ -2032,6 +2031,10 @@ func (kl *Kubelet) HandlePodAdditions(pods []*v1.Pod) {
func (kl *Kubelet) HandlePodUpdates(pods []*v1.Pod) {
start := kl.clock.Now()
for _, pod := range pods {
// Responsible for checking limits in resolv.conf
if kl.dnsConfigurer != nil && kl.dnsConfigurer.ResolverConfig != "" {
kl.dnsConfigurer.CheckLimitsForResolvConf()
}
kl.podManager.UpdatePod(pod)
if kubepod.IsMirrorPod(pod) {
kl.handleMirrorPod(pod, start)


@@ -50,6 +50,7 @@ import (
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
"k8s.io/kubernetes/pkg/kubelet/logs"
"k8s.io/kubernetes/pkg/kubelet/network/dns"
"k8s.io/kubernetes/pkg/kubelet/pleg"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing"
@@ -472,6 +473,16 @@ func TestHandlePortConflicts(t *testing.T) {
},
}}
recorder := record.NewFakeRecorder(20)
nodeRef := &v1.ObjectReference{
Kind: "Node",
Name: string("testNode"),
UID: types.UID("testNode"),
Namespace: "",
}
testClusterDNSDomain := "TEST"
kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
spec := v1.PodSpec{NodeName: string(kl.nodeName), Containers: []v1.Container{{Ports: []v1.ContainerPort{{HostPort: 80}}}}}
pods := []*v1.Pod{
podWithUIDNameNsSpec("123456789", "newpod", "foo", spec),
@@ -509,6 +520,16 @@ func TestHandleHostNameConflicts(t *testing.T) {
},
}}
recorder := record.NewFakeRecorder(20)
nodeRef := &v1.ObjectReference{
Kind: "Node",
Name: string("testNode"),
UID: types.UID("testNode"),
Namespace: "",
}
testClusterDNSDomain := "TEST"
kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
// default NodeName in test is 127.0.0.1
pods := []*v1.Pod{
podWithUIDNameNsSpec("123456789", "notfittingpod", "foo", v1.PodSpec{NodeName: "127.0.0.2"}),
@@ -542,6 +563,17 @@ func TestHandleNodeSelector(t *testing.T) {
},
}
kl.nodeInfo = testNodeInfo{nodes: nodes}
recorder := record.NewFakeRecorder(20)
nodeRef := &v1.ObjectReference{
Kind: "Node",
Name: string("testNode"),
UID: types.UID("testNode"),
Namespace: "",
}
testClusterDNSDomain := "TEST"
kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
pods := []*v1.Pod{
podWithUIDNameNsSpec("123456789", "podA", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "A"}}),
podWithUIDNameNsSpec("987654321", "podB", "foo", v1.PodSpec{NodeSelector: map[string]string{"key": "B"}}),
@@ -573,6 +605,16 @@ func TestHandleMemExceeded(t *testing.T) {
}
kl.nodeInfo = testNodeInfo{nodes: nodes}
recorder := record.NewFakeRecorder(20)
nodeRef := &v1.ObjectReference{
Kind: "Node",
Name: string("testNode"),
UID: types.UID("testNode"),
Namespace: "",
}
testClusterDNSDomain := "TEST"
kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
spec := v1.PodSpec{NodeName: string(kl.nodeName),
Containers: []v1.Container{{Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
@@ -659,6 +701,16 @@ func TestHandlePluginResources(t *testing.T) {
kl.admitHandlers = lifecycle.PodAdmitHandlers{}
kl.admitHandlers.AddPodAdmitHandler(lifecycle.NewPredicateAdmitHandler(kl.getNodeAnyWay, lifecycle.NewAdmissionFailureHandlerStub(), updatePluginResourcesFunc))
recorder := record.NewFakeRecorder(20)
nodeRef := &v1.ObjectReference{
Kind: "Node",
Name: string("testNode"),
UID: types.UID("testNode"),
Namespace: "",
}
testClusterDNSDomain := "TEST"
kl.dnsConfigurer = dns.NewConfigurer(recorder, nodeRef, nil, nil, testClusterDNSDomain, "")
// pod requiring adjustedResource can be successfully allocated because updatePluginResourcesFunc
// adjusts node.allocatableResource for this resource to a sufficient value.
fittingPodSpec := v1.PodSpec{NodeName: string(kl.nodeName),


@@ -156,7 +156,7 @@ func (c *Configurer) CheckLimitsForResolvConf() {
f, err := os.Open(c.ResolverConfig)
if err != nil {
c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", err.Error())
glog.Error("CheckLimitsForResolvConf: " + err.Error())
glog.V(4).Infof("CheckLimitsForResolvConf: " + err.Error())
return
}
defer f.Close()
@@ -164,7 +164,7 @@ func (c *Configurer) CheckLimitsForResolvConf() {
_, hostSearch, _, err := parseResolvConf(f)
if err != nil {
c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", err.Error())
glog.Error("CheckLimitsForResolvConf: " + err.Error())
glog.V(4).Infof("CheckLimitsForResolvConf: " + err.Error())
return
}
@@ -177,14 +177,14 @@ func (c *Configurer) CheckLimitsForResolvConf() {
if len(hostSearch) > domainCountLimit {
log := fmt.Sprintf("Resolv.conf file '%s' contains search line consisting of more than %d domains!", c.ResolverConfig, domainCountLimit)
c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", log)
glog.Error("CheckLimitsForResolvConf: " + log)
glog.V(4).Infof("CheckLimitsForResolvConf: " + log)
return
}
if len(strings.Join(hostSearch, " ")) > validation.MaxDNSSearchListChars {
log := fmt.Sprintf("Resolv.conf file '%s' contains search line which length is more than allowed %d chars!", c.ResolverConfig, validation.MaxDNSSearchListChars)
c.recorder.Event(c.nodeRef, v1.EventTypeWarning, "CheckLimitsForResolvConf", log)
glog.Error("CheckLimitsForResolvConf: " + log)
glog.V(4).Infof("CheckLimitsForResolvConf: " + log)
return
}