Merge pull request #56661 from xiangpengzhao/move-kubelet-constants

Automatic merge from submit-queue (batch tested with PRs 56410, 56707, 56661, 54998, 56722). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Move some kubelet constants to a common place

**What this PR does / why we need it**:
This PR moves the node allocatable enforcement keys from `pkg/kubelet/cm` to `pkg/kubelet/types`. For more context, see https://github.com/kubernetes/kubernetes/issues/56516.
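The destination is a small, dependency-light leaf package, so consumers such as kubelet config validation and the eviction manager can reference the keys without importing the container manager. As a minimal sketch of the moved block (the authoritative version is in the `pkg/kubelet/types` hunk below):

```go
// Sketch of the constants' new home; the real declarations land in
// pkg/kubelet/types (see the constants hunk later in this diff).
package types

// User visible keys for managing node allocatable enforcement on the node.
const (
	NodeAllocatableEnforcementKey = "pods"
	SystemReservedEnforcementKey  = "system-reserved"
	KubeReservedEnforcementKey    = "kube-reserved"
)
```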
**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #56516
[thanks @ixdy for verifying this!]

**Special notes for your reviewer**:
@ixdy, how can I verify #56516 against this change locally?

/cc @ixdy @mtaufen 

**Release note**:

```release-note
NONE
```
commit 8415e0c608
Author: Kubernetes Submit Queue
Date:   2017-12-16 05:46:35 -08:00 (committed by GitHub)
10 changed files with 22 additions and 23 deletions


```diff
--- a/pkg/kubelet/apis/kubeletconfig/validation/BUILD
+++ b/pkg/kubelet/apis/kubeletconfig/validation/BUILD
@@ -12,7 +12,7 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig/validation",
     deps = [
         "//pkg/kubelet/apis/kubeletconfig:go_default_library",
-        "//pkg/kubelet/cm:go_default_library",
+        "//pkg/kubelet/types:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
     ],
```


```diff
--- a/pkg/kubelet/apis/kubeletconfig/validation/validation.go
+++ b/pkg/kubelet/apis/kubeletconfig/validation/validation.go
@@ -22,7 +22,7 @@ import (
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	utilvalidation "k8s.io/apimachinery/pkg/util/validation"
 	"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
-	containermanager "k8s.io/kubernetes/pkg/kubelet/cm"
+	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 )
 
 // ValidateKubeletConfiguration validates `kc` and returns an error if it is invalid
@@ -91,13 +91,13 @@ func ValidateKubeletConfiguration(kc *kubeletconfig.KubeletConfiguration) error
 	}
 	for _, val := range kc.EnforceNodeAllocatable {
 		switch val {
-		case containermanager.NodeAllocatableEnforcementKey:
-		case containermanager.SystemReservedEnforcementKey:
-		case containermanager.KubeReservedEnforcementKey:
+		case kubetypes.NodeAllocatableEnforcementKey:
+		case kubetypes.SystemReservedEnforcementKey:
+		case kubetypes.KubeReservedEnforcementKey:
 			continue
 		default:
 			allErrors = append(allErrors, fmt.Errorf("Invalid option %q specified for EnforceNodeAllocatable (--enforce-node-allocatable) setting. Valid options are %q, %q or %q",
-				val, containermanager.NodeAllocatableEnforcementKey, containermanager.SystemReservedEnforcementKey, containermanager.KubeReservedEnforcementKey))
+				val, kubetypes.NodeAllocatableEnforcementKey, kubetypes.SystemReservedEnforcementKey, kubetypes.KubeReservedEnforcementKey))
 		}
 	}
 	return utilerrors.NewAggregate(allErrors)
```
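To make the validation hunk above concrete, here is a self-contained sketch of the same switch, with lower-case stand-ins for the `kubetypes` constants (the surrounding `ValidateKubeletConfiguration` plumbing is omitted):

```go
package main

import "fmt"

// Stand-ins for the kubetypes constants referenced in the hunk above.
const (
	nodeAllocatableEnforcementKey = "pods"
	systemReservedEnforcementKey  = "system-reserved"
	kubeReservedEnforcementKey    = "kube-reserved"
)

// validateEnforceNodeAllocatable mirrors the switch in the diff: every entry
// must be one of the three known keys, otherwise an error is collected.
func validateEnforceNodeAllocatable(values []string) []error {
	var allErrors []error
	for _, val := range values {
		switch val {
		case nodeAllocatableEnforcementKey, systemReservedEnforcementKey, kubeReservedEnforcementKey:
			continue
		default:
			allErrors = append(allErrors, fmt.Errorf("invalid option %q for EnforceNodeAllocatable", val))
		}
	}
	return allErrors
}

func main() {
	fmt.Println(validateEnforceNodeAllocatable([]string{"pods", "not-a-key"}))
	// prints: [invalid option "not-a-key" for EnforceNodeAllocatable]
}
```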


```diff
--- a/pkg/kubelet/cm/BUILD
+++ b/pkg/kubelet/cm/BUILD
@@ -57,6 +57,7 @@ go_library(
         "//pkg/kubelet/events:go_default_library",
         "//pkg/kubelet/metrics:go_default_library",
         "//pkg/kubelet/qos:go_default_library",
+        "//pkg/kubelet/types:go_default_library",
         "//pkg/util/file:go_default_library",
         "//pkg/util/oom:go_default_library",
         "//pkg/util/procfs:go_default_library",
```


```diff
--- a/pkg/kubelet/cm/container_manager.go
+++ b/pkg/kubelet/cm/container_manager.go
@@ -122,13 +122,6 @@ type Status struct {
 	SoftRequirements error
 }
 
-const (
-	// Uer visible keys for managing node allocatable enforcement on the node.
-	NodeAllocatableEnforcementKey = "pods"
-	SystemReservedEnforcementKey  = "system-reserved"
-	KubeReservedEnforcementKey    = "kube-reserved"
-)
-
 // containerManager for the kubelet is currently an injected dependency.
 // We need to parse the --qos-reserve-requests option in
 // cmd/kubelet/app/server.go and there isn't really a good place to put
```


```diff
--- a/pkg/kubelet/cm/node_container_manager.go
+++ b/pkg/kubelet/cm/node_container_manager.go
@@ -32,6 +32,7 @@ import (
 	kubefeatures "k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/events"
 	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
+	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 )
 
 const (
@@ -62,7 +63,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 	// default cpu shares on cgroups are low and can cause cpu starvation.
 	nodeAllocatable := cm.capacity
 	// Use Node Allocatable limits instead of capacity if the user requested enforcing node allocatable.
-	if cm.CgroupsPerQOS && nc.EnforceNodeAllocatable.Has(NodeAllocatableEnforcementKey) {
+	if cm.CgroupsPerQOS && nc.EnforceNodeAllocatable.Has(kubetypes.NodeAllocatableEnforcementKey) {
 		nodeAllocatable = cm.getNodeAllocatableAbsolute()
 	}
 
@@ -101,7 +102,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 		}()
 	}
 	// Now apply kube reserved and system reserved limits if required.
-	if nc.EnforceNodeAllocatable.Has(SystemReservedEnforcementKey) {
+	if nc.EnforceNodeAllocatable.Has(kubetypes.SystemReservedEnforcementKey) {
 		glog.V(2).Infof("Enforcing System reserved on cgroup %q with limits: %+v", nc.SystemReservedCgroupName, nc.SystemReserved)
 		if err := enforceExistingCgroup(cm.cgroupManager, nc.SystemReservedCgroupName, nc.SystemReserved); err != nil {
 			message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err)
@@ -110,7 +111,7 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 		}
 		cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
 	}
-	if nc.EnforceNodeAllocatable.Has(KubeReservedEnforcementKey) {
+	if nc.EnforceNodeAllocatable.Has(kubetypes.KubeReservedEnforcementKey) {
 		glog.V(2).Infof("Enforcing kube reserved on cgroup %q with limits: %+v", nc.KubeReservedCgroupName, nc.KubeReserved)
 		if err := enforceExistingCgroup(cm.cgroupManager, nc.KubeReservedCgroupName, nc.KubeReserved); err != nil {
 			message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err)
```


```diff
--- a/pkg/kubelet/eviction/BUILD
+++ b/pkg/kubelet/eviction/BUILD
@@ -18,7 +18,6 @@ go_test(
         "//pkg/apis/core:go_default_library",
         "//pkg/features:go_default_library",
         "//pkg/kubelet/apis/stats/v1alpha1:go_default_library",
-        "//pkg/kubelet/cm:go_default_library",
         "//pkg/kubelet/eviction/api:go_default_library",
         "//pkg/kubelet/lifecycle:go_default_library",
         "//pkg/kubelet/types:go_default_library",
```


```diff
--- a/pkg/kubelet/eviction/helpers.go
+++ b/pkg/kubelet/eviction/helpers.go
@@ -29,9 +29,9 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/kubernetes/pkg/features"
 	statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
-	"k8s.io/kubernetes/pkg/kubelet/cm"
 	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
 	"k8s.io/kubernetes/pkg/kubelet/server/stats"
+	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	schedulerutils "k8s.io/kubernetes/plugin/pkg/scheduler/util"
 )
 
@@ -198,7 +198,7 @@ func parseThresholdStatement(signal evictionapi.Signal, val string) (evictionapi
 // getAllocatableThreshold returns the thresholds applicable for the allocatable configuration
 func getAllocatableThreshold(allocatableConfig []string) []evictionapi.Threshold {
 	for _, key := range allocatableConfig {
-		if key == cm.NodeAllocatableEnforcementKey {
+		if key == kubetypes.NodeAllocatableEnforcementKey {
 			return []evictionapi.Threshold{
 				{
 					Signal: evictionapi.SignalAllocatableMemoryAvailable,
```
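On the eviction side, only the `pods` key matters: its presence causes an extra allocatable-memory threshold to be synthesized. A stripped-down sketch, where `threshold` and the signal string are simplified stand-ins for `evictionapi.Threshold` and `evictionapi.SignalAllocatableMemoryAvailable`:

```go
package main

import "fmt"

const nodeAllocatableEnforcementKey = "pods"

// threshold is a simplified stand-in for evictionapi.Threshold.
type threshold struct {
	Signal string
}

// getAllocatableThreshold mirrors the hunk above: the extra threshold is
// produced only when node allocatable enforcement ("pods") is requested.
func getAllocatableThreshold(allocatableConfig []string) []threshold {
	for _, key := range allocatableConfig {
		if key == nodeAllocatableEnforcementKey {
			return []threshold{{Signal: "allocatableMemory.available"}}
		}
	}
	return []threshold{}
}

func main() {
	fmt.Println(getAllocatableThreshold([]string{"pods"}))            // [{allocatableMemory.available}]
	fmt.Println(getAllocatableThreshold([]string{"system-reserved"})) // []
}
```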


```diff
--- a/pkg/kubelet/eviction/helpers_test.go
+++ b/pkg/kubelet/eviction/helpers_test.go
@@ -30,8 +30,8 @@ import (
 	api "k8s.io/kubernetes/pkg/apis/core"
 	"k8s.io/kubernetes/pkg/features"
 	statsapi "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
-	"k8s.io/kubernetes/pkg/kubelet/cm"
 	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
+	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/quota"
 )
 
@@ -61,7 +61,7 @@ func TestParseThresholdConfig(t *testing.T) {
 			expectThresholds: []evictionapi.Threshold{},
 		},
 		"all memory eviction values": {
-			allocatableConfig: []string{cm.NodeAllocatableEnforcementKey},
+			allocatableConfig: []string{kubetypes.NodeAllocatableEnforcementKey},
 			evictionHard:      map[string]string{"memory.available": "150Mi"},
 			evictionSoft:      map[string]string{"memory.available": "300Mi"},
 			evictionSoftGracePeriod: map[string]string{"memory.available": "30s"},
```


```diff
--- a/pkg/kubelet/types/constants.go
+++ b/pkg/kubelet/types/constants.go
@@ -24,4 +24,9 @@ const (
 	DockerContainerRuntime = "docker"
 	RktContainerRuntime    = "rkt"
 	RemoteContainerRuntime = "remote"
+
+	// User visible keys for managing node allocatable enforcement on the node.
+	NodeAllocatableEnforcementKey = "pods"
+	SystemReservedEnforcementKey  = "system-reserved"
+	KubeReservedEnforcementKey    = "kube-reserved"
 )
```
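After the move, a consumer needs only the lightweight types package. A hypothetical caller, assuming the kubernetes repo is on the import path, would look like:

```go
package main

import (
	"fmt"

	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

func main() {
	// The enforcement keys are plain strings, cheap to share across packages.
	fmt.Println(kubetypes.NodeAllocatableEnforcementKey) // pods
}
```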


```diff
--- a/test/e2e_node/eviction_test.go
+++ b/test/e2e_node/eviction_test.go
@@ -28,8 +28,8 @@ import (
 	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
 	stats "k8s.io/kubernetes/pkg/kubelet/apis/stats/v1alpha1"
-	"k8s.io/kubernetes/pkg/kubelet/cm"
 	kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
+	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/test/e2e/framework"
 
 	. "github.com/onsi/ginkgo"
@@ -104,7 +104,7 @@ var _ = framework.KubeDescribe("MemoryAllocatableEviction [Slow] [Serial] [Disru
 		initialConfig.KubeReserved = map[string]string{
 			string(v1.ResourceMemory): kubeReserved.String(),
 		}
-		initialConfig.EnforceNodeAllocatable = []string{cm.NodeAllocatableEnforcementKey}
+		initialConfig.EnforceNodeAllocatable = []string{kubetypes.NodeAllocatableEnforcementKey}
 		initialConfig.CgroupsPerQOS = true
 	})
 	runEvictionTest(f, pressureTimeout, expectedNodeCondition, logMemoryMetrics, []podEvictSpec{
```