mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-07-21 10:51:29 +00:00

remove DynamicKubeletConfig logic from kubelet

This commit is contained in:
parent 32f83b2b60
commit 7e7bc6d53b
api/openapi-spec/swagger.json (generated)

@@ -7528,7 +7528,7 @@
         "properties": {
           "configSource": {
             "$ref": "#/definitions/io.k8s.api.core.v1.NodeConfigSource",
-            "description": "Deprecated. If specified, the source of the node's configuration. The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field. This field is deprecated as of 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"
+            "description": "Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed from Kubelets as of 1.24 and will be fully removed in 1.26."
           },
           "externalID": {
             "description": "Deprecated. Not all kubelets will set this field. Remove field after 1.13. see: https://issues.k8s.io/61966",
@@ -3092,7 +3092,7 @@
         "properties": {
           "configSource": {
             "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeConfigSource",
-            "description": "Deprecated. If specified, the source of the node's configuration. The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field. This field is deprecated as of 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration"
+            "description": "Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed from Kubelets as of 1.24 and will be fully removed in 1.26."
           },
           "externalID": {
             "description": "Deprecated. Not all kubelets will set this field. Remove field after 1.13. see: https://issues.k8s.io/61966",
@@ -79,13 +79,6 @@ type KubeletFlags struct {
     // mounts,etc).
     RootDirectory string
 
-    // The Kubelet will use this directory for checkpointing downloaded configurations and tracking configuration health.
-    // The Kubelet will create this directory if it does not already exist.
-    // The path may be absolute or relative; relative paths are under the Kubelet's current working directory.
-    // Providing this flag enables dynamic kubelet configuration.
-    // To use this flag, the DynamicKubeletConfig feature gate must be enabled.
-    DynamicConfigDir cliflag.StringFlag
-
     // The Kubelet will load its initial configuration from this file.
     // The path may be absolute or relative; relative paths are under the Kubelet's current working directory.
     // Omit this flag to use the combination of built-in default configuration values and flags.

@@ -171,11 +164,6 @@ func NewKubeletFlags() *KubeletFlags {
 
 // ValidateKubeletFlags validates Kubelet's configuration flags and returns an error if they are invalid.
 func ValidateKubeletFlags(f *KubeletFlags) error {
-    // ensure that nobody sets DynamicConfigDir if the dynamic config feature gate is turned off
-    if f.DynamicConfigDir.Provided() && !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
-        return fmt.Errorf("the DynamicKubeletConfig feature gate must be enabled in order to use the --dynamic-config-dir flag")
-    }
-
     unknownLabels := sets.NewString()
     for k := range f.NodeLabels {
         if isKubernetesLabel(k) && !kubeletapis.IsKubeletLabel(k) {
@@ -313,9 +301,6 @@ func (f *KubeletFlags) AddFlags(mainfs *pflag.FlagSet) {
 
     fs.StringVar(&f.RootDirectory, "root-dir", f.RootDirectory, "Directory path for managing kubelet files (volume mounts,etc).")
 
-    fs.Var(&f.DynamicConfigDir, "dynamic-config-dir", "The Kubelet will use this directory for checkpointing downloaded configurations and tracking configuration health. The Kubelet will create this directory if it does not already exist. The path may be absolute or relative; relative paths start at the Kubelet's current working directory. Providing this flag enables dynamic Kubelet configuration. The DynamicKubeletConfig feature gate must be enabled to pass this flag.")
-    fs.MarkDeprecated("dynamic-config-dir", "Feature DynamicKubeletConfig is deprecated in 1.22 and will not move to GA. It is planned to be removed from Kubernetes in the version 1.24. Please use alternative ways to update kubelet configuration.")
-
     fs.StringVar(&f.RemoteRuntimeEndpoint, "container-runtime-endpoint", f.RemoteRuntimeEndpoint, "The endpoint of remote runtime service. Unix Domain Sockets are supported on Linux, while npipe and tcp endpoints are supported on Windows. Examples:'unix:///path/to/runtime.sock', 'npipe:////./pipe/runtime'")
     fs.StringVar(&f.RemoteImageEndpoint, "image-service-endpoint", f.RemoteImageEndpoint, "The endpoint of remote image service. If not specified, it will be the same with --container-runtime-endpoint by default. Unix Domain Socket are supported on Linux, while npipe and tcp endpoints are supported on Windows. Examples:'unix:///path/to/runtime.sock', 'npipe:////./pipe/runtime'")
 
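The removed deprecation message points users at "alternative ways to update kubelet configuration", i.e. the file passed via --config. A minimal sketch of that file-based path (assuming the kubeletconfig/configfiles loader that server.go continues to import; the helper below and the exact loader signatures are assumptions for illustration, not code from this commit):

// Illustrative only, not part of this diff.
package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles"
    utilfs "k8s.io/kubernetes/pkg/util/filesystem"
)

func loadKubeletConfigFile(path string) error {
    // Build a loader over the real filesystem for the file passed via --config.
    loader, err := configfiles.NewFsLoader(&utilfs.DefaultFs{}, path)
    if err != nil {
        return fmt.Errorf("failed to construct kubelet config loader for %q: %w", path, err)
    }
    // Load decodes and defaults the KubeletConfiguration from disk.
    kc, err := loader.Load()
    if err != nil {
        return fmt.Errorf("failed to load kubelet config file %q: %w", path, err)
    }
    fmt.Printf("loaded KubeletConfiguration, feature gates: %v\n", kc.FeatureGates)
    return nil
}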
@@ -35,10 +35,6 @@ func newKubeletServerOrDie() *KubeletServer {
     return s
 }
 
-func cleanFlags(s *KubeletServer) {
-    s.DynamicConfigDir = cliflag.NewStringFlag(s.DynamicConfigDir.Value())
-}
-
 // TestRoundTrip ensures that flag values from the Kubelet can be serialized
 // to arguments and then read back and have the same value. Also catches cases
 // where the default value reported by the flag is not actually allowed to be

@@ -103,7 +99,6 @@ func TestRoundTrip(t *testing.T) {
             }
             continue
         }
-        cleanFlags(outputFlags)
         if !reflect.DeepEqual(modifiedFlags, outputFlags) {
             t.Errorf("%s: flags did not round trip: %s", testCase.name, diff.ObjectReflectDiff(modifiedFlags, outputFlags))
             continue
@@ -88,7 +88,6 @@ import (
     kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
     "k8s.io/kubernetes/pkg/kubelet/eviction"
     evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
-    dynamickubeletconfig "k8s.io/kubernetes/pkg/kubelet/kubeletconfig"
     "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles"
     kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
     "k8s.io/kubernetes/pkg/kubelet/server"
@@ -217,30 +216,11 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API
             klog.InfoS("unsupported configuration:KubeletCgroups is not within KubeReservedCgroup")
         }
 
-        // use dynamic kubelet config, if enabled
-        var kubeletConfigController *dynamickubeletconfig.Controller
-        if dynamicConfigDir := kubeletFlags.DynamicConfigDir.Value(); len(dynamicConfigDir) > 0 {
-            var dynamicKubeletConfig *kubeletconfiginternal.KubeletConfiguration
-            dynamicKubeletConfig, kubeletConfigController, err = BootstrapKubeletConfigController(dynamicConfigDir,
-                func(kc *kubeletconfiginternal.KubeletConfiguration) error {
-                    // Here, we enforce flag precedence inside the controller, prior to the controller's validation sequence,
-                    // so that we get a complete validation at the same point where we can decide to reject dynamic config.
-                    // This fixes the flag-precedence component of issue #63305.
-                    // See issue #56171 for general details on flag precedence.
-                    return kubeletConfigFlagPrecedence(kc, args)
-                })
-            if err != nil {
-                return fmt.Errorf("failed to bootstrap a configuration controller, error: %w, dynamicConfigDir: %s", err, dynamicConfigDir)
-            }
-            // If we should just use our existing, local config, the controller will return a nil config
-            if dynamicKubeletConfig != nil {
-                kubeletConfig = dynamicKubeletConfig
-                // Note: flag precedence was already enforced in the controller, prior to validation,
-                // by our above transform function. Now we simply update feature gates from the new config.
-                if err := utilfeature.DefaultMutableFeatureGate.SetFromMap(kubeletConfig.FeatureGates); err != nil {
-                    return fmt.Errorf("failed to set feature gates from initial flags-based config: %w", err)
-                }
-            }
+        // The features.DynamicKubeletConfig is locked to false,
+        // feature gate is not locked using the LockedToDefault flag
+        // to make sure node authorizer can keep working with the older nodes
+        if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
+            return fmt.Errorf("cannot set feature gate %v to %v, feature is locked to %v", features.DynamicKubeletConfig, true, false)
         }
 
         // Config and flags parsed, now we can initialize logging.
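The added comment explains why the gate is rejected with an explicit runtime check rather than being marked LockedToDefault: the node authorizer must keep tolerating older kubelets. For contrast, a minimal sketch (assuming the standard k8s.io/component-base/featuregate API; not code from this commit) of how a gate locked to its default behaves:

// Contrast sketch only, not part of this diff.
package main

import (
    "fmt"

    "k8s.io/component-base/featuregate"
)

const DynamicKubeletConfig featuregate.Feature = "DynamicKubeletConfig"

func main() {
    gate := featuregate.NewFeatureGate()
    // Register the gate as deprecated, defaulted to false, and locked to that default.
    if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
        DynamicKubeletConfig: {Default: false, PreRelease: featuregate.Deprecated, LockedToDefault: true},
    }); err != nil {
        panic(err)
    }
    // Flipping a locked gate is rejected at parse time with an error like
    // "cannot set feature gate DynamicKubeletConfig to true, feature is locked to false".
    if err := gate.SetFromMap(map[string]bool{string(DynamicKubeletConfig): true}); err != nil {
        fmt.Println(err)
    }
}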
@@ -264,9 +244,6 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API
         return fmt.Errorf("failed to construct kubelet dependencies: %w", err)
     }
 
-    // add the kubelet config controller to kubeletDeps
-    kubeletDeps.KubeletConfigController = kubeletConfigController
-
     if err := checkPermissions(); err != nil {
         klog.ErrorS(err, "kubelet running with insufficient permissions")
     }
@@ -783,14 +760,6 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Dependencies, featureGate featuregate.FeatureGate) (err error) {
         return err
     }
 
-    // If the kubelet config controller is available, and dynamic config is enabled, start the config and status sync loops
-    if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) && len(s.DynamicConfigDir.Value()) > 0 &&
-        kubeDeps.KubeletConfigController != nil && !standaloneMode && !s.RunOnce {
-        if err := kubeDeps.KubeletConfigController.StartSync(kubeDeps.KubeClient, kubeDeps.EventClient, string(nodeName)); err != nil {
-            return err
-        }
-    }
-
     if s.HealthzPort > 0 {
         mux := http.NewServeMux()
         healthz.InstallHandler(mux)
@@ -1319,26 +1288,3 @@ func parseResourceList(m map[string]string) (v1.ResourceList, error) {
         }
     }
     return rl, nil
 }
-
-// BootstrapKubeletConfigController constructs and bootstrap a configuration controller
-func BootstrapKubeletConfigController(dynamicConfigDir string, transform dynamickubeletconfig.TransformFunc) (*kubeletconfiginternal.KubeletConfiguration, *dynamickubeletconfig.Controller, error) {
-    if !utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
-        return nil, nil, fmt.Errorf("failed to bootstrap Kubelet config controller, you must enable the DynamicKubeletConfig feature gate")
-    }
-    if len(dynamicConfigDir) == 0 {
-        return nil, nil, fmt.Errorf("cannot bootstrap Kubelet config controller, --dynamic-config-dir was not provided")
-    }
-
-    // compute absolute path and bootstrap controller
-    dir, err := filepath.Abs(dynamicConfigDir)
-    if err != nil {
-        return nil, nil, fmt.Errorf("failed to get absolute path for --dynamic-config-dir=%s", dynamicConfigDir)
-    }
-    // get the latest KubeletConfiguration checkpoint from disk, or return the default config if no valid checkpoints exist
-    c := dynamickubeletconfig.NewController(dir, transform)
-    kc, err := c.Bootstrap()
-    if err != nil {
-        return nil, nil, fmt.Errorf("failed to determine a valid configuration, error: %w", err)
-    }
-    return kc, c, nil
-}
@@ -64,7 +64,6 @@ allowed_prometheus_importers=(
   ./staging/src/k8s.io/component-base/metrics/wrappers.go
   ./test/e2e/apimachinery/flowcontrol.go
   ./test/e2e/node/pods.go
-  ./test/e2e_node/dynamic_kubelet_config_test.go
   ./test/e2e_node/resource_metrics_test.go
   ./test/instrumentation/main_test.go
   ./test/integration/apiserver/flowcontrol/concurrency_test.go
@@ -4127,8 +4127,7 @@ type NodeSpec struct {
     // +optional
     Taints []Taint
 
-    // If specified, the source to get node configuration from
-    // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field
+    // Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed from Kubelets as of 1.24 and will be fully removed in 1.26.
     // +optional
     ConfigSource *NodeConfigSource
 

@@ -4138,12 +4137,12 @@ type NodeSpec struct {
     DoNotUseExternalID string
 }
 
-// NodeConfigSource specifies a source of node configuration. Exactly one subfield must be non-nil.
+// Deprecated: NodeConfigSource specifies a source of node configuration. Exactly one subfield must be non-nil.
 type NodeConfigSource struct {
     ConfigMap *ConfigMapNodeConfigSource
 }
 
-// ConfigMapNodeConfigSource represents the config map of a node
+// Deprecated: ConfigMapNodeConfigSource represents the config map of a node
 type ConfigMapNodeConfigSource struct {
     // Namespace is the metadata.namespace of the referenced ConfigMap.
     // This field is required in all cases.
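With the field now a no-op for kubelets, operators moving off DynamicKubeletConfig are expected to clear the deprecated spec.configSource on their Nodes. A small illustrative sketch (the client-go Patch call is standard; the helper itself is hypothetical and not part of this commit):

// Illustrative only, not part of this diff.
package main

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    clientset "k8s.io/client-go/kubernetes"
)

func clearNodeConfigSource(ctx context.Context, client clientset.Interface, nodeName string) error {
    // Strategic merge patch that nulls out the deprecated field.
    patch := []byte(`{"spec":{"configSource":null}}`)
    if _, err := client.CoreV1().Nodes().Patch(ctx, nodeName, types.StrategicMergePatchType, patch, metav1.PatchOptions{}); err != nil {
        return fmt.Errorf("failed to clear configSource on node %q: %w", nodeName, err)
    }
    return nil
}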
@@ -833,7 +833,7 @@ func init() {
 // available throughout Kubernetes binaries.
 var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
     AppArmor: {Default: true, PreRelease: featuregate.Beta},
-    DynamicKubeletConfig: {Default: false, PreRelease: featuregate.Deprecated}, // feature gate is deprecated in 1.22, remove no early than 1.23
+    DynamicKubeletConfig: {Default: false, PreRelease: featuregate.Deprecated}, // feature gate is deprecated in 1.22, kubelet logic is removed in 1.24, api server logic can be removed in 1.26
     ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: featuregate.Beta},
     DevicePlugins: {Default: true, PreRelease: featuregate.Beta},
     RotateKubeletServerCertificate: {Default: true, PreRelease: featuregate.Beta},
@@ -448,9 +448,6 @@ func TestSetDefaultsKubeletConfiguration(t *testing.T) {
                MakeIPTablesUtilChains: utilpointer.Bool(true),
                IPTablesMasqueradeBit: utilpointer.Int32(1),
                IPTablesDropBit: utilpointer.Int32(1),
-               FeatureGates: map[string]bool{
-                   "DynamicKubeletConfig": true,
-               },
                FailSwapOn: utilpointer.Bool(true),
                MemorySwap: v1beta1.MemorySwapConfiguration{SwapBehavior: "UnlimitedSwap"},
                ContainerLogMaxSize: "1Mi",

@@ -594,9 +591,6 @@ func TestSetDefaultsKubeletConfiguration(t *testing.T) {
                MakeIPTablesUtilChains: utilpointer.Bool(true),
                IPTablesMasqueradeBit: utilpointer.Int32(1),
                IPTablesDropBit: utilpointer.Int32(1),
-               FeatureGates: map[string]bool{
-                   "DynamicKubeletConfig": true,
-               },
                FailSwapOn: utilpointer.Bool(true),
                MemorySwap: v1beta1.MemorySwapConfiguration{SwapBehavior: "UnlimitedSwap"},
                ContainerLogMaxSize: "1Mi",
@@ -76,7 +76,6 @@ import (
     "k8s.io/kubernetes/pkg/kubelet/events"
     "k8s.io/kubernetes/pkg/kubelet/eviction"
     "k8s.io/kubernetes/pkg/kubelet/images"
-    "k8s.io/kubernetes/pkg/kubelet/kubeletconfig"
     "k8s.io/kubernetes/pkg/kubelet/kuberuntime"
     "k8s.io/kubernetes/pkg/kubelet/lifecycle"
     "k8s.io/kubernetes/pkg/kubelet/logs"

@@ -238,7 +237,6 @@ type Dependencies struct {
     VolumePlugins []volume.VolumePlugin
     DynamicPluginProber volume.DynamicPluginProber
     TLSOptions *server.TLSOptions
-    KubeletConfigController *kubeletconfig.Controller
     RemoteRuntimeService internalapi.RuntimeService
     RemoteImageService internalapi.ImageManagerService
     // remove it after cadvisor.UsingLegacyCadvisorStats dropped.
@@ -1,265 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kubeletconfig
-
-import (
-    "context"
-    "fmt"
-    "os"
-    "time"
-
-    "k8s.io/klog/v2"
-
-    apiv1 "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/types"
-    clientset "k8s.io/client-go/kubernetes"
-    v1core "k8s.io/client-go/kubernetes/typed/core/v1"
-    "k8s.io/client-go/tools/cache"
-    "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint"
-    "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status"
-)
-
-const (
-    // KubeletConfigChangedEventReason identifies an event as a change of Kubelet configuration
-    KubeletConfigChangedEventReason = "KubeletConfigChanged"
-    // LocalEventMessage is sent when the Kubelet restarts to use local config
-    LocalEventMessage = "Kubelet restarting to use local config"
-    // RemoteEventMessageFmt is sent when the Kubelet restarts to use a remote config
-    RemoteEventMessageFmt = "Kubelet restarting to use %s, UID: %s, ResourceVersion: %s, KubeletConfigKey: %s"
-)
-
-// pokeConfigSourceWorker tells the worker thread that syncs config sources that work needs to be done
-func (cc *Controller) pokeConfigSourceWorker() {
-    select {
-    case cc.pendingConfigSource <- true:
-    default:
-    }
-}
-
-// syncConfigSource checks if work needs to be done to use a new configuration, and does that work if necessary
-func (cc *Controller) syncConfigSource(client clientset.Interface, eventClient v1core.EventsGetter, nodeName string) {
-    select {
-    case <-cc.pendingConfigSource:
-    default:
-        // no work to be done, return
-        return
-    }
-
-    // if the sync fails, we want to retry
-    var syncerr error
-    defer func() {
-        if syncerr != nil {
-            klog.ErrorS(syncerr, "Kubelet config controller")
-            cc.pokeConfigSourceWorker()
-        }
-    }()
-
-    // get the latest Node.Spec.ConfigSource from the informer
-    source, err := latestNodeConfigSource(cc.nodeInformer.GetStore(), nodeName)
-    if err != nil {
-        cc.configStatus.SetErrorOverride(fmt.Sprintf(status.SyncErrorFmt, status.InternalError))
-        syncerr = fmt.Errorf("%s, error: %v", status.InternalError, err)
-        return
-    }
-
-    // a nil source simply means we reset to local defaults
-    if source == nil {
-        klog.InfoS("Kubelet config controller Node.Spec.ConfigSource is empty, will reset assigned and last-known-good to defaults")
-        if updated, reason, err := cc.resetConfig(); err != nil {
-            reason = fmt.Sprintf(status.SyncErrorFmt, reason)
-            cc.configStatus.SetErrorOverride(reason)
-            syncerr = fmt.Errorf("%s, error: %v", reason, err)
-            return
-        } else if updated {
-            restartForNewConfig(eventClient, nodeName, nil)
-        }
-        return
-    }
-
-    // a non-nil source means we should attempt to download the config, and checkpoint it if necessary
-    klog.InfoS("Kubelet config controller Node.Spec.ConfigSource is non-empty, will checkpoint source and update config if necessary")
-
-    // TODO(mtaufen): It would be nice if we could check the payload's metadata before (re)downloading the whole payload
-    // we at least try pulling the latest configmap out of the local informer store.
-
-    // construct the interface that can dynamically dispatch the correct Download, etc. methods for the given source type
-    remote, reason, err := checkpoint.NewRemoteConfigSource(source)
-    if err != nil {
-        reason = fmt.Sprintf(status.SyncErrorFmt, reason)
-        cc.configStatus.SetErrorOverride(reason)
-        syncerr = fmt.Errorf("%s, error: %v", reason, err)
-        return
-    }
-
-    // "download" source, either from informer's in-memory store or directly from the API server, if the informer doesn't have a copy
-    payload, reason, err := cc.downloadConfigPayload(client, remote)
-    if err != nil {
-        reason = fmt.Sprintf(status.SyncErrorFmt, reason)
-        cc.configStatus.SetErrorOverride(reason)
-        syncerr = fmt.Errorf("%s, error: %v", reason, err)
-        return
-    }
-
-    // save a checkpoint for the payload, if one does not already exist
-    if reason, err := cc.saveConfigCheckpoint(remote, payload); err != nil {
-        reason = fmt.Sprintf(status.SyncErrorFmt, reason)
-        cc.configStatus.SetErrorOverride(reason)
-        syncerr = fmt.Errorf("%s, error: %v", reason, err)
-        return
-    }
-
-    // update the local, persistent record of assigned config
-    if updated, reason, err := cc.setAssignedConfig(remote); err != nil {
-        reason = fmt.Sprintf(status.SyncErrorFmt, reason)
-        cc.configStatus.SetErrorOverride(reason)
-        syncerr = fmt.Errorf("%s, error: %v", reason, err)
-        return
-    } else if updated {
-        restartForNewConfig(eventClient, nodeName, remote)
-    }
-
-    // If we get here:
-    // - there is no need to restart to use new config
-    // - there was no error trying to sync configuration
-    // - if, previously, there was an error trying to sync configuration, we need to clear that error from the status
-    cc.configStatus.SetErrorOverride("")
-}
-
-// Note: source has up-to-date uid and resourceVersion after calling downloadConfigPayload.
-func (cc *Controller) downloadConfigPayload(client clientset.Interface, source checkpoint.RemoteConfigSource) (checkpoint.Payload, string, error) {
-    var store cache.Store
-    if cc.remoteConfigSourceInformer != nil {
-        store = cc.remoteConfigSourceInformer.GetStore()
-    }
-    return source.Download(client, store)
-}
-
-func (cc *Controller) saveConfigCheckpoint(source checkpoint.RemoteConfigSource, payload checkpoint.Payload) (string, error) {
-    ok, err := cc.checkpointStore.Exists(source)
-    if err != nil {
-        return status.InternalError, fmt.Errorf("%s, error: %v", status.InternalError, err)
-    }
-    if ok {
-        klog.InfoS("Kubelet config controller checkpoint already exists for source", "apiPath", source.APIPath(), "checkpointUID", payload.UID(), "resourceVersion", payload.ResourceVersion())
-        return "", nil
-    }
-    if err := cc.checkpointStore.Save(payload); err != nil {
-        return status.InternalError, fmt.Errorf("%s, error: %v", status.InternalError, err)
-    }
-    return "", nil
-}
-
-// setAssignedConfig updates the assigned checkpoint config in the store.
-// Returns whether the assigned config changed as a result, or a sanitized failure reason and an error.
-func (cc *Controller) setAssignedConfig(source checkpoint.RemoteConfigSource) (bool, string, error) {
-    assigned, err := cc.checkpointStore.Assigned()
-    if err != nil {
-        return false, status.InternalError, err
-    }
-    if err := cc.checkpointStore.SetAssigned(source); err != nil {
-        return false, status.InternalError, err
-    }
-    return !checkpoint.EqualRemoteConfigSources(assigned, source), "", nil
-}
-
-// resetConfig resets the assigned and last-known-good checkpoints in the checkpoint store to their default values and
-// returns whether the assigned checkpoint changed as a result, or a sanitized failure reason and an error.
-func (cc *Controller) resetConfig() (bool, string, error) {
-    updated, err := cc.checkpointStore.Reset()
-    if err != nil {
-        return false, status.InternalError, err
-    }
-    return updated, "", nil
-}
-
-// restartForNewConfig presumes the Kubelet is managed by a babysitter, e.g. systemd
-// It will send an event before exiting.
-func restartForNewConfig(eventClient v1core.EventsGetter, nodeName string, source checkpoint.RemoteConfigSource) {
-    message := LocalEventMessage
-    if source != nil {
-        message = fmt.Sprintf(RemoteEventMessageFmt, source.APIPath(), source.UID(), source.ResourceVersion(), source.KubeletFilename())
-    }
-    // we directly log and send the event, instead of using the event recorder,
-    // because the event recorder won't flush its queue before we exit (we'd lose the event)
-    event := makeEvent(nodeName, apiv1.EventTypeNormal, KubeletConfigChangedEventReason, message)
-    klog.V(3).InfoS("Event created", "event", klog.KObj(event), "involvedObject", event.InvolvedObject, "eventType", event.Type, "reason", event.Reason, "message", event.Message)
-    if _, err := eventClient.Events(apiv1.NamespaceDefault).Create(context.TODO(), event, metav1.CreateOptions{}); err != nil {
-        klog.ErrorS(err, "Kubelet config controller failed to send event")
-    }
-    klog.InfoS("Kubelet config controller event", "message", message)
-    os.Exit(0)
-}
-
-// latestNodeConfigSource returns a copy of the most recent NodeConfigSource from the Node with `nodeName` in `store`
-func latestNodeConfigSource(store cache.Store, nodeName string) (*apiv1.NodeConfigSource, error) {
-    obj, ok, err := store.GetByKey(nodeName)
-    if err != nil {
-        err := fmt.Errorf("failed to retrieve Node %q from informer's store, error: %v", nodeName, err)
-        klog.ErrorS(err, "Kubelet config controller")
-        return nil, err
-    } else if !ok {
-        err := fmt.Errorf("node %q does not exist in the informer's store, can't sync config source", nodeName)
-        klog.ErrorS(err, "Kubelet config controller")
-        return nil, err
-    }
-    node, ok := obj.(*apiv1.Node)
-    if !ok {
-        err := fmt.Errorf("failed to cast object from informer's store to Node, can't sync config source for Node %q", nodeName)
-        klog.ErrorS(err, "Kubelet config controller")
-        return nil, err
-    }
-    // Copy the source, so anyone who modifies it after here doesn't mess up the informer's store!
-    // This was previously the cause of a bug that made the Kubelet frequently resync config; Download updated
-    // the UID and ResourceVersion on the NodeConfigSource, but the pointer was still drilling all the way
-    // into the informer's copy!
-    return node.Spec.ConfigSource.DeepCopy(), nil
-}
-
-// makeEvent constructs an event
-// similar to makeEvent in k8s.io/client-go/tools/record/event.go
-func makeEvent(nodeName, eventtype, reason, message string) *apiv1.Event {
-    const componentKubelet = "kubelet"
-    // NOTE(mtaufen): This is consistent with pkg/kubelet/kubelet.go. Even though setting the node
-    // name as the UID looks strange, it appears to be conventional for events sent by the Kubelet.
-    ref := apiv1.ObjectReference{
-        Kind:      "Node",
-        Name:      nodeName,
-        UID:       types.UID(nodeName),
-        Namespace: "",
-    }
-
-    t := metav1.Time{Time: time.Now()}
-    namespace := ref.Namespace
-    if namespace == "" {
-        namespace = metav1.NamespaceDefault
-    }
-    return &apiv1.Event{
-        ObjectMeta: metav1.ObjectMeta{
-            Name:      fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()),
-            Namespace: namespace,
-        },
-        InvolvedObject: ref,
-        Reason:         reason,
-        Message:        message,
-        FirstTimestamp: t,
-        LastTimestamp:  t,
-        Count:          1,
-        Type:           eventtype,
-        Source:         apiv1.EventSource{Component: componentKubelet, Host: string(nodeName)},
-    }
-}
@@ -1,324 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kubeletconfig
-
-import (
-    "fmt"
-    "k8s.io/klog/v2"
-    "path/filepath"
-    "time"
-
-    apiequality "k8s.io/apimachinery/pkg/api/equality"
-    "k8s.io/apimachinery/pkg/util/wait"
-    clientset "k8s.io/client-go/kubernetes"
-    v1core "k8s.io/client-go/kubernetes/typed/core/v1"
-    "k8s.io/client-go/tools/cache"
-    kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
-    "k8s.io/kubernetes/pkg/kubelet/apis/config/validation"
-
-    "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint"
-    "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store"
-    "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status"
-    utilpanic "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/panic"
-    utilfs "k8s.io/kubernetes/pkg/util/filesystem"
-)
-
-const (
-    storeDir = "store"
-    // TODO(mtaufen): We may expose this in a future API, but for the time being we use an internal default,
-    // because it is not especially clear where this should live in the API.
-    configTrialDuration = 10 * time.Minute
-)
-
-// TransformFunc edits the KubeletConfiguration in-place, and returns an
-// error if any of the transformations failed.
-type TransformFunc func(kc *kubeletconfig.KubeletConfiguration) error
-
-// Controller manages syncing dynamic Kubelet configurations
-// For more information, see the proposal: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/node/dynamic-kubelet-configuration.md
-type Controller struct {
-    // transform applies an arbitrary transformation to config after loading, and before validation.
-    // This can be used, for example, to include config from flags before the controller's validation step.
-    // If transform returns an error, loadConfig will fail, and an InternalError will be reported.
-    // Be wary if using this function as an extension point, in most cases the controller should
-    // probably just be natively extended to do what you need. Injecting flag precedence transformations
-    // is something of an exception because the caller of this controller (cmd/) is aware of flags, but this
-    // controller's tree (pkg/) is not.
-    transform TransformFunc
-
-    // pendingConfigSource; write to this channel to indicate that the config source needs to be synced from the API server
-    pendingConfigSource chan bool
-
-    // configStatus manages the status we report on the Node object
-    configStatus status.NodeConfigStatus
-
-    // nodeInformer is the informer that watches the Node object
-    nodeInformer cache.SharedInformer
-
-    // remoteConfigSourceInformer is the informer that watches the assigned config source
-    remoteConfigSourceInformer cache.SharedInformer
-
-    // checkpointStore persists config source checkpoints to a storage layer
-    checkpointStore store.Store
-}
-
-// NewController constructs a new Controller object and returns it. The dynamicConfigDir
-// path must be absolute. transform applies an arbitrary transformation to config after loading, and before validation.
-// This can be used, for example, to include config from flags before the controller's validation step.
-// If transform returns an error, loadConfig will fail, and an InternalError will be reported.
-// Be wary if using this function as an extension point, in most cases the controller should
-// probably just be natively extended to do what you need. Injecting flag precedence transformations
-// is something of an exception because the caller of this controller (cmd/) is aware of flags, but this
-// controller's tree (pkg/) is not.
-func NewController(dynamicConfigDir string, transform TransformFunc) *Controller {
-    return &Controller{
-        transform: transform,
-        // channels must have capacity at least 1, since we signal with non-blocking writes
-        pendingConfigSource: make(chan bool, 1),
-        configStatus:        status.NewNodeConfigStatus(),
-        checkpointStore:     store.NewFsStore(&utilfs.DefaultFs{}, filepath.Join(dynamicConfigDir, storeDir)),
-    }
-}
-
-// Bootstrap attempts to return a valid KubeletConfiguration based on the configuration of the Controller,
-// or returns an error if no valid configuration could be produced. Bootstrap should be called synchronously before StartSync.
-// If the pre-existing local configuration should be used, Bootstrap returns a nil config.
-func (cc *Controller) Bootstrap() (*kubeletconfig.KubeletConfiguration, error) {
-    klog.InfoS("Kubelet config controller starting controller")
-
-    // ensure the filesystem is initialized
-    if err := cc.initializeDynamicConfigDir(); err != nil {
-        return nil, err
-    }
-
-    // determine assigned source and set status
-    assignedSource, err := cc.checkpointStore.Assigned()
-    if err != nil {
-        return nil, err
-    }
-    if assignedSource != nil {
-        cc.configStatus.SetAssigned(assignedSource.NodeConfigSource())
-    }
-
-    // determine last-known-good source and set status
-    lastKnownGoodSource, err := cc.checkpointStore.LastKnownGood()
-    if err != nil {
-        return nil, err
-    }
-    if lastKnownGoodSource != nil {
-        cc.configStatus.SetLastKnownGood(lastKnownGoodSource.NodeConfigSource())
-    }
-
-    // if the assigned source is nil, return nil to indicate local config
-    if assignedSource == nil {
-        return nil, nil
-    }
-
-    // attempt to load assigned config
-    assignedConfig, reason, err := cc.loadConfig(assignedSource)
-    if err == nil {
-        // update the active source to the non-nil assigned source
-        cc.configStatus.SetActive(assignedSource.NodeConfigSource())
-
-        // update the last-known-good config if necessary, and start a timer that
-        // periodically checks whether the last-known good needs to be updated
-        // we only do this when the assigned config loads and passes validation
-        // wait.Forever will call the func once before starting the timer
-        go wait.Forever(func() { cc.checkTrial(configTrialDuration) }, 10*time.Second)
-
-        return assignedConfig, nil
-    } // Assert: the assigned config failed to load or validate
-
-    // TODO(mtaufen): consider re-attempting download when a load/verify/parse/validate
-    // error happens outside trial period, we already made it past the trial so it's probably filesystem corruption
-    // or something else scary
-
-    // log the reason and error details for the failure to load the assigned config
-    klog.ErrorS(err, "Kubelet config controller", "reason", reason)
-
-    // set status to indicate the failure with the assigned config
-    cc.configStatus.SetError(reason)
-
-    // if the last-known-good source is nil, return nil to indicate local config
-    if lastKnownGoodSource == nil {
-        return nil, nil
-    }
-
-    // attempt to load the last-known-good config
-    lastKnownGoodConfig, _, err := cc.loadConfig(lastKnownGoodSource)
-    if err != nil {
-        // we failed to load the last-known-good, so something is really messed up and we just return the error
-        return nil, err
-    }
-
-    // set status to indicate the active source is the non-nil last-known-good source
-    cc.configStatus.SetActive(lastKnownGoodSource.NodeConfigSource())
-    return lastKnownGoodConfig, nil
-}
-
-// StartSync tells the controller to start the goroutines that sync status/config to/from the API server.
-// The clients must be non-nil, and the nodeName must be non-empty.
-func (cc *Controller) StartSync(client clientset.Interface, eventClient v1core.EventsGetter, nodeName string) error {
-    const errFmt = "cannot start Kubelet config sync: %s"
-    if client == nil {
-        return fmt.Errorf(errFmt, "nil client")
-    }
-    if eventClient == nil {
-        return fmt.Errorf(errFmt, "nil event client")
-    }
-    if nodeName == "" {
-        return fmt.Errorf(errFmt, "empty nodeName")
-    }
-
-    // Rather than use utilruntime.HandleCrash, which doesn't actually crash in the Kubelet,
-    // we use HandlePanic to manually call the panic handlers and then crash.
-    // We have a better chance of recovering normal operation if we just restart the Kubelet in the event
-    // of a Go runtime error.
-    // NOTE(mtaufen): utilpanic.HandlePanic returns a function and you have to call it for your thing to run!
-    // This was EVIL to debug (difficult to see missing `()`).
-    // The code now uses `go name()` instead of `go utilpanic.HandlePanic(func(){...})()` to avoid confusion.
-
-    // status sync worker
-    statusSyncLoopFunc := utilpanic.HandlePanic(func() {
-        klog.InfoS("Kubelet config controller starting status sync loop")
-        wait.JitterUntil(func() {
-            cc.configStatus.Sync(client, nodeName)
-        }, 10*time.Second, 0.2, true, wait.NeverStop)
-    })
-    // remote config source informer, if we have a remote source to watch
-    assignedSource, err := cc.checkpointStore.Assigned()
-    if err != nil {
-        return fmt.Errorf(errFmt, err)
-    } else if assignedSource == nil {
-        klog.InfoS("Kubelet config controller local source is assigned, will not start remote config source informer")
-    } else {
-        cc.remoteConfigSourceInformer = assignedSource.Informer(client, cache.ResourceEventHandlerFuncs{
-            AddFunc:    cc.onAddRemoteConfigSourceEvent,
-            UpdateFunc: cc.onUpdateRemoteConfigSourceEvent,
-            DeleteFunc: cc.onDeleteRemoteConfigSourceEvent,
-        },
-        )
-    }
-    remoteConfigSourceInformerFunc := utilpanic.HandlePanic(func() {
-        if cc.remoteConfigSourceInformer != nil {
-            klog.InfoS("Kubelet config controller starting remote config source informer")
-            cc.remoteConfigSourceInformer.Run(wait.NeverStop)
-        }
-    })
-    // node informer
-    cc.nodeInformer = newSharedNodeInformer(client, nodeName,
-        cc.onAddNodeEvent, cc.onUpdateNodeEvent, cc.onDeleteNodeEvent)
-    nodeInformerFunc := utilpanic.HandlePanic(func() {
-        klog.InfoS("Kubelet config controller starting Node informer")
-        cc.nodeInformer.Run(wait.NeverStop)
-    })
-    // config sync worker
-    configSyncLoopFunc := utilpanic.HandlePanic(func() {
-        klog.InfoS("Kubelet config controller starting Kubelet config sync loop")
-        wait.JitterUntil(func() {
-            cc.syncConfigSource(client, eventClient, nodeName)
-        }, 10*time.Second, 0.2, true, wait.NeverStop)
-    })
-
-    go statusSyncLoopFunc()
-    go remoteConfigSourceInformerFunc()
-    go nodeInformerFunc()
-    go configSyncLoopFunc()
-    return nil
-}
-
-// loadConfig loads Kubelet config from a checkpoint
-// It returns the loaded configuration or a clean failure reason (for status reporting) and an error.
-func (cc *Controller) loadConfig(source checkpoint.RemoteConfigSource) (*kubeletconfig.KubeletConfiguration, string, error) {
-    // load KubeletConfiguration from checkpoint
-    kc, err := cc.checkpointStore.Load(source)
-    if err != nil {
-        return nil, status.LoadError, err
-    }
-    // apply any required transformations to the KubeletConfiguration
-    if cc.transform != nil {
-        if err := cc.transform(kc); err != nil {
-            return nil, status.InternalError, err
-        }
-    }
-    // validate the result
-    if err := validation.ValidateKubeletConfiguration(kc); err != nil {
-        return nil, status.ValidateError, err
-    }
-    return kc, "", nil
-}
-
-// initializeDynamicConfigDir makes sure that the storage layers for various controller components are set up correctly
-func (cc *Controller) initializeDynamicConfigDir() error {
-    klog.InfoS("Kubelet config controller ensuring filesystem is set up correctly")
-    // initializeDynamicConfigDir local checkpoint storage location
-    return cc.checkpointStore.Initialize()
-}
-
-// checkTrial checks whether the trial duration has passed, and updates the last-known-good config if necessary
-func (cc *Controller) checkTrial(duration time.Duration) {
-    // when the trial period is over, the assigned config becomes the last-known-good
-    if trial, err := cc.inTrial(duration); err != nil {
-        klog.ErrorS(err, "Kubelet config controller failed to check trial period for assigned config")
-    } else if !trial {
-        if err := cc.graduateAssignedToLastKnownGood(); err != nil {
-            klog.ErrorS(err, "failed to set last-known-good to assigned config")
-        }
-    }
-}
-
-// inTrial returns true if the time elapsed since the last modification of the assigned config does not exceed `trialDur`, false otherwise
-func (cc *Controller) inTrial(trialDur time.Duration) (bool, error) {
-    now := time.Now()
-    t, err := cc.checkpointStore.AssignedModified()
-    if err != nil {
-        return false, err
-    }
-    if now.Sub(t) <= trialDur {
-        return true, nil
-    }
-    return false, nil
-}
-
-// graduateAssignedToLastKnownGood sets the last-known-good in the checkpointStore
-// to the same value as the assigned config maintained by the checkpointStore
-func (cc *Controller) graduateAssignedToLastKnownGood() error {
-    // get assigned
-    assigned, err := cc.checkpointStore.Assigned()
-    if err != nil {
-        return err
-    }
-    // get last-known-good
-    lastKnownGood, err := cc.checkpointStore.LastKnownGood()
-    if err != nil {
-        return err
-    }
-    // if the sources are equal, no need to change
-    if assigned == lastKnownGood ||
-        assigned != nil && lastKnownGood != nil && apiequality.Semantic.DeepEqual(assigned.NodeConfigSource(), lastKnownGood.NodeConfigSource()) {
-        return nil
-    }
-    // update last-known-good
-    err = cc.checkpointStore.SetLastKnownGood(assigned)
-    if err != nil {
-        return err
-    }
-    // update the status to reflect the new last-known-good config
-    cc.configStatus.SetLastKnownGood(assigned.NodeConfigSource())
-    klog.InfoS("Kubelet config controller updated last-known-good config to new checkpointStore", "apiPath", assigned.APIPath(), "checkpointUID", assigned.UID(), "resourceVersion", assigned.ResourceVersion())
-    return nil
-}
@@ -1,81 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kubeletconfig
-
-import (
-    "testing"
-
-    apiv1 "k8s.io/api/core/v1"
-    "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint"
-    "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/checkpoint/store"
-    "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/status"
-)
-
-func TestGraduateAssignedToLastKnownGood(t *testing.T) {
-    realSource1, _, err := checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{
-        ConfigMap: &apiv1.ConfigMapNodeConfigSource{
-            Namespace:        "foo",
-            Name:             "1",
-            KubeletConfigKey: "kubelet",
-        },
-    })
-    if err != nil {
-        t.Fatalf("unexpected error: %v", err)
-    }
-
-    realSource2, _, err := checkpoint.NewRemoteConfigSource(&apiv1.NodeConfigSource{
-        ConfigMap: &apiv1.ConfigMapNodeConfigSource{
-            Namespace:        "foo",
-            Name:             "2",
-            KubeletConfigKey: "kubelet",
-        },
-    })
-    if err != nil {
-        t.Fatalf("unexpected error: %v", err)
-    }
-
-    cases := []struct {
-        name     string
-        assigned checkpoint.RemoteConfigSource
-        lkg      checkpoint.RemoteConfigSource
-    }{
-        {
-            name:     "nil lkg to non-nil lkg",
-            assigned: realSource1,
-            lkg:      nil,
-        },
-        {
-            name:     "non-nil lkg to non-nil lkg",
-            assigned: realSource2,
-            lkg:      realSource1,
-        },
-    }
-
-    for _, tc := range cases {
-        t.Run(tc.name, func(t *testing.T) {
-            controller := &Controller{
-                configStatus:    status.NewNodeConfigStatus(),
-                checkpointStore: store.NewFakeStore(),
-            }
-            controller.checkpointStore.SetLastKnownGood(tc.lkg)
-            controller.checkpointStore.SetAssigned(tc.assigned)
-            if err := controller.graduateAssignedToLastKnownGood(); err != nil {
-                t.Fatalf("unexpected error: %v", err)
-            }
-        })
-    }
-}
@@ -28,7 +28,6 @@ import (
     "k8s.io/apimachinery/pkg/types"
     clientset "k8s.io/client-go/kubernetes"
     nodeutil "k8s.io/component-helpers/node/util"
-    "k8s.io/kubernetes/pkg/kubelet/metrics"
 )
 
 const (

@@ -175,24 +174,6 @@ func (s *nodeConfigStatus) Sync(client clientset.Interface, nodeName string) {
         status.Error = s.errorOverride
     }
 
-    // update metrics based on the status we will sync
-    metrics.SetConfigError(len(status.Error) > 0)
-    err = metrics.SetAssignedConfig(status.Assigned)
-    if err != nil {
-        err = fmt.Errorf("failed to update Assigned config metric, error: %v", err)
-        return
-    }
-    err = metrics.SetActiveConfig(status.Active)
-    if err != nil {
-        err = fmt.Errorf("failed to update Active config metric, error: %v", err)
-        return
-    }
-    err = metrics.SetLastKnownGoodConfig(status.LastKnownGood)
-    if err != nil {
-        err = fmt.Errorf("failed to update LastKnownGood config metric, error: %v", err)
-        return
-    }
-
     // apply the status to a copy of the node so we don't modify the object in the informer's store
     newNode := oldNode.DeepCopy()
     newNode.Status.Config = status
@@ -1,140 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kubeletconfig
-
-import (
-	"k8s.io/klog/v2"
-	"math/rand"
-	"time"
-
-	apiv1 "k8s.io/api/core/v1"
-	apiequality "k8s.io/apimachinery/pkg/api/equality"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/fields"
-	clientset "k8s.io/client-go/kubernetes"
-	"k8s.io/client-go/tools/cache"
-)
-
-// newSharedNodeInformer returns a shared informer that uses `client` to watch the Node with
-// `nodeName` for changes and respond with `addFunc`, `updateFunc`, and `deleteFunc`.
-func newSharedNodeInformer(client clientset.Interface, nodeName string,
-	addFunc func(newObj interface{}),
-	updateFunc func(oldObj interface{}, newObj interface{}),
-	deleteFunc func(deletedObj interface{})) cache.SharedInformer {
-	// select nodes by name
-	fieldSelector := fields.OneTermEqualSelector("metadata.name", nodeName)
-
-	// add some randomness to resync period, which can help avoid controllers falling into lock-step
-	minResyncPeriod := 15 * time.Minute
-	factor := rand.Float64() + 1
-	resyncPeriod := time.Duration(float64(minResyncPeriod.Nanoseconds()) * factor)
-
-	lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "nodes", metav1.NamespaceAll, fieldSelector)
-
-	handler := cache.ResourceEventHandlerFuncs{
-		AddFunc:    addFunc,
-		UpdateFunc: updateFunc,
-		DeleteFunc: deleteFunc,
-	}
-
-	informer := cache.NewSharedInformer(lw, &apiv1.Node{}, resyncPeriod)
-	informer.AddEventHandler(handler)
-
-	return informer
-}
-
-// onAddNodeEvent calls onUpdateNodeEvent with the new object and a nil old object
-func (cc *Controller) onAddNodeEvent(newObj interface{}) {
-	cc.onUpdateNodeEvent(nil, newObj)
-}
-
-// onUpdateNodeEvent checks whether the configSource changed between oldObj and newObj, and pokes the
-// configuration sync worker if there was a change
-func (cc *Controller) onUpdateNodeEvent(oldObj interface{}, newObj interface{}) {
-	newNode, ok := newObj.(*apiv1.Node)
-	if !ok {
-		klog.ErrorS(nil, "Kubelet config controller failed to cast new object to Node, couldn't handle event")
-		return
-	}
-	if oldObj == nil {
-		// Node was just added, need to sync
-		klog.InfoS("Kubelet config controller initial Node watch event")
-		cc.pokeConfigSourceWorker()
-		return
-	}
-	oldNode, ok := oldObj.(*apiv1.Node)
-	if !ok {
-		klog.ErrorS(nil, "Kubelet config controller failed to cast old object to Node, couldn't handle event")
-		return
-	}
-	if !apiequality.Semantic.DeepEqual(oldNode.Spec.ConfigSource, newNode.Spec.ConfigSource) {
-		klog.InfoS("Kubelet config controller Node.Spec.ConfigSource was updated")
-		cc.pokeConfigSourceWorker()
-	}
-}
-
-// onDeleteNodeEvent logs a message if the Node was deleted
-// We allow the sync-loop to continue, because it is possible that the Kubelet detected
-// a Node with unexpected externalID and is attempting to delete and re-create the Node
-// (see pkg/kubelet/kubelet_node_status.go), or that someone accidentally deleted the Node
-// (the Kubelet will re-create it).
-func (cc *Controller) onDeleteNodeEvent(deletedObj interface{}) {
-	// For this case, we just log the event.
-	// We don't want to poke the worker, because a temporary deletion isn't worth reporting an error for.
-	// If the Node is deleted because the VM is being deleted, then the Kubelet has nothing to do.
-	klog.InfoS("Kubelet config controller Node was deleted")
-}
-
-// onAddRemoteConfigSourceEvent calls onUpdateConfigMapEvent with the new object and a nil old object
-func (cc *Controller) onAddRemoteConfigSourceEvent(newObj interface{}) {
-	cc.onUpdateRemoteConfigSourceEvent(nil, newObj)
-}
-
-// onUpdateRemoteConfigSourceEvent checks whether the configSource changed between oldObj and newObj,
-// and pokes the sync worker if there was a change
-func (cc *Controller) onUpdateRemoteConfigSourceEvent(oldObj interface{}, newObj interface{}) {
-	// since ConfigMap is currently the only source type, we handle that here
-	newConfigMap, ok := newObj.(*apiv1.ConfigMap)
-	if !ok {
-		klog.ErrorS(nil, "Kubelet config controller failed to cast new object to ConfigMap, couldn't handle event")
-		return
-	}
-	if oldObj == nil {
-		// ConfigMap was just added, need to sync
-		klog.InfoS("Kubelet config controller initial ConfigMap watch event")
-		cc.pokeConfigSourceWorker()
-		return
-	}
-	oldConfigMap, ok := oldObj.(*apiv1.ConfigMap)
-	if !ok {
-		klog.ErrorS(nil, "Kubelet config controller failed to cast old object to ConfigMap, couldn't handle event")
-		return
-	}
-	if !apiequality.Semantic.DeepEqual(oldConfigMap, newConfigMap) {
-		klog.InfoS("Kubelet config controller assigned ConfigMap was updated")
-		cc.pokeConfigSourceWorker()
-	}
-}
-
-// onDeleteRemoteConfigSourceEvent logs a message if the ConfigMap was deleted and pokes the sync worker
-func (cc *Controller) onDeleteRemoteConfigSourceEvent(deletedObj interface{}) {
-	// If the ConfigMap we're watching is deleted, we log the event and poke the sync worker.
-	// This requires a sync, because if the Node is still configured to use the deleted ConfigMap,
-	// the Kubelet should report a DownloadError.
-	klog.InfoS("Kubelet config controller assigned ConfigMap was deleted")
-	cc.pokeConfigSourceWorker()
-}
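The removed newSharedNodeInformer above jitters the informer's resync period so that multiple controllers do not fall into lock-step when relisting. A minimal standalone sketch of that jitter calculation, using only the standard library (the 15-minute base period is the value from the removed code; everything else is illustrative):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitteredResyncPeriod mirrors the removed calculation: scale the minimum
// resync period by a random factor in [1, 2), so the result lies between
// min and 2*min.
func jitteredResyncPeriod(min time.Duration) time.Duration {
	factor := rand.Float64() + 1
	return time.Duration(float64(min.Nanoseconds()) * factor)
}

func main() {
	// The removed informer used a 15-minute minimum resync period.
	fmt.Println(jitteredResyncPeriod(15 * time.Minute))
}

Because the factor never reaches 2, each process ends up with a resync interval somewhere in [15m, 30m), which spreads relists out instead of synchronizing them across kubelets.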
@@ -17,17 +17,14 @@ limitations under the License.
 package metrics
 
 import (
-	"fmt"
 	"sync"
 	"time"
 
 	"k8s.io/component-base/metrics"
 	"k8s.io/component-base/metrics/legacyregistry"
 
-	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
-	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/pkg/features"
 )
 
@@ -69,17 +66,6 @@ const (
 	PodResourcesEndpointErrorsListKey           = "pod_resources_endpoint_errors_list"
 	PodResourcesEndpointErrorsGetAllocatableKey = "pod_resources_endpoint_errors_get_allocatable"
 
-	// Metric keys for node config
-	AssignedConfigKey             = "node_config_assigned"
-	ActiveConfigKey               = "node_config_active"
-	LastKnownGoodConfigKey        = "node_config_last_known_good"
-	ConfigErrorKey                = "node_config_error"
-	ConfigSourceLabelKey          = "node_config_source"
-	ConfigSourceLabelValueLocal   = "local"
-	ConfigUIDLabelKey             = "node_config_uid"
-	ConfigResourceVersionLabelKey = "node_config_resource_version"
-	KubeletConfigKeyLabelKey      = "node_config_kubelet_key"
-
 	// Metrics keys for RuntimeClass
 	RunPodSandboxDurationKey = "run_podsandbox_duration_seconds"
 	RunPodSandboxErrorsKey   = "run_podsandbox_errors_total"
@@ -364,52 +350,6 @@ var (
 		[]string{"server_api_version"},
 	)
 
-	// Metrics for node config
-
-	// AssignedConfig is a Gauge that is set 1 if the Kubelet has a NodeConfig assigned.
-	AssignedConfig = metrics.NewGaugeVec(
-		&metrics.GaugeOpts{
-			Subsystem:         KubeletSubsystem,
-			Name:              AssignedConfigKey,
-			Help:              "The node's understanding of intended config. The count is always 1.",
-			DeprecatedVersion: "1.22.0",
-			StabilityLevel:    metrics.ALPHA,
-		},
-		[]string{ConfigSourceLabelKey, ConfigUIDLabelKey, ConfigResourceVersionLabelKey, KubeletConfigKeyLabelKey},
-	)
-	// ActiveConfig is a Gauge that is set to 1 if the Kubelet has an active NodeConfig.
-	ActiveConfig = metrics.NewGaugeVec(
-		&metrics.GaugeOpts{
-			Subsystem:         KubeletSubsystem,
-			Name:              ActiveConfigKey,
-			Help:              "The config source the node is actively using. The count is always 1.",
-			DeprecatedVersion: "1.22.0",
-			StabilityLevel:    metrics.ALPHA,
-		},
-		[]string{ConfigSourceLabelKey, ConfigUIDLabelKey, ConfigResourceVersionLabelKey, KubeletConfigKeyLabelKey},
-	)
-	// LastKnownGoodConfig is a Gauge that is set to 1 if the Kubelet has a NodeConfig it can fall back to if there
-	// are certain errors.
-	LastKnownGoodConfig = metrics.NewGaugeVec(
-		&metrics.GaugeOpts{
-			Subsystem:         KubeletSubsystem,
-			Name:              LastKnownGoodConfigKey,
-			Help:              "The config source the node will fall back to when it encounters certain errors. The count is always 1.",
-			DeprecatedVersion: "1.22.0",
-			StabilityLevel:    metrics.ALPHA,
-		},
-		[]string{ConfigSourceLabelKey, ConfigUIDLabelKey, ConfigResourceVersionLabelKey, KubeletConfigKeyLabelKey},
-	)
-	// ConfigError is a Gauge that is set to 1 if the node is experiencing a configuration-related error.
-	ConfigError = metrics.NewGauge(
-		&metrics.GaugeOpts{
-			Subsystem:         KubeletSubsystem,
-			Name:              ConfigErrorKey,
-			Help:              "This metric is true (1) if the node is experiencing a configuration-related error, false (0) otherwise.",
-			DeprecatedVersion: "1.22.0",
-			StabilityLevel:    metrics.ALPHA,
-		},
-	)
 	// RunPodSandboxDuration is a Histogram that tracks the duration (in seconds) it takes to run Pod Sandbox operations.
 	// Broken down by RuntimeClass.Handler.
 	RunPodSandboxDuration = metrics.NewHistogramVec(
@@ -561,12 +501,6 @@ func Register(collectors ...metrics.StableCollector) {
 	}
 	legacyregistry.MustRegister(RunPodSandboxDuration)
 	legacyregistry.MustRegister(RunPodSandboxErrors)
-	if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
-		legacyregistry.MustRegister(AssignedConfig)
-		legacyregistry.MustRegister(ActiveConfig)
-		legacyregistry.MustRegister(LastKnownGoodConfig)
-		legacyregistry.MustRegister(ConfigError)
-	}
 	for _, collector := range collectors {
 		legacyregistry.CustomMustRegister(collector)
 	}
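For context, the block removed from Register above gates metric registration on a feature gate: the node-config gauges were only registered while DynamicKubeletConfig was enabled. A hedged sketch of that gating pattern, reusing only calls that appear in the removed code (utilfeature.DefaultFeatureGate.Enabled, metrics.NewGauge, legacyregistry.MustRegister); the metric here is hypothetical, and RotateKubeletServerCertificate is used only as a stand-in gate that still exists, since the real gate is deleted by this commit:

package example

import (
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"

	"k8s.io/kubernetes/pkg/features"
)

// exampleGauge stands in for a metric that is only meaningful while a gated feature exists.
var exampleGauge = metrics.NewGauge(
	&metrics.GaugeOpts{
		Subsystem:      "kubelet",
		Name:           "example_gated_metric",
		Help:           "Illustrative gauge registered only behind a feature gate.",
		StabilityLevel: metrics.ALPHA,
	},
)

// registerGatedMetrics mirrors the removed pattern: registration is skipped
// entirely when the gate is off, so the metric never appears on /metrics.
func registerGatedMetrics() {
	if utilfeature.DefaultFeatureGate.Enabled(features.RotateKubeletServerCertificate) {
		legacyregistry.MustRegister(exampleGauge)
	}
}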
@@ -583,104 +517,6 @@ func SinceInSeconds(start time.Time) float64 {
 	return time.Since(start).Seconds()
 }
 
-const configMapAPIPathFmt = "/api/v1/namespaces/%s/configmaps/%s"
-
-func configLabels(source *corev1.NodeConfigSource) (map[string]string, error) {
-	if source == nil {
-		return map[string]string{
-			// prometheus requires all of the labels that can be set on the metric
-			ConfigSourceLabelKey:          "local",
-			ConfigUIDLabelKey:             "",
-			ConfigResourceVersionLabelKey: "",
-			KubeletConfigKeyLabelKey:      "",
-		}, nil
-	}
-	if source.ConfigMap != nil {
-		return map[string]string{
-			ConfigSourceLabelKey:          fmt.Sprintf(configMapAPIPathFmt, source.ConfigMap.Namespace, source.ConfigMap.Name),
-			ConfigUIDLabelKey:             string(source.ConfigMap.UID),
-			ConfigResourceVersionLabelKey: source.ConfigMap.ResourceVersion,
-			KubeletConfigKeyLabelKey:      source.ConfigMap.KubeletConfigKey,
-		}, nil
-	}
-	return nil, fmt.Errorf("unrecognized config source type, all source subfields were nil")
-}
-
-// track labels across metric updates, so we can delete old label sets and prevent leaks
-var assignedConfigLabels map[string]string
-
-// SetAssignedConfig tracks labels according to the assigned NodeConfig. It also tracks labels
-// across metric updates so old labels can be safely deleted.
-func SetAssignedConfig(source *corev1.NodeConfigSource) error {
-	// compute the timeseries labels from the source
-	labels, err := configLabels(source)
-	if err != nil {
-		return err
-	}
-	// clean up the old timeseries (WithLabelValues creates a new one for each distinct label set)
-	if !AssignedConfig.Delete(assignedConfigLabels) {
-		klog.InfoS("Failed to delete metric for labels. This may result in ambiguity from multiple metrics concurrently indicating different assigned configs.", "labels", assignedConfigLabels)
-	}
-	// record the new timeseries
-	assignedConfigLabels = labels
-	// expose the new timeseries with a constant count of 1
-	AssignedConfig.With(assignedConfigLabels).Set(1)
-	return nil
-}
-
-// track labels across metric updates, so we can delete old label sets and prevent leaks
-var activeConfigLabels map[string]string
-
-// SetActiveConfig tracks labels according to the NodeConfig that is currently used by the Kubelet.
-// It also tracks labels across metric updates so old labels can be safely deleted.
-func SetActiveConfig(source *corev1.NodeConfigSource) error {
-	// compute the timeseries labels from the source
-	labels, err := configLabels(source)
-	if err != nil {
-		return err
-	}
-	// clean up the old timeseries (WithLabelValues creates a new one for each distinct label set)
-	if !ActiveConfig.Delete(activeConfigLabels) {
-		klog.InfoS("Failed to delete metric for labels. This may result in ambiguity from multiple metrics concurrently indicating different active configs.", "labels", activeConfigLabels)
-	}
-	// record the new timeseries
-	activeConfigLabels = labels
-	// expose the new timeseries with a constant count of 1
-	ActiveConfig.With(activeConfigLabels).Set(1)
-	return nil
-}
-
-// track labels across metric updates, so we can delete old label sets and prevent leaks
-var lastKnownGoodConfigLabels map[string]string
-
-// SetLastKnownGoodConfig tracks labels according to the NodeConfig that was successfully applied last.
-// It also tracks labels across metric updates so old labels can be safely deleted.
-func SetLastKnownGoodConfig(source *corev1.NodeConfigSource) error {
-	// compute the timeseries labels from the source
-	labels, err := configLabels(source)
-	if err != nil {
-		return err
-	}
-	// clean up the old timeseries (WithLabelValues creates a new one for each distinct label set)
-	if !LastKnownGoodConfig.Delete(lastKnownGoodConfigLabels) {
-		klog.InfoS("Failed to delete metric for labels. This may result in ambiguity from multiple metrics concurrently indicating different last known good configs.", "labels", lastKnownGoodConfigLabels)
-	}
-	// record the new timeseries
-	lastKnownGoodConfigLabels = labels
-	// expose the new timeseries with a constant count of 1
-	LastKnownGoodConfig.With(lastKnownGoodConfigLabels).Set(1)
-	return nil
-}
-
-// SetConfigError sets a the ConfigError metric to 1 in case any errors were encountered.
-func SetConfigError(err bool) {
-	if err {
-		ConfigError.Set(1)
-	} else {
-		ConfigError.Set(0)
-	}
-}
-
 // SetNodeName sets the NodeName Gauge to 1.
 func SetNodeName(name types.NodeName) {
 	NodeName.WithLabelValues(string(name)).Set(1)
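The three Set*Config helpers removed above all implement the same gauge bookkeeping: remember the label set written last time, delete that timeseries, then write the new label set with a constant value of 1, so at most one series per gauge names the "current" config. A condensed sketch of that pattern, limited to calls that appear in the removed code (metrics.NewGaugeVec, Delete, With, Set); the metric and label names are illustrative, and the vector would still need to be registered, as in the removed Register block:

package example

import (
	"k8s.io/component-base/metrics"
)

// configSourceGauge is an illustrative single-label gauge; the timeseries whose
// value is 1 identifies the configuration currently in use.
var configSourceGauge = metrics.NewGaugeVec(
	&metrics.GaugeOpts{
		Subsystem:      "kubelet",
		Name:           "example_config_source",
		Help:           "Illustrative gauge; the labels on the 1-valued series name the active config.",
		StabilityLevel: metrics.ALPHA,
	},
	[]string{"source"},
)

// previousLabels remembers the last label set exposed, so the old timeseries
// can be deleted before a new one is written; otherwise stale series accumulate.
var previousLabels map[string]string

// setConfigSource rotates the gauge to a new label set, mirroring the removed helpers.
func setConfigSource(source string) {
	labels := map[string]string{"source": source}
	// Delete the previous timeseries first (With creates a new series per distinct label set).
	configSourceGauge.Delete(previousLabels)
	previousLabels = labels
	// Expose the new timeseries with a constant count of 1.
	configSourceGauge.With(labels).Set(1)
}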
@@ -2402,9 +2402,7 @@ message NodeSpec {
   // +optional
   repeated Taint taints = 5;
 
-  // Deprecated. If specified, the source of the node's configuration.
-  // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field.
-  // This field is deprecated as of 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration
+  // Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed from Kubelets as of 1.24 and will be fully removed in 1.26.
   // +optional
   optional NodeConfigSource configSource = 6;
 
@@ -4783,9 +4783,7 @@ type NodeSpec struct {
 	// +optional
 	Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"`
 
-	// Deprecated. If specified, the source of the node's configuration.
-	// The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field.
-	// This field is deprecated as of 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration
+	// Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed from Kubelets as of 1.24 and will be fully removed in 1.26.
 	// +optional
 	ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"`
 
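The configSource comment rewritten in this hunk and the previous one refers to NodeSpec.ConfigSource, which pointed the kubelet at a ConfigMap holding its configuration. For reference, this is the shape of such a reference, matching how the removed controller tests near the top of this diff construct it; the namespace and name values are illustrative, and as the new comment states the field is no longer consumed by kubelets as of 1.24:

package main

import (
	"fmt"

	apiv1 "k8s.io/api/core/v1"
)

func main() {
	// A NodeConfigSource is a ConfigMap reference plus the key inside the
	// ConfigMap that holds the serialized KubeletConfiguration.
	source := &apiv1.NodeConfigSource{
		ConfigMap: &apiv1.ConfigMapNodeConfigSource{
			Namespace:        "kube-system",    // illustrative
			Name:             "my-node-config", // illustrative
			KubeletConfigKey: "kubelet",
		},
	}
	fmt.Printf("%+v\n", source.ConfigMap)
}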
@@ -1188,7 +1188,7 @@ var map_NodeSpec = map[string]string{
 	"providerID":    "ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>",
 	"unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration",
 	"taints":        "If specified, the node's taints.",
-	"configSource":  "Deprecated. If specified, the source of the node's configuration. The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field. This field is deprecated as of 1.22: https://git.k8s.io/enhancements/keps/sig-node/281-dynamic-kubelet-configuration",
+	"configSource":  "Deprecated: Previously used to specify the source of the node's configuration for the DynamicKubeletConfig feature. This feature is removed from Kubelets as of 1.24 and will be fully removed in 1.26.",
 	"externalID":    "Deprecated. Not all kubelets will set this field. Remove field after 1.13. see: https://issues.k8s.io/61966",
 }
 
@@ -90,75 +90,42 @@ type KubeletConfiguration struct {
 
 	// enableServer enables Kubelet's secured server.
 	// Note: Kubelet's insecure port is controlled by the readOnlyPort option.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may disrupt components that interact with the Kubelet server.
 	// Default: true
 	EnableServer *bool `json:"enableServer,omitempty"`
 	// staticPodPath is the path to the directory containing local (static) pods to
 	// run, or the path to a single static pod file.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// the set of static pods specified at the new path may be different than the
-	// ones the Kubelet initially started with, and this may disrupt your node.
 	// Default: ""
 	// +optional
 	StaticPodPath string `json:"staticPodPath,omitempty"`
 	// syncFrequency is the max period between synchronizing running
 	// containers and config.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// shortening this duration may have a negative performance impact, especially
-	// as the number of Pods on the node increases. Alternatively, increasing this
-	// duration will result in longer refresh times for ConfigMaps and Secrets.
 	// Default: "1m"
 	// +optional
 	SyncFrequency metav1.Duration `json:"syncFrequency,omitempty"`
 	// fileCheckFrequency is the duration between checking config files for
 	// new data.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// shortening the duration will cause the Kubelet to reload local Static Pod
-	// configurations more frequently, which may have a negative performance impact.
 	// Default: "20s"
 	// +optional
 	FileCheckFrequency metav1.Duration `json:"fileCheckFrequency,omitempty"`
 	// httpCheckFrequency is the duration between checking http for new data.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// shortening the duration will cause the Kubelet to poll staticPodURL more
-	// frequently, which may have a negative performance impact.
 	// Default: "20s"
 	// +optional
 	HTTPCheckFrequency metav1.Duration `json:"httpCheckFrequency,omitempty"`
 	// staticPodURL is the URL for accessing static pods to run.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// the set of static pods specified at the new URL may be different than the
-	// ones the Kubelet initially started with, and this may disrupt your node.
 	// Default: ""
 	// +optional
 	StaticPodURL string `json:"staticPodURL,omitempty"`
 	// staticPodURLHeader is a map of slices with HTTP headers to use when accessing the podURL.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may disrupt the ability to read the latest set of static pods from StaticPodURL.
 	// Default: nil
 	// +optional
 	StaticPodURLHeader map[string][]string `json:"staticPodURLHeader,omitempty"`
 	// address is the IP address for the Kubelet to serve on (set to 0.0.0.0
 	// for all interfaces).
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may disrupt components that interact with the Kubelet server.
 	// Default: "0.0.0.0"
 	// +optional
 	Address string `json:"address,omitempty"`
 	// port is the port for the Kubelet to serve on.
 	// The port number must be between 1 and 65535, inclusive.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may disrupt components that interact with the Kubelet server.
 	// Default: 10250
 	// +optional
 	Port int32 `json:"port,omitempty"`
@@ -166,9 +133,6 @@ type KubeletConfiguration struct {
 	// no authentication/authorization.
 	// The port number must be between 1 and 65535, inclusive.
 	// Setting this field to 0 disables the read-only service.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may disrupt components that interact with the Kubelet server.
 	// Default: 0 (disabled)
 	// +optional
 	ReadOnlyPort int32 `json:"readOnlyPort,omitempty"`
@@ -177,42 +141,26 @@ type KubeletConfiguration struct {
 	// tlsPrivateKeyFile are not provided, a self-signed certificate
 	// and key are generated for the public address and saved to the directory
 	// passed to the Kubelet's --cert-dir flag.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may disrupt components that interact with the Kubelet server.
 	// Default: ""
 	// +optional
 	TLSCertFile string `json:"tlsCertFile,omitempty"`
 	// tlsPrivateKeyFile is the file containing x509 private key matching tlsCertFile.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may disrupt components that interact with the Kubelet server.
 	// Default: ""
 	// +optional
 	TLSPrivateKeyFile string `json:"tlsPrivateKeyFile,omitempty"`
 	// tlsCipherSuites is the list of allowed cipher suites for the server.
 	// Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants).
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may disrupt components that interact with the Kubelet server.
 	// Default: nil
 	// +optional
 	TLSCipherSuites []string `json:"tlsCipherSuites,omitempty"`
 	// tlsMinVersion is the minimum TLS version supported.
 	// Values are from tls package constants (https://golang.org/pkg/crypto/tls/#pkg-constants).
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may disrupt components that interact with the Kubelet server.
 	// Default: ""
 	// +optional
 	TLSMinVersion string `json:"tlsMinVersion,omitempty"`
 	// rotateCertificates enables client certificate rotation. The Kubelet will request a
 	// new certificate from the certificates.k8s.io API. This requires an approver to approve the
 	// certificate signing requests.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// disabling it may disrupt the Kubelet's ability to authenticate with the API server
-	// after the current certificate expires.
 	// Default: false
 	// +optional
 	RotateCertificates bool `json:"rotateCertificates,omitempty"`
@@ -221,18 +169,10 @@ type KubeletConfiguration struct {
 	// the 'certificates.k8s.io' API. This requires an approver to approve the
 	// certificate signing requests (CSR). The RotateKubeletServerCertificate feature
 	// must be enabled when setting this field.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// disabling it will stop the renewal of Kubelet server certificates, which can
-	// disrupt components that interact with the Kubelet server in the long term,
-	// due to certificate expiration.
 	// Default: false
 	// +optional
 	ServerTLSBootstrap bool `json:"serverTLSBootstrap,omitempty"`
 	// authentication specifies how requests to the Kubelet's server are authenticated.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may disrupt components that interact with the Kubelet server.
 	// Defaults:
 	//   anonymous:
 	//     enabled: false
@@ -242,9 +182,6 @@ type KubeletConfiguration struct {
 	// +optional
 	Authentication KubeletAuthentication `json:"authentication"`
 	// authorization specifies how requests to the Kubelet's server are authorized.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may disrupt components that interact with the Kubelet server.
 	// Defaults:
 	//   mode: Webhook
 	//   webhook:
@@ -255,10 +192,6 @@ type KubeletConfiguration struct {
 	// registryPullQPS is the limit of registry pulls per second.
 	// The value must not be a negative number.
 	// Setting it to 0 means no limit.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may impact scalability by changing the amount of traffic produced
-	// by image pulls.
 	// Default: 5
 	// +optional
 	RegistryPullQPS *int32 `json:"registryPullQPS,omitempty"`
@@ -266,19 +199,11 @@ type KubeletConfiguration struct {
 	// pulls to burst to this number, while still not exceeding registryPullQPS.
 	// The value must not be a negative number.
 	// Only used if registryPullQPS is greater than 0.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may impact scalability by changing the amount of traffic produced
-	// by image pulls.
 	// Default: 10
 	// +optional
 	RegistryBurst int32 `json:"registryBurst,omitempty"`
 	// eventRecordQPS is the maximum event creations per second. If 0, there
 	// is no limit enforced. The value cannot be a negative number.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may impact scalability by changing the amount of traffic produced by
-	// event creations.
 	// Default: 5
 	// +optional
 	EventRecordQPS *int32 `json:"eventRecordQPS,omitempty"`
@@ -286,76 +211,47 @@ type KubeletConfiguration struct {
 	// allows event creations to burst to this number, while still not exceeding
 	// eventRecordQPS. This field canot be a negative number and it is only used
 	// when eventRecordQPS > 0.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may impact scalability by changing the amount of traffic produced by
-	// event creations.
 	// Default: 10
 	// +optional
 	EventBurst int32 `json:"eventBurst,omitempty"`
 	// enableDebuggingHandlers enables server endpoints for log access
 	// and local running of containers and commands, including the exec,
 	// attach, logs, and portforward features.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// disabling it may disrupt components that interact with the Kubelet server.
 	// Default: true
 	// +optional
 	EnableDebuggingHandlers *bool `json:"enableDebuggingHandlers,omitempty"`
 	// enableContentionProfiling enables lock contention profiling, if enableDebuggingHandlers is true.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// enabling it may carry a performance impact.
 	// Default: false
 	// +optional
 	EnableContentionProfiling bool `json:"enableContentionProfiling,omitempty"`
 	// healthzPort is the port of the localhost healthz endpoint (set to 0 to disable).
 	// A valid number is between 1 and 65535.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may disrupt components that monitor Kubelet health.
 	// Default: 10248
 	// +optional
 	HealthzPort *int32 `json:"healthzPort,omitempty"`
 	// healthzBindAddress is the IP address for the healthz server to serve on.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may disrupt components that monitor Kubelet health.
 	// Default: "127.0.0.1"
 	// +optional
 	HealthzBindAddress string `json:"healthzBindAddress,omitempty"`
 	// oomScoreAdj is The oom-score-adj value for kubelet process. Values
 	// must be within the range [-1000, 1000].
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may impact the stability of nodes under memory pressure.
 	// Default: -999
 	// +optional
 	OOMScoreAdj *int32 `json:"oomScoreAdj,omitempty"`
 	// clusterDomain is the DNS domain for this cluster. If set, kubelet will
 	// configure all containers to search this domain in addition to the
 	// host's search domains.
-	// Dynamic Kubelet Config (deprecated): Dynamically updating this field is not recommended,
-	// as it should be kept in sync with the rest of the cluster.
 	// Default: ""
 	// +optional
 	ClusterDomain string `json:"clusterDomain,omitempty"`
 	// clusterDNS is a list of IP addresses for the cluster DNS server. If set,
 	// kubelet will configure all containers to use this for DNS resolution
 	// instead of the host's DNS servers.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// changes will only take effect on Pods created after the update. Draining
-	// the node is recommended before changing this field.
 	// Default: nil
 	// +optional
 	ClusterDNS []string `json:"clusterDNS,omitempty"`
 	// streamingConnectionIdleTimeout is the maximum time a streaming connection
 	// can be idle before the connection is automatically closed.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may impact components that rely on infrequent updates over streaming
-	// connections to the Kubelet server.
 	// Default: "4h"
 	// +optional
 	StreamingConnectionIdleTimeout metav1.Duration `json:"streamingConnectionIdleTimeout,omitempty"`
@@ -364,12 +260,6 @@ type KubeletConfiguration struct {
 	// kubelet posts node status to master.
 	// Note: When node lease feature is not enabled, be cautious when changing the
 	// constant, it must work with nodeMonitorGracePeriod in nodecontroller.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may impact node scalability, and also that the node controller's
-	// nodeMonitorGracePeriod must be set to N*NodeStatusUpdateFrequency,
-	// where N is the number of retries before the node controller marks
-	// the node unhealthy.
 	// Default: "10s"
 	// +optional
 	NodeStatusUpdateFrequency metav1.Duration `json:"nodeStatusUpdateFrequency,omitempty"`
@@ -390,17 +280,11 @@ type KubeletConfiguration struct {
 	// The lease is currently renewed every 10s, per KEP-0009. In the future, the lease renewal
 	// interval may be set based on the lease duration.
 	// The field value must be greater than 0.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// decreasing the duration may reduce tolerance for issues that temporarily prevent
-	// the Kubelet from renewing the lease (e.g. a short-lived network issue).
 	// Default: 40
 	// +optional
 	NodeLeaseDurationSeconds int32 `json:"nodeLeaseDurationSeconds,omitempty"`
 	// imageMinimumGCAge is the minimum age for an unused image before it is
-	// garbage collected. If DynamicKubeletConfig (deprecated; default off)
-	// is on, when dynamically updating this field, consider that it may trigger or
-	// delay garbage collection, and may change the image overhead on the node.
+	// garbage collected.
 	// Default: "2m"
 	// +optional
 	ImageMinimumGCAge metav1.Duration `json:"imageMinimumGCAge,omitempty"`
@@ -409,10 +293,6 @@ type KubeletConfiguration struct {
 	// dividing this field value by 100, so this field must be between 0 and
 	// 100, inclusive. When specified, the value must be greater than
 	// imageGCLowThresholdPercent.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may trigger or delay garbage collection, and may change the image overhead
-	// on the node.
 	// Default: 85
 	// +optional
 	ImageGCHighThresholdPercent *int32 `json:"imageGCHighThresholdPercent,omitempty"`
@@ -421,24 +301,15 @@ type KubeletConfiguration struct {
 	// collect to. The percent is calculated by dividing this field value by 100,
 	// so the field value must be between 0 and 100, inclusive. When specified, the
 	// value must be less than imageGCHighThresholdPercent.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may trigger or delay garbage collection, and may change the image overhead
-	// on the node.
 	// Default: 80
 	// +optional
 	ImageGCLowThresholdPercent *int32 `json:"imageGCLowThresholdPercent,omitempty"`
 	// volumeStatsAggPeriod is the frequency for calculating and caching volume
 	// disk usage for all pods.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// shortening the period may carry a performance impact.
 	// Default: "1m"
 	// +optional
 	VolumeStatsAggPeriod metav1.Duration `json:"volumeStatsAggPeriod,omitempty"`
 	// kubeletCgroups is the absolute name of cgroups to isolate the kubelet in
-	// Dynamic Kubelet Config (deprecated): This field should not be updated without a full node
-	// reboot. It is safest to keep this value the same as the local config.
 	// Default: ""
 	// +optional
 	KubeletCgroups string `json:"kubeletCgroups,omitempty"`
@@ -446,60 +317,42 @@ type KubeletConfiguration struct {
 	// all non-kernel processes that are not already in a container. Empty
 	// for no container. Rolling back the flag requires a reboot.
 	// The cgroupRoot must be specified if this field is not empty.
-	// Dynamic Kubelet Config (deprecated): This field should not be updated without a full node
-	// reboot. It is safest to keep this value the same as the local config.
 	// Default: ""
 	// +optional
 	SystemCgroups string `json:"systemCgroups,omitempty"`
 	// cgroupRoot is the root cgroup to use for pods. This is handled by the
 	// container runtime on a best effort basis.
-	// Dynamic Kubelet Config (deprecated): This field should not be updated without a full node
-	// reboot. It is safest to keep this value the same as the local config.
-	// Default: ""
 	// +optional
 	CgroupRoot string `json:"cgroupRoot,omitempty"`
 	// cgroupsPerQOS enable QoS based CGroup hierarchy: top level CGroups for QoS classes
 	// and all Burstable and BestEffort Pods are brought up under their specific top level
 	// QoS CGroup.
-	// Dynamic Kubelet Config (deprecated): This field should not be updated without a full node
-	// reboot. It is safest to keep this value the same as the local config.
 	// Default: true
 	// +optional
 	CgroupsPerQOS *bool `json:"cgroupsPerQOS,omitempty"`
 	// cgroupDriver is the driver kubelet uses to manipulate CGroups on the host (cgroupfs
 	// or systemd).
-	// Dynamic Kubelet Config (deprecated): This field should not be updated without a full node
-	// reboot. It is safest to keep this value the same as the local config.
 	// Default: "cgroupfs"
 	// +optional
 	CgroupDriver string `json:"cgroupDriver,omitempty"`
 	// cpuManagerPolicy is the name of the policy to use.
 	// Requires the CPUManager feature gate to be enabled.
-	// Dynamic Kubelet Config (deprecated): This field should not be updated without a full node
-	// reboot. It is safest to keep this value the same as the local config.
 	// Default: "None"
 	// +optional
 	CPUManagerPolicy string `json:"cpuManagerPolicy,omitempty"`
 	// cpuManagerPolicyOptions is a set of key=value which allows to set extra options
 	// to fine tune the behaviour of the cpu manager policies.
 	// Requires both the "CPUManager" and "CPUManagerPolicyOptions" feature gates to be enabled.
-	// Dynamic Kubelet Config (beta): This field should not be updated without a full node
-	// reboot. It is safest to keep this value the same as the local config.
 	// Default: nil
 	// +optional
 	CPUManagerPolicyOptions map[string]string `json:"cpuManagerPolicyOptions,omitempty"`
 	// cpuManagerReconcilePeriod is the reconciliation period for the CPU Manager.
 	// Requires the CPUManager feature gate to be enabled.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// shortening the period may carry a performance impact.
 	// Default: "10s"
 	// +optional
 	CPUManagerReconcilePeriod metav1.Duration `json:"cpuManagerReconcilePeriod,omitempty"`
 	// memoryManagerPolicy is the name of the policy to use by memory manager.
 	// Requires the MemoryManager feature gate to be enabled.
-	// Dynamic Kubelet Config (deprecated): This field should not be updated without a full node
-	// reboot. It is safest to keep this value the same as the local config.
 	// Default: "none"
 	// +optional
 	MemoryManagerPolicy string `json:"memoryManagerPolicy,omitempty"`
@@ -515,8 +368,6 @@ type KubeletConfiguration struct {
 	// of CPU and device resources.
 	//
 	// Policies other than "none" require the TopologyManager feature gate to be enabled.
-	// Dynamic Kubelet Config (deprecated): This field should not be updated without a full node
-	// reboot. It is safest to keep this value the same as the local config.
 	// Default: "none"
 	// +optional
 	TopologyManagerPolicy string `json:"topologyManagerPolicy,omitempty"`
@@ -535,16 +386,11 @@ type KubeletConfiguration struct {
 	// guaranteed QoS tier.
 	// Currently supported resources: "memory"
 	// Requires the QOSReserved feature gate to be enabled.
-	// Dynamic Kubelet Config (deprecated): This field should not be updated without a full node
-	// reboot. It is safest to keep this value the same as the local config.
 	// Default: nil
 	// +optional
 	QOSReserved map[string]string `json:"qosReserved,omitempty"`
 	// runtimeRequestTimeout is the timeout for all runtime requests except long running
 	// requests - pull, logs, exec and attach.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may disrupt components that interact with the Kubelet server.
 	// Default: "2m"
 	// +optional
 	RuntimeRequestTimeout metav1.Duration `json:"runtimeRequestTimeout,omitempty"`
@@ -559,43 +405,25 @@ type KubeletConfiguration struct {
 	//
 	// Generally, one must set `--hairpin-mode=hairpin-veth to` achieve hairpin NAT,
 	// because promiscuous-bridge assumes the existence of a container bridge named cbr0.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// it may require a node reboot, depending on the network plugin.
 	// Default: "promiscuous-bridge"
 	// +optional
 	HairpinMode string `json:"hairpinMode,omitempty"`
 	// maxPods is the maximum number of Pods that can run on this Kubelet.
 	// The value must be a non-negative integer.
-	// If DynamicKubeletConfig (deprecated; default off) is on, when
-	// dynamically updating this field, consider that
-	// changes may cause Pods to fail admission on Kubelet restart, and may change
-	// the value reported in Node.Status.Capacity[v1.ResourcePods], thus affecting
-	// future scheduling decisions. Increasing this value may also decrease performance,
-	// as more Pods can be packed into a single node.
 	// Default: 110
 	// +optional
 	MaxPods int32 `json:"maxPods,omitempty"`
|
||||||
// podCIDR is the CIDR to use for pod IP addresses, only used in standalone mode.
|
// podCIDR is the CIDR to use for pod IP addresses, only used in standalone mode.
|
||||||
// In cluster mode, this is obtained from the control plane.
|
// In cluster mode, this is obtained from the control plane.
|
||||||
// Dynamic Kubelet Config (deprecated): This field should always be set to the empty default.
|
|
||||||
// It should only set for standalone Kubelets, which cannot use Dynamic Kubelet Config.
|
|
||||||
// Default: ""
|
// Default: ""
|
||||||
// +optional
|
// +optional
|
||||||
PodCIDR string `json:"podCIDR,omitempty"`
|
PodCIDR string `json:"podCIDR,omitempty"`
|
||||||
// podPidsLimit is the maximum number of PIDs in any pod.
|
// podPidsLimit is the maximum number of PIDs in any pod.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// lowering it may prevent container processes from forking after the change.
|
|
||||||
// Default: -1
|
// Default: -1
|
||||||
// +optional
|
// +optional
|
||||||
PodPidsLimit *int64 `json:"podPidsLimit,omitempty"`
|
PodPidsLimit *int64 `json:"podPidsLimit,omitempty"`
|
||||||
// resolvConf is the resolver configuration file used as the basis
|
// resolvConf is the resolver configuration file used as the basis
|
||||||
// for the container DNS resolution configuration.
|
// for the container DNS resolution configuration.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// changes will only take effect on Pods created after the update. Draining
|
|
||||||
// the node is recommended before changing this field.
|
|
||||||
// If set to the empty string, will override the default and effectively disable DNS lookups.
|
// If set to the empty string, will override the default and effectively disable DNS lookups.
|
||||||
// Default: "/etc/resolv.conf"
|
// Default: "/etc/resolv.conf"
|
||||||
// +optional
|
// +optional
|
||||||
@ -607,62 +435,36 @@ type KubeletConfiguration struct {
|
|||||||
RunOnce bool `json:"runOnce,omitempty"`
|
RunOnce bool `json:"runOnce,omitempty"`
|
||||||
// cpuCFSQuota enables CPU CFS quota enforcement for containers that
|
// cpuCFSQuota enables CPU CFS quota enforcement for containers that
|
||||||
// specify CPU limits.
|
// specify CPU limits.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// disabling it may reduce node stability.
|
|
||||||
// Default: true
|
// Default: true
|
||||||
// +optional
|
// +optional
|
||||||
CPUCFSQuota *bool `json:"cpuCFSQuota,omitempty"`
|
CPUCFSQuota *bool `json:"cpuCFSQuota,omitempty"`
|
||||||
// cpuCFSQuotaPeriod is the CPU CFS quota period value, `cpu.cfs_period_us`.
|
// cpuCFSQuotaPeriod is the CPU CFS quota period value, `cpu.cfs_period_us`.
|
||||||
// The value must be between 1 us and 1 second, inclusive.
|
// The value must be between 1 us and 1 second, inclusive.
|
||||||
// Requires the CustomCPUCFSQuotaPeriod feature gate to be enabled.
|
// Requires the CustomCPUCFSQuotaPeriod feature gate to be enabled.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// limits set for containers will result in different cpu.cfs_quota settings. This
|
|
||||||
// will trigger container restarts on the node being reconfigured.
|
|
||||||
// Default: "100ms"
|
// Default: "100ms"
|
||||||
// +optional
|
// +optional
|
||||||
CPUCFSQuotaPeriod *metav1.Duration `json:"cpuCFSQuotaPeriod,omitempty"`
|
CPUCFSQuotaPeriod *metav1.Duration `json:"cpuCFSQuotaPeriod,omitempty"`
|
||||||
// nodeStatusMaxImages caps the number of images reported in Node.status.images.
|
// nodeStatusMaxImages caps the number of images reported in Node.status.images.
|
||||||
// The value must be greater than -2.
|
// The value must be greater than -2.
|
||||||
// Note: If -1 is specified, no cap will be applied. If 0 is specified, no image is returned.
|
// Note: If -1 is specified, no cap will be applied. If 0 is specified, no image is returned.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// different values can be reported on node status.
|
|
||||||
// Default: 50
|
// Default: 50
|
||||||
// +optional
|
// +optional
|
||||||
NodeStatusMaxImages *int32 `json:"nodeStatusMaxImages,omitempty"`
|
NodeStatusMaxImages *int32 `json:"nodeStatusMaxImages,omitempty"`
|
||||||
// maxOpenFiles is Number of files that can be opened by Kubelet process.
|
// maxOpenFiles is Number of files that can be opened by Kubelet process.
|
||||||
// The value must be a non-negative number.
|
// The value must be a non-negative number.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// it may impact the ability of the Kubelet to interact with the node's filesystem.
|
|
||||||
// Default: 1000000
|
// Default: 1000000
|
||||||
// +optional
|
// +optional
|
||||||
MaxOpenFiles int64 `json:"maxOpenFiles,omitempty"`
|
MaxOpenFiles int64 `json:"maxOpenFiles,omitempty"`
|
||||||
// contentType is contentType of requests sent to apiserver.
|
// contentType is contentType of requests sent to apiserver.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// it may impact the ability for the Kubelet to communicate with the API server.
|
|
||||||
// If the Kubelet loses contact with the API server due to a change to this field,
|
|
||||||
// the change cannot be reverted via dynamic Kubelet config.
|
|
||||||
// Default: "application/vnd.kubernetes.protobuf"
|
// Default: "application/vnd.kubernetes.protobuf"
|
||||||
// +optional
|
// +optional
|
||||||
ContentType string `json:"contentType,omitempty"`
|
ContentType string `json:"contentType,omitempty"`
|
||||||
// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
|
// kubeAPIQPS is the QPS to use while talking with kubernetes apiserver.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// it may impact scalability by changing the amount of traffic the Kubelet
|
|
||||||
// sends to the API server.
|
|
||||||
// Default: 5
|
// Default: 5
|
||||||
// +optional
|
// +optional
|
||||||
KubeAPIQPS *int32 `json:"kubeAPIQPS,omitempty"`
|
KubeAPIQPS *int32 `json:"kubeAPIQPS,omitempty"`
|
||||||
// kubeAPIBurst is the burst to allow while talking with kubernetes API server.
|
// kubeAPIBurst is the burst to allow while talking with kubernetes API server.
|
||||||
// This field cannot be a negative number.
|
// This field cannot be a negative number.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// it may impact scalability by changing the amount of traffic the Kubelet
|
|
||||||
// sends to the API server.
|
|
||||||
// Default: 10
|
// Default: 10
|
||||||
// +optional
|
// +optional
|
||||||
KubeAPIBurst int32 `json:"kubeAPIBurst,omitempty"`
|
KubeAPIBurst int32 `json:"kubeAPIBurst,omitempty"`
|
||||||
@ -670,18 +472,12 @@ type KubeletConfiguration struct {
|
|||||||
// at a time. We recommend *not* changing the default value on nodes that
|
// at a time. We recommend *not* changing the default value on nodes that
|
||||||
// run docker daemon with version < 1.9 or an Aufs storage backend.
|
// run docker daemon with version < 1.9 or an Aufs storage backend.
|
||||||
// Issue #10959 has more details.
|
// Issue #10959 has more details.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// it may impact the performance of image pulls.
|
|
||||||
// Default: true
|
// Default: true
|
||||||
// +optional
|
// +optional
|
||||||
SerializeImagePulls *bool `json:"serializeImagePulls,omitempty"`
|
SerializeImagePulls *bool `json:"serializeImagePulls,omitempty"`
|
||||||
// evictionHard is a map of signal names to quantities that defines hard eviction
|
// evictionHard is a map of signal names to quantities that defines hard eviction
|
||||||
// thresholds. For example: `{"memory.available": "300Mi"}`.
|
// thresholds. For example: `{"memory.available": "300Mi"}`.
|
||||||
// To explicitly disable, pass a 0% or 100% threshold on an arbitrary resource.
|
// To explicitly disable, pass a 0% or 100% threshold on an arbitrary resource.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// it may trigger or delay Pod evictions.
|
|
||||||
// Default:
|
// Default:
|
||||||
// memory.available: "100Mi"
|
// memory.available: "100Mi"
|
||||||
// nodefs.available: "10%"
|
// nodefs.available: "10%"
|
||||||
@ -691,26 +487,16 @@ type KubeletConfiguration struct {
|
|||||||
EvictionHard map[string]string `json:"evictionHard,omitempty"`
|
EvictionHard map[string]string `json:"evictionHard,omitempty"`
|
||||||
// evictionSoft is a map of signal names to quantities that defines soft eviction thresholds.
|
// evictionSoft is a map of signal names to quantities that defines soft eviction thresholds.
|
||||||
// For example: `{"memory.available": "300Mi"}`.
|
// For example: `{"memory.available": "300Mi"}`.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// it may trigger or delay Pod evictions, and may change the allocatable reported
|
|
||||||
// by the node.
|
|
||||||
// Default: nil
|
// Default: nil
|
||||||
// +optional
|
// +optional
|
||||||
EvictionSoft map[string]string `json:"evictionSoft,omitempty"`
|
EvictionSoft map[string]string `json:"evictionSoft,omitempty"`
|
||||||
// evictionSoftGracePeriod is a map of signal names to quantities that defines grace
|
// evictionSoftGracePeriod is a map of signal names to quantities that defines grace
|
||||||
// periods for each soft eviction signal. For example: `{"memory.available": "30s"}`.
|
// periods for each soft eviction signal. For example: `{"memory.available": "30s"}`.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// it may trigger or delay Pod evictions.
|
|
||||||
// Default: nil
|
// Default: nil
|
||||||
// +optional
|
// +optional
|
||||||
EvictionSoftGracePeriod map[string]string `json:"evictionSoftGracePeriod,omitempty"`
|
EvictionSoftGracePeriod map[string]string `json:"evictionSoftGracePeriod,omitempty"`
|
||||||
// evictionPressureTransitionPeriod is the duration for which the kubelet has to wait
|
// evictionPressureTransitionPeriod is the duration for which the kubelet has to wait
|
||||||
// before transitioning out of an eviction pressure condition.
|
// before transitioning out of an eviction pressure condition.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// lowering it may decrease the stability of the node when the node is overcommitted.
|
|
||||||
// Default: "5m"
|
// Default: "5m"
|
||||||
// +optional
|
// +optional
|
||||||
EvictionPressureTransitionPeriod metav1.Duration `json:"evictionPressureTransitionPeriod,omitempty"`
|
EvictionPressureTransitionPeriod metav1.Duration `json:"evictionPressureTransitionPeriod,omitempty"`
|
||||||
@ -720,10 +506,6 @@ type KubeletConfiguration struct {
|
|||||||
// Note: Due to issue #64530, the behavior has a bug where this value currently just
|
// Note: Due to issue #64530, the behavior has a bug where this value currently just
|
||||||
// overrides the grace period during soft eviction, which can increase the grace
|
// overrides the grace period during soft eviction, which can increase the grace
|
||||||
// period from what is set on the Pod. This bug will be fixed in a future release.
|
// period from what is set on the Pod. This bug will be fixed in a future release.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// lowering it decreases the amount of time Pods will have to gracefully clean
|
|
||||||
// up before being killed during a soft eviction.
|
|
||||||
// Default: 0
|
// Default: 0
|
||||||
// +optional
|
// +optional
|
||||||
EvictionMaxPodGracePeriod int32 `json:"evictionMaxPodGracePeriod,omitempty"`
|
EvictionMaxPodGracePeriod int32 `json:"evictionMaxPodGracePeriod,omitempty"`
|
||||||
@ -731,44 +513,24 @@ type KubeletConfiguration struct {
|
|||||||
// which describe the minimum amount of a given resource the kubelet will reclaim when
|
// which describe the minimum amount of a given resource the kubelet will reclaim when
|
||||||
// performing a pod eviction while that resource is under pressure.
|
// performing a pod eviction while that resource is under pressure.
|
||||||
// For example: `{"imagefs.available": "2Gi"}`.
|
// For example: `{"imagefs.available": "2Gi"}`.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// it may change how well eviction can manage resource pressure.
|
|
||||||
// Default: nil
|
// Default: nil
|
||||||
// +optional
|
// +optional
|
||||||
EvictionMinimumReclaim map[string]string `json:"evictionMinimumReclaim,omitempty"`
|
EvictionMinimumReclaim map[string]string `json:"evictionMinimumReclaim,omitempty"`
|
||||||
// podsPerCore is the maximum number of pods per core. Cannot exceed maxPods.
|
// podsPerCore is the maximum number of pods per core. Cannot exceed maxPods.
|
||||||
// The value must be a non-negative integer.
|
// The value must be a non-negative integer.
|
||||||
// If 0, there is no limit on the number of Pods.
|
// If 0, there is no limit on the number of Pods.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// changes may cause Pods to fail admission on Kubelet restart, and may change
|
|
||||||
// the value reported in `Node.status.capacity.pods`, thus affecting
|
|
||||||
// future scheduling decisions. Increasing this value may also decrease performance,
|
|
||||||
// as more Pods can be packed into a single node.
|
|
||||||
// Default: 0
|
// Default: 0
|
||||||
// +optional
|
// +optional
|
||||||
PodsPerCore int32 `json:"podsPerCore,omitempty"`
|
PodsPerCore int32 `json:"podsPerCore,omitempty"`
|
||||||
// enableControllerAttachDetach enables the Attach/Detach controller to
|
// enableControllerAttachDetach enables the Attach/Detach controller to
|
||||||
// manage attachment/detachment of volumes scheduled to this node, and
|
// manage attachment/detachment of volumes scheduled to this node, and
|
||||||
// disables kubelet from executing any attach/detach operations.
|
// disables kubelet from executing any attach/detach operations.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// changing which component is responsible for volume management on a live node
|
|
||||||
// may result in volumes refusing to detach if the node is not drained prior to
|
|
||||||
// the update, and if Pods are scheduled to the node before the
|
|
||||||
// volumes.kubernetes.io/controller-managed-attach-detach annotation is updated by the
|
|
||||||
// Kubelet. In general, it is safest to leave this value set the same as local config.
|
|
||||||
// Default: true
|
// Default: true
|
||||||
// +optional
|
// +optional
|
||||||
EnableControllerAttachDetach *bool `json:"enableControllerAttachDetach,omitempty"`
|
EnableControllerAttachDetach *bool `json:"enableControllerAttachDetach,omitempty"`
|
||||||
// protectKernelDefaults, if true, causes the Kubelet to error if kernel
|
// protectKernelDefaults, if true, causes the Kubelet to error if kernel
|
||||||
// flags are not as it expects. Otherwise the Kubelet will attempt to modify
|
// flags are not as it expects. Otherwise the Kubelet will attempt to modify
|
||||||
// kernel flags to match its expectation.
|
// kernel flags to match its expectation.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// enabling it may cause the Kubelet to crash-loop if the Kernel is not configured as
|
|
||||||
// Kubelet expects.
|
|
||||||
// Default: false
|
// Default: false
|
||||||
// +optional
|
// +optional
|
||||||
ProtectKernelDefaults bool `json:"protectKernelDefaults,omitempty"`
|
ProtectKernelDefaults bool `json:"protectKernelDefaults,omitempty"`
|
||||||
@ -776,9 +538,6 @@ type KubeletConfiguration struct {
|
|||||||
// are present on host.
|
// are present on host.
|
||||||
// These rules will serve as utility rules for various components, e.g. kube-proxy.
|
// These rules will serve as utility rules for various components, e.g. kube-proxy.
|
||||||
// The rules will be created based on iptablesMasqueradeBit and iptablesDropBit.
|
// The rules will be created based on iptablesMasqueradeBit and iptablesDropBit.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// disabling it will prevent the Kubelet from healing locally misconfigured iptables rules.
|
|
||||||
// Default: true
|
// Default: true
|
||||||
// +optional
|
// +optional
|
||||||
MakeIPTablesUtilChains *bool `json:"makeIPTablesUtilChains,omitempty"`
|
MakeIPTablesUtilChains *bool `json:"makeIPTablesUtilChains,omitempty"`
|
||||||
@ -786,38 +545,21 @@ type KubeletConfiguration struct {
|
|||||||
// Values must be within the range [0, 31]. Must be different from other mark bits.
|
// Values must be within the range [0, 31]. Must be different from other mark bits.
|
||||||
// Warning: Please match the value of the corresponding parameter in kube-proxy.
|
// Warning: Please match the value of the corresponding parameter in kube-proxy.
|
||||||
// TODO: clean up IPTablesMasqueradeBit in kube-proxy.
|
// TODO: clean up IPTablesMasqueradeBit in kube-proxy.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// it needs to be coordinated with other components, like kube-proxy, and the update
|
|
||||||
// will only be effective if MakeIPTablesUtilChains is enabled.
|
|
||||||
// Default: 14
|
// Default: 14
|
||||||
// +optional
|
// +optional
|
||||||
IPTablesMasqueradeBit *int32 `json:"iptablesMasqueradeBit,omitempty"`
|
IPTablesMasqueradeBit *int32 `json:"iptablesMasqueradeBit,omitempty"`
|
||||||
// iptablesDropBit is the bit of the iptables fwmark space to mark for dropping packets.
|
// iptablesDropBit is the bit of the iptables fwmark space to mark for dropping packets.
|
||||||
// Values must be within the range [0, 31]. Must be different from other mark bits.
|
// Values must be within the range [0, 31]. Must be different from other mark bits.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// it needs to be coordinated with other components, like kube-proxy, and the update
|
|
||||||
// will only be effective if MakeIPTablesUtilChains is enabled.
|
|
||||||
// Default: 15
|
// Default: 15
|
||||||
// +optional
|
// +optional
|
||||||
IPTablesDropBit *int32 `json:"iptablesDropBit,omitempty"`
|
IPTablesDropBit *int32 `json:"iptablesDropBit,omitempty"`
|
||||||
// featureGates is a map of feature names to bools that enable or disable experimental
|
// featureGates is a map of feature names to bools that enable or disable experimental
|
||||||
// features. This field modifies piecemeal the built-in default values from
|
// features. This field modifies piecemeal the built-in default values from
|
||||||
// "k8s.io/kubernetes/pkg/features/kube_features.go".
|
// "k8s.io/kubernetes/pkg/features/kube_features.go".
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider the
|
|
||||||
// documentation for the features you are enabling or disabling. While we
|
|
||||||
// encourage feature developers to make it possible to dynamically enable
|
|
||||||
// and disable features, some changes may require node reboots, and some
|
|
||||||
// features may require careful coordination to retroactively disable.
|
|
||||||
// Default: nil
|
// Default: nil
|
||||||
// +optional
|
// +optional
|
||||||
FeatureGates map[string]bool `json:"featureGates,omitempty"`
|
FeatureGates map[string]bool `json:"featureGates,omitempty"`
|
||||||
// failSwapOn tells the Kubelet to fail to start if swap is enabled on the node.
|
// failSwapOn tells the Kubelet to fail to start if swap is enabled on the node.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// setting it to true will cause the Kubelet to crash-loop if swap is enabled.
|
|
||||||
// Default: true
|
// Default: true
|
||||||
// +optional
|
// +optional
|
||||||
FailSwapOn *bool `json:"failSwapOn,omitempty"`
|
FailSwapOn *bool `json:"failSwapOn,omitempty"`
|
||||||
@ -827,17 +569,11 @@ type KubeletConfiguration struct {
|
|||||||
MemorySwap MemorySwapConfiguration `json:"memorySwap,omitempty"`
|
MemorySwap MemorySwapConfiguration `json:"memorySwap,omitempty"`
|
||||||
// containerLogMaxSize is a quantity defining the maximum size of the container log
|
// containerLogMaxSize is a quantity defining the maximum size of the container log
|
||||||
// file before it is rotated. For example: "5Mi" or "256Ki".
|
// file before it is rotated. For example: "5Mi" or "256Ki".
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// it may trigger log rotation.
|
|
||||||
// Default: "10Mi"
|
// Default: "10Mi"
|
||||||
// +optional
|
// +optional
|
||||||
ContainerLogMaxSize string `json:"containerLogMaxSize,omitempty"`
|
ContainerLogMaxSize string `json:"containerLogMaxSize,omitempty"`
|
||||||
// containerLogMaxFiles specifies the maximum number of container log files that can
|
// containerLogMaxFiles specifies the maximum number of container log files that can
|
||||||
// be present for a container.
|
// be present for a container.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// lowering it may cause log files to be deleted.
|
|
||||||
// Default: 5
|
// Default: 5
|
||||||
// +optional
|
// +optional
|
||||||
ContainerLogMaxFiles *int32 `json:"containerLogMaxFiles,omitempty"`
|
ContainerLogMaxFiles *int32 `json:"containerLogMaxFiles,omitempty"`
|
||||||
@ -858,11 +594,6 @@ type KubeletConfiguration struct {
|
|||||||
// pairs that describe resources reserved for non-kubernetes components.
|
// pairs that describe resources reserved for non-kubernetes components.
|
||||||
// Currently only cpu and memory are supported.
|
// Currently only cpu and memory are supported.
|
||||||
// See http://kubernetes.io/docs/user-guide/compute-resources for more detail.
|
// See http://kubernetes.io/docs/user-guide/compute-resources for more detail.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// it may not be possible to increase the reserved resources, because this
|
|
||||||
// requires resizing cgroups. Always look for a NodeAllocatableEnforced event
|
|
||||||
// after updating this field to ensure that the update was successful.
|
|
||||||
// Default: nil
|
// Default: nil
|
||||||
// +optional
|
// +optional
|
||||||
SystemReserved map[string]string `json:"systemReserved,omitempty"`
|
SystemReserved map[string]string `json:"systemReserved,omitempty"`
|
||||||
@ -871,11 +602,6 @@ type KubeletConfiguration struct {
|
|||||||
// Currently cpu, memory and local storage for root file system are supported.
|
// Currently cpu, memory and local storage for root file system are supported.
|
||||||
// See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
|
// See https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
|
||||||
// for more details.
|
// for more details.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// it may not be possible to increase the reserved resources, because this
|
|
||||||
// requires resizing cgroups. Always look for a NodeAllocatableEnforced event
|
|
||||||
// after updating this field to ensure that the update was successful.
|
|
||||||
// Default: nil
|
// Default: nil
|
||||||
// +optional
|
// +optional
|
||||||
KubeReserved map[string]string `json:"kubeReserved,omitempty"`
|
KubeReserved map[string]string `json:"kubeReserved,omitempty"`
|
||||||
@ -898,8 +624,6 @@ type KubeletConfiguration struct {
|
|||||||
// to enforce `systemReserved` compute resource reservation for OS system daemons.
|
// to enforce `systemReserved` compute resource reservation for OS system daemons.
|
||||||
// Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md)
|
// Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md)
|
||||||
// doc for more information.
|
// doc for more information.
|
||||||
// Dynamic Kubelet Config (deprecated): This field should not be updated without a full node
|
|
||||||
// reboot. It is safest to keep this value the same as the local config.
|
|
||||||
// Default: ""
|
// Default: ""
|
||||||
// +optional
|
// +optional
|
||||||
SystemReservedCgroup string `json:"systemReservedCgroup,omitempty"`
|
SystemReservedCgroup string `json:"systemReservedCgroup,omitempty"`
|
||||||
@ -907,8 +631,6 @@ type KubeletConfiguration struct {
|
|||||||
// to enforce `KubeReserved` compute resource reservation for Kubernetes node system daemons.
|
// to enforce `KubeReserved` compute resource reservation for Kubernetes node system daemons.
|
||||||
// Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md)
|
// Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md)
|
||||||
// doc for more information.
|
// doc for more information.
|
||||||
// Dynamic Kubelet Config (deprecated): This field should not be updated without a full node
|
|
||||||
// reboot. It is safest to keep this value the same as the local config.
|
|
||||||
// Default: ""
|
// Default: ""
|
||||||
// +optional
|
// +optional
|
||||||
KubeReservedCgroup string `json:"kubeReservedCgroup,omitempty"`
|
KubeReservedCgroup string `json:"kubeReservedCgroup,omitempty"`
|
||||||
@ -921,13 +643,6 @@ type KubeletConfiguration struct {
|
|||||||
// This field is supported only when `cgroupsPerQOS` is set to true.
|
// This field is supported only when `cgroupsPerQOS` is set to true.
|
||||||
// Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md)
|
// Refer to [Node Allocatable](https://git.k8s.io/community/contributors/design-proposals/node/node-allocatable.md)
|
||||||
// for more information.
|
// for more information.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// removing enforcements may reduce the stability of the node. Alternatively, adding
|
|
||||||
// enforcements may reduce the stability of components which were using more than
|
|
||||||
// the reserved amount of resources; for example, enforcing kube-reserved may cause
|
|
||||||
// Kubelets to OOM if it uses more than the reserved resources, and enforcing system-reserved
|
|
||||||
// may cause system daemons to OOM if they use more than the reserved resources.
|
|
||||||
// Default: ["pods"]
|
// Default: ["pods"]
|
||||||
// +optional
|
// +optional
|
||||||
EnforceNodeAllocatable []string `json:"enforceNodeAllocatable,omitempty"`
|
EnforceNodeAllocatable []string `json:"enforceNodeAllocatable,omitempty"`
|
||||||
@ -939,26 +654,17 @@ type KubeletConfiguration struct {
|
|||||||
AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty"`
|
AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty"`
|
||||||
// volumePluginDir is the full path of the directory in which to search
|
// volumePluginDir is the full path of the directory in which to search
|
||||||
// for additional third party volume plugins.
|
// for additional third party volume plugins.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that changing
|
|
||||||
// the volumePluginDir may disrupt workloads relying on third party volume plugins.
|
|
||||||
// Default: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/"
|
// Default: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/"
|
||||||
// +optional
|
// +optional
|
||||||
VolumePluginDir string `json:"volumePluginDir,omitempty"`
|
VolumePluginDir string `json:"volumePluginDir,omitempty"`
|
||||||
// providerID, if set, sets the unique ID of the instance that an external
|
// providerID, if set, sets the unique ID of the instance that an external
|
||||||
// provider (i.e. cloudprovider) can use to identify a specific node.
|
// provider (i.e. cloudprovider) can use to identify a specific node.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// it may impact the ability of the Kubelet to interact with cloud providers.
|
|
||||||
// Default: ""
|
// Default: ""
|
||||||
// +optional
|
// +optional
|
||||||
ProviderID string `json:"providerID,omitempty"`
|
ProviderID string `json:"providerID,omitempty"`
|
||||||
// kernelMemcgNotification, if set, instructs the the kubelet to integrate with the
|
// kernelMemcgNotification, if set, instructs the the kubelet to integrate with the
|
||||||
// kernel memcg notification for determining if memory eviction thresholds are
|
// kernel memcg notification for determining if memory eviction thresholds are
|
||||||
// exceeded rather than polling.
|
// exceeded rather than polling.
|
||||||
// If DynamicKubeletConfig (deprecated; default off) is on, when
|
|
||||||
// dynamically updating this field, consider that
|
|
||||||
// it may impact the way Kubelet interacts with the kernel.
|
|
||||||
// Default: false
|
// Default: false
|
||||||
// +optional
|
// +optional
|
||||||
KernelMemcgNotification bool `json:"kernelMemcgNotification,omitempty"`
|
KernelMemcgNotification bool `json:"kernelMemcgNotification,omitempty"`
|
||||||
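
With the dynamic configuration path removed, the fields above reach the Kubelet only through a local file passed via the --config flag. The following is a minimal sketch, not part of this change, of producing such a file from the same v1beta1 types; the standalone program and the field values are illustrative assumptions only.

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
    "sigs.k8s.io/yaml"
)

func main() {
    failSwapOn := true
    // Illustrative values only; each field's actual default is documented in the struct above.
    cfg := kubeletconfigv1beta1.KubeletConfiguration{
        TypeMeta: metav1.TypeMeta{
            APIVersion: "kubelet.config.k8s.io/v1beta1",
            Kind:       "KubeletConfiguration",
        },
        CPUManagerPolicy: "static",
        MaxPods:          110,
        FailSwapOn:       &failSwapOn,
        EvictionHard:     map[string]string{"memory.available": "100Mi"},
    }
    out, err := yaml.Marshal(&cfg)
    if err != nil {
        panic(err)
    }
    // Write the output to a file and start the kubelet with --config=<that file>.
    fmt.Println(string(out))
}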
File diff suppressed because it is too large
@@ -36,7 +36,6 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/kubernetes/cmd/kubelet/app/options"
 "k8s.io/kubernetes/pkg/cluster/ports"
-"k8s.io/kubernetes/pkg/features"
 kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles"
 kubeletconfigcodec "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec"
@@ -271,15 +270,6 @@ func (e *E2EServices) startKubelet() (*server, error) {
 kc.FeatureGates = framework.TestContext.FeatureGates
 }
 
-if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
-// Enable dynamic config if the feature gate is enabled
-dynamicConfigDir, err := getDynamicConfigDir()
-if err != nil {
-return nil, err
-}
-cmdArgs = append(cmdArgs, "--dynamic-config-dir", dynamicConfigDir)
-}
-
 // Keep hostname override for convenience.
 if framework.TestContext.NodeName != "" { // If node name is specified, set hostname override.
 cmdArgs = append(cmdArgs, "--hostname-override", framework.TestContext.NodeName)
@@ -423,15 +413,6 @@ func createKubeconfigCWD() (string, error) {
 return kubeconfigPath, nil
 }
 
-// getDynamicConfigDir returns the directory for dynamic Kubelet configuration
-func getDynamicConfigDir() (string, error) {
-cwd, err := os.Getwd()
-if err != nil {
-return "", err
-}
-return filepath.Join(cwd, "dynamic-kubelet-config"), nil
-}
-
 // adjustArgsForSystemd escape special characters in kubelet arguments for systemd. Systemd
 // may try to do auto expansion without escaping.
 func adjustArgsForSystemd(args []string) {
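
Since the harness no longer wires up --dynamic-config-dir, the kubelet it launches is configured once, at startup, from a local file. A minimal sketch of the remaining flag shape, assuming a hypothetical buildKubeletArgs helper and placeholder paths (none of this is code from the change):

package main

import (
    "fmt"
    "os"
    "path/filepath"
)

// buildKubeletArgs is a hypothetical helper showing the flag shape that remains:
// the kubelet reads its configuration from the file given via --config, and no
// dynamic configuration directory flag exists any more.
func buildKubeletArgs(kubeletBin, configPath, nodeName string) []string {
    args := []string{kubeletBin, "--config", configPath}
    if nodeName != "" {
        args = append(args, "--hostname-override", nodeName)
    }
    return args
}

func main() {
    // Placeholder config path for illustration.
    cfgPath := filepath.Join(os.TempDir(), "kubelet-config.yaml")
    fmt.Println(buildKubeletArgs("/usr/bin/kubelet", cfgPath, "node-e2e"))
}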
@@ -38,16 +38,13 @@ import (
 "k8s.io/component-base/featuregate"
 internalapi "k8s.io/cri-api/pkg/apis"
 "k8s.io/klog/v2"
-kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
 kubeletpodresourcesv1 "k8s.io/kubelet/pkg/apis/podresources/v1"
 kubeletpodresourcesv1alpha1 "k8s.io/kubelet/pkg/apis/podresources/v1alpha1"
 stats "k8s.io/kubelet/pkg/apis/stats/v1alpha1"
-"k8s.io/kubernetes/pkg/features"
 kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
 "k8s.io/kubernetes/pkg/kubelet/apis/podresources"
 "k8s.io/kubernetes/pkg/kubelet/cm"
 "k8s.io/kubernetes/pkg/kubelet/cri/remote"
-kubeletconfigcodec "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec"
 kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
 "k8s.io/kubernetes/pkg/kubelet/util"
 
@@ -223,57 +220,6 @@ func deleteStateFile(stateFileName string) {
 framework.ExpectNoError(err, "failed to delete the state file")
 }
 
-// Returns true if kubeletConfig is enabled, false otherwise or if we cannot determine if it is.
-func isKubeletConfigEnabled(f *framework.Framework) (bool, error) {
-cfgz, err := getCurrentKubeletConfig()
-if err != nil {
-return false, fmt.Errorf("could not determine whether 'DynamicKubeletConfig' feature is enabled, err: %v", err)
-}
-v, ok := cfgz.FeatureGates[string(features.DynamicKubeletConfig)]
-if !ok {
-return false, nil
-}
-return v, nil
-}
-
-// sets the current node's configSource, this should only be called from Serial tests
-func setNodeConfigSource(f *framework.Framework, source *v1.NodeConfigSource) error {
-// since this is a serial test, we just get the node, change the source, and then update it
-// this prevents any issues with the patch API from affecting the test results
-nodeclient := f.ClientSet.CoreV1().Nodes()
-
-// get the node
-node, err := nodeclient.Get(context.TODO(), framework.TestContext.NodeName, metav1.GetOptions{})
-if err != nil {
-return err
-}
-
-// set new source
-node.Spec.ConfigSource = source
-
-// update to the new source
-_, err = nodeclient.Update(context.TODO(), node, metav1.UpdateOptions{})
-if err != nil {
-return err
-}
-
-return nil
-}
-
-// constructs a ConfigMap, populating one of its keys with the KubeletConfiguration. Always uses GenerateName to generate a suffix.
-func newKubeletConfigMap(name string, internalKC *kubeletconfig.KubeletConfiguration) *v1.ConfigMap {
-data, err := kubeletconfigcodec.EncodeKubeletConfig(internalKC, kubeletconfigv1beta1.SchemeGroupVersion)
-framework.ExpectNoError(err)
-
-cmap := &v1.ConfigMap{
-ObjectMeta: metav1.ObjectMeta{GenerateName: name + "-"},
-Data: map[string]string{
-"kubelet": string(data),
-},
-}
-return cmap
-}
-
 // listNamespaceEvents lists the events in the given namespace.
 func listNamespaceEvents(c clientset.Interface, ns string) error {
 ls, err := c.CoreV1().Events(ns).List(context.TODO(), metav1.ListOptions{})
@@ -321,24 +267,6 @@ func logKubeletLatencyMetrics(metricNames ...string) {
 }
 }
 
-// returns config related metrics from the local kubelet, filtered to the filterMetricNames passed in
-func getKubeletMetrics(filterMetricNames sets.String) (e2emetrics.KubeletMetrics, error) {
-// grab Kubelet metrics
-ms, err := e2emetrics.GrabKubeletMetricsWithoutProxy(framework.TestContext.NodeName+":10255", "/metrics")
-if err != nil {
-return nil, err
-}
-
-filtered := e2emetrics.NewKubeletMetrics()
-for name := range ms {
-if !filterMetricNames.Has(name) {
-continue
-}
-filtered[name] = ms[name]
-}
-return filtered, nil
-}
-
 // runCommand runs the cmd and returns the combined stdout and stderr, or an
 // error if the command failed.
 func runCommand(cmd ...string) (string, error) {
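
The deleted helpers above were the only places in the node e2e utilities that wrote Node.Spec.ConfigSource. A small standalone sketch, assuming a placeholder kubeconfig path and node name (illustration only, not part of the change), that reads a node and reports whether the deprecated field is simply left unset:

package main

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // Placeholder kubeconfig path and node name for illustration.
    cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
    if err != nil {
        panic(err)
    }
    cs, err := kubernetes.NewForConfig(cfg)
    if err != nil {
        panic(err)
    }
    node, err := cs.CoreV1().Nodes().Get(context.TODO(), "example-node", metav1.GetOptions{})
    if err != nil {
        panic(err)
    }
    // With dynamic kubelet config gone, nothing in the node e2e suite sets this field any more.
    fmt.Println("spec.configSource set:", node.Spec.ConfigSource != nil)
}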
@@ -17,18 +17,13 @@ limitations under the License.
 package metrics
 
 import (
-"fmt"
 "sync"
 "time"
 
 "k8s.io/component-base/metrics"
 "k8s.io/component-base/metrics/legacyregistry"
 
-corev1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/types"
-utilfeature "k8s.io/apiserver/pkg/util/feature"
-"k8s.io/klog/v2"
-"k8s.io/kubernetes/pkg/features"
 )
 
 // This const block defines the metric names for the kubelet metrics.
@@ -69,17 +64,6 @@ const (
 PodResourcesEndpointErrorsListKey = "pod_resources_endpoint_errors_list"
 PodResourcesEndpointErrorsGetAllocatableKey = "pod_resources_endpoint_errors_get_allocatable"
 
-// Metric keys for node config
-AssignedConfigKey = "node_config_assigned"
-ActiveConfigKey = "node_config_active"
-LastKnownGoodConfigKey = "node_config_last_known_good"
-ConfigErrorKey = "node_config_error"
-ConfigSourceLabelKey = "node_config_source"
-ConfigSourceLabelValueLocal = "local"
-ConfigUIDLabelKey = "node_config_uid"
-ConfigResourceVersionLabelKey = "node_config_resource_version"
-KubeletConfigKeyLabelKey = "node_config_kubelet_key"
-
 // Metrics keys for RuntimeClass
 RunPodSandboxDurationKey = "run_podsandbox_duration_seconds"
 RunPodSandboxErrorsKey = "run_podsandbox_errors_total"
@@ -345,48 +329,6 @@ var (
 []string{"server_api_version"},
 )
 
-// Metrics for node config
-
-// AssignedConfig is a Gauge that is set 1 if the Kubelet has a NodeConfig assigned.
-AssignedConfig = metrics.NewGaugeVec(
-&metrics.GaugeOpts{
-Subsystem: KubeletSubsystem,
-Name: AssignedConfigKey,
-Help: "The node's understanding of intended config. The count is always 1.",
-StabilityLevel: metrics.ALPHA,
-},
-[]string{ConfigSourceLabelKey, ConfigUIDLabelKey, ConfigResourceVersionLabelKey, KubeletConfigKeyLabelKey},
-)
-// ActiveConfig is a Gauge that is set to 1 if the Kubelet has an active NodeConfig.
-ActiveConfig = metrics.NewGaugeVec(
-&metrics.GaugeOpts{
-Subsystem: KubeletSubsystem,
-Name: ActiveConfigKey,
-Help: "The config source the node is actively using. The count is always 1.",
-StabilityLevel: metrics.ALPHA,
-},
-[]string{ConfigSourceLabelKey, ConfigUIDLabelKey, ConfigResourceVersionLabelKey, KubeletConfigKeyLabelKey},
-)
-// LastKnownGoodConfig is a Gauge that is set to 1 if the Kubelet has a NodeConfig it can fall back to if there
-// are certain errors.
-LastKnownGoodConfig = metrics.NewGaugeVec(
-&metrics.GaugeOpts{
-Subsystem: KubeletSubsystem,
-Name: LastKnownGoodConfigKey,
-Help: "The config source the node will fall back to when it encounters certain errors. The count is always 1.",
-StabilityLevel: metrics.ALPHA,
-},
-[]string{ConfigSourceLabelKey, ConfigUIDLabelKey, ConfigResourceVersionLabelKey, KubeletConfigKeyLabelKey},
-)
-// ConfigError is a Gauge that is set to 1 if the node is experiencing a configuration-related error.
-ConfigError = metrics.NewGauge(
-&metrics.GaugeOpts{
-Subsystem: KubeletSubsystem,
-Name: ConfigErrorKey,
-Help: "This metric is true (1) if the node is experiencing a configuration-related error, false (0) otherwise.",
-StabilityLevel: metrics.ALPHA,
-},
-)
 // RunPodSandboxDuration is a Histogram that tracks the duration (in seconds) it takes to run Pod Sandbox operations.
 // Broken down by RuntimeClass.Handler.
 RunPodSandboxDuration = metrics.NewHistogramVec(
@@ -462,12 +404,6 @@ func Register(collectors ...metrics.StableCollector) {
 legacyregistry.MustRegister(RunningPodCount)
 legacyregistry.MustRegister(RunPodSandboxDuration)
 legacyregistry.MustRegister(RunPodSandboxErrors)
-if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) {
-legacyregistry.MustRegister(AssignedConfig)
-legacyregistry.MustRegister(ActiveConfig)
-legacyregistry.MustRegister(LastKnownGoodConfig)
-legacyregistry.MustRegister(ConfigError)
-}
 for _, collector := range collectors {
 legacyregistry.CustomMustRegister(collector)
 }
@@ -484,104 +420,6 @@ func SinceInSeconds(start time.Time) float64 {
 return time.Since(start).Seconds()
 }
 
-const configMapAPIPathFmt = "/api/v1/namespaces/%s/configmaps/%s"
-
-func configLabels(source *corev1.NodeConfigSource) (map[string]string, error) {
-if source == nil {
-return map[string]string{
-// prometheus requires all of the labels that can be set on the metric
-ConfigSourceLabelKey: "local",
-ConfigUIDLabelKey: "",
-ConfigResourceVersionLabelKey: "",
-KubeletConfigKeyLabelKey: "",
-}, nil
-}
-if source.ConfigMap != nil {
-return map[string]string{
-ConfigSourceLabelKey: fmt.Sprintf(configMapAPIPathFmt, source.ConfigMap.Namespace, source.ConfigMap.Name),
-ConfigUIDLabelKey: string(source.ConfigMap.UID),
-ConfigResourceVersionLabelKey: source.ConfigMap.ResourceVersion,
-KubeletConfigKeyLabelKey: source.ConfigMap.KubeletConfigKey,
-}, nil
-}
-return nil, fmt.Errorf("unrecognized config source type, all source subfields were nil")
-}
-
-// track labels across metric updates, so we can delete old label sets and prevent leaks
-var assignedConfigLabels map[string]string
-
-// SetAssignedConfig tracks labels according to the assigned NodeConfig. It also tracks labels
-// across metric updates so old labels can be safely deleted.
-func SetAssignedConfig(source *corev1.NodeConfigSource) error {
-// compute the timeseries labels from the source
-labels, err := configLabels(source)
-if err != nil {
-return err
-}
-// clean up the old timeseries (WithLabelValues creates a new one for each distinct label set)
-if !AssignedConfig.Delete(assignedConfigLabels) {
-klog.InfoS("Failed to delete metric for labels. This may result in ambiguity from multiple metrics concurrently indicating different assigned configs.", "labels", assignedConfigLabels)
-}
-// record the new timeseries
-assignedConfigLabels = labels
-// expose the new timeseries with a constant count of 1
-AssignedConfig.With(assignedConfigLabels).Set(1)
-return nil
-}
-
-// track labels across metric updates, so we can delete old label sets and prevent leaks
-var activeConfigLabels map[string]string
-
-// SetActiveConfig tracks labels according to the NodeConfig that is currently used by the Kubelet.
-// It also tracks labels across metric updates so old labels can be safely deleted.
-func SetActiveConfig(source *corev1.NodeConfigSource) error {
-// compute the timeseries labels from the source
-labels, err := configLabels(source)
-if err != nil {
-return err
-}
-// clean up the old timeseries (WithLabelValues creates a new one for each distinct label set)
-if !ActiveConfig.Delete(activeConfigLabels) {
-klog.InfoS("Failed to delete metric for labels. This may result in ambiguity from multiple metrics concurrently indicating different active configs.", "labels", activeConfigLabels)
-}
-// record the new timeseries
-activeConfigLabels = labels
-// expose the new timeseries with a constant count of 1
-ActiveConfig.With(activeConfigLabels).Set(1)
-return nil
-}
-
-// track labels across metric updates, so we can delete old label sets and prevent leaks
-var lastKnownGoodConfigLabels map[string]string
-
-// SetLastKnownGoodConfig tracks labels according to the NodeConfig that was successfully applied last.
-// It also tracks labels across metric updates so old labels can be safely deleted.
-func SetLastKnownGoodConfig(source *corev1.NodeConfigSource) error {
-// compute the timeseries labels from the source
-labels, err := configLabels(source)
-if err != nil {
-return err
-}
-// clean up the old timeseries (WithLabelValues creates a new one for each distinct label set)
-if !LastKnownGoodConfig.Delete(lastKnownGoodConfigLabels) {
-klog.InfoS("Failed to delete metric for labels. This may result in ambiguity from multiple metrics concurrently indicating different last known good configs.", "labels", lastKnownGoodConfigLabels)
-}
-// record the new timeseries
-lastKnownGoodConfigLabels = labels
-// expose the new timeseries with a constant count of 1
-LastKnownGoodConfig.With(lastKnownGoodConfigLabels).Set(1)
-return nil
-}
-
-// SetConfigError sets a the ConfigError metric to 1 in case any errors were encountered.
-func SetConfigError(err bool) {
-if err {
-ConfigError.Set(1)
-} else {
-ConfigError.Set(0)
-}
-}
-
 // SetNodeName sets the NodeName Gauge to 1.
 func SetNodeName(name types.NodeName) {
 NodeName.WithLabelValues(string(name)).Set(1)
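
The gauges deleted above all followed the same component-base pattern: register a labeled GaugeVec once, then keep exactly one timeseries at value 1, deleting the previously exposed label set before writing the new one. A minimal sketch of that pattern in isolation, with a made-up metric name and label that the kubelet does not register:

package main

import (
    "k8s.io/component-base/metrics"
    "k8s.io/component-base/metrics/legacyregistry"
)

// exampleConfigInfo mirrors the shape of the deleted gauges: a labeled GaugeVec
// where one timeseries is held at value 1 to describe the config in use.
var exampleConfigInfo = metrics.NewGaugeVec(
    &metrics.GaugeOpts{
        Subsystem:      "example",
        Name:           "config_info",
        Help:           "The config currently in use. The count is always 1.",
        StabilityLevel: metrics.ALPHA,
    },
    []string{"source"},
)

// currentLabels remembers the last exposed label set so it can be deleted,
// preventing stale timeseries from accumulating.
var currentLabels map[string]string

// setConfigInfo swaps the single exposed timeseries over to the given label set.
func setConfigInfo(labels map[string]string) {
    if currentLabels != nil {
        exampleConfigInfo.Delete(currentLabels)
    }
    currentLabels = labels
    exampleConfigInfo.With(currentLabels).Set(1)
}

func main() {
    legacyregistry.MustRegister(exampleConfigInfo)
    setConfigInfo(map[string]string{"source": "local"})
}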