mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 19:56:01 +00:00)

Add field for KubeletConfiguration and Regenerate
parent 39c76ba2ed
commit d82f606970
@@ -381,6 +381,7 @@ API rule violation: list_type_missing,k8s.io/kubelet/config/v1beta1,KubeletConfi
 API rule violation: list_type_missing,k8s.io/kubelet/config/v1beta1,KubeletConfiguration,EnforceNodeAllocatable
 API rule violation: list_type_missing,k8s.io/kubelet/config/v1beta1,KubeletConfiguration,RegisterWithTaints
 API rule violation: list_type_missing,k8s.io/kubelet/config/v1beta1,KubeletConfiguration,ReservedMemory
+API rule violation: list_type_missing,k8s.io/kubelet/config/v1beta1,KubeletConfiguration,ShutdownGracePeriodByPodPriority
 API rule violation: list_type_missing,k8s.io/kubelet/config/v1beta1,KubeletConfiguration,TLSCipherSuites
 API rule violation: list_type_missing,k8s.io/metrics/pkg/apis/metrics/v1alpha1,PodMetrics,Containers
 API rule violation: list_type_missing,k8s.io/metrics/pkg/apis/metrics/v1beta1,PodMetrics,Containers
@@ -585,6 +585,12 @@ const (
 	// Adds support for kubelet to detect node shutdown and gracefully terminate pods prior to the node being shutdown.
 	GracefulNodeShutdown featuregate.Feature = "GracefulNodeShutdown"
 
+	// owner: @wzshiming
+	// alpha: v1.23
+	//
+	// Make the kubelet use shutdown configuration based on pod priority values for graceful shutdown.
+	GracefulNodeShutdownBasedOnPodPriority featuregate.Feature = "GracefulNodeShutdownBasedOnPodPriority"
+
 	// owner: @andrewsykim @uablrek
 	// kep: http://kep.k8s.io/1864
 	// alpha: v1.20
@@ -920,6 +926,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 	ExecProbeTimeout: {Default: true, PreRelease: featuregate.GA}, // lock to default and remove after v1.22 based on KEP #1972 update
 	KubeletCredentialProviders: {Default: false, PreRelease: featuregate.Alpha},
 	GracefulNodeShutdown: {Default: true, PreRelease: featuregate.Beta},
+	GracefulNodeShutdownBasedOnPodPriority: {Default: false, PreRelease: featuregate.Alpha},
 	ServiceLBNodePortControl: {Default: true, PreRelease: featuregate.Beta},
 	MixedProtocolLBService: {Default: false, PreRelease: featuregate.Alpha},
 	VolumeCapacityPriority: {Default: false, PreRelease: featuregate.Alpha},
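For context (not part of this diff): a minimal sketch of how kubelet code would typically consult the gate registered above before using the new priority-based shutdown configuration. The package name and helper function are assumptions; only the feature-gate API and the GracefulNodeShutdownBasedOnPodPriority constant come from the change itself.

package nodeshutdownexample

import (
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

// priorityBasedShutdownEnabled reports whether the alpha gate is on, e.g. after
// starting the kubelet with --feature-gates=GracefulNodeShutdownBasedOnPodPriority=true.
func priorityBasedShutdownEnabled() bool {
	return utilfeature.DefaultFeatureGate.Enabled(features.GracefulNodeShutdownBasedOnPodPriority)
}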
@@ -397,6 +397,15 @@ type KubeletConfiguration struct {
 	// +featureGate=GracefulNodeShutdown
 	// +optional
 	ShutdownGracePeriodCriticalPods metav1.Duration
+	// ShutdownGracePeriodByPodPriority specifies the shutdown grace period for Pods based
+	// on their associated priority class value.
+	// When a shutdown request is received, the Kubelet will initiate shutdown on all pods
+	// running on the node with a grace period that depends on the priority of the pod,
+	// and then wait for all pods to exit.
+	// Each entry in the array represents the graceful shutdown time a pod with a priority
+	// class value that lies in the range of that value and the next higher entry in the
+	// list when the node is shutting down.
+	ShutdownGracePeriodByPodPriority []ShutdownGracePeriodByPodPriority
 	// ReservedMemory specifies a comma-separated list of memory reservations for NUMA nodes.
 	// The parameter makes sense only in the context of the memory manager feature. The memory manager will not allocate reserved memory for container workloads.
 	// For example, if you have a NUMA0 with 10Gi of memory and the ReservedMemory was specified to reserve 1Gi of memory at NUMA0,
@@ -595,6 +604,14 @@ type MemoryReservation struct {
 	Limits v1.ResourceList
 }
 
+// ShutdownGracePeriodByPodPriority specifies the shutdown grace period for Pods based on their associated priority class value
+type ShutdownGracePeriodByPodPriority struct {
+	// priority is the priority value associated with the shutdown grace period
+	Priority int32
+	// shutdownGracePeriodSeconds is the shutdown grace period in seconds
+	ShutdownGracePeriodSeconds int64
+}
+
 type MemorySwapConfiguration struct {
 	// swapBehavior configures swap memory available to container workloads. May be one of
 	// "", "LimitedSwap": workload combined memory and swap usage cannot exceed pod memory limit
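To make the new field's semantics concrete, here is a small self-contained sketch (an illustration, not the kubelet's actual implementation; the struct is re-declared locally so the example runs standalone). It shows the range behavior described in the field comment: a pod receives the grace period of the highest entry whose priority does not exceed the pod's own priority.

package main

import (
	"fmt"
	"sort"
)

// Local mirror of the struct added above, so the example runs on its own.
type ShutdownGracePeriodByPodPriority struct {
	Priority                   int32
	ShutdownGracePeriodSeconds int64
}

// gracePeriodFor picks the grace period of the highest entry whose Priority
// does not exceed podPriority, matching the field documentation above.
func gracePeriodFor(podPriority int32, periods []ShutdownGracePeriodByPodPriority) int64 {
	sorted := append([]ShutdownGracePeriodByPodPriority(nil), periods...)
	sort.Slice(sorted, func(i, j int) bool { return sorted[i].Priority < sorted[j].Priority })
	var period int64
	for _, entry := range sorted {
		if podPriority >= entry.Priority {
			period = entry.ShutdownGracePeriodSeconds
		}
	}
	return period
}

func main() {
	periods := []ShutdownGracePeriodByPodPriority{
		{Priority: 0, ShutdownGracePeriodSeconds: 30},
		{Priority: 10000, ShutdownGracePeriodSeconds: 20},
		{Priority: 2000000000, ShutdownGracePeriodSeconds: 10},
	}
	fmt.Println(gracePeriodFor(500, periods))        // 30 (falls in the [0, 10000) range)
	fmt.Println(gracePeriodFor(100000, periods))     // 20
	fmt.Println(gracePeriodFor(2000000000, periods)) // 10 (critical pods)
}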
@@ -140,6 +140,16 @@ func RegisterConversions(s *runtime.Scheme) error {
 	}); err != nil {
 		return err
 	}
+	if err := s.AddGeneratedConversionFunc((*v1beta1.ShutdownGracePeriodByPodPriority)(nil), (*config.ShutdownGracePeriodByPodPriority)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_v1beta1_ShutdownGracePeriodByPodPriority_To_config_ShutdownGracePeriodByPodPriority(a.(*v1beta1.ShutdownGracePeriodByPodPriority), b.(*config.ShutdownGracePeriodByPodPriority), scope)
+	}); err != nil {
+		return err
+	}
+	if err := s.AddGeneratedConversionFunc((*config.ShutdownGracePeriodByPodPriority)(nil), (*v1beta1.ShutdownGracePeriodByPodPriority)(nil), func(a, b interface{}, scope conversion.Scope) error {
+		return Convert_config_ShutdownGracePeriodByPodPriority_To_v1beta1_ShutdownGracePeriodByPodPriority(a.(*config.ShutdownGracePeriodByPodPriority), b.(*v1beta1.ShutdownGracePeriodByPodPriority), scope)
+	}); err != nil {
+		return err
+	}
 	return nil
 }
 
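As a usage note (not part of the commit): once these conversions are registered, the generated functions can also be called directly; a nil conversion scope is enough here because only scalar fields are copied. The import aliases and paths are my assumption about where the generated code lives.

package example

import (
	"k8s.io/kubelet/config/v1beta1"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
	kubeletconfigv1beta1 "k8s.io/kubernetes/pkg/kubelet/apis/config/v1beta1"
)

// roundTrip converts a versioned entry to the internal type and back again,
// exercising both generated converters added in this change.
func roundTrip(in v1beta1.ShutdownGracePeriodByPodPriority) (v1beta1.ShutdownGracePeriodByPodPriority, error) {
	var internal kubeletconfig.ShutdownGracePeriodByPodPriority
	if err := kubeletconfigv1beta1.Convert_v1beta1_ShutdownGracePeriodByPodPriority_To_config_ShutdownGracePeriodByPodPriority(&in, &internal, nil); err != nil {
		return v1beta1.ShutdownGracePeriodByPodPriority{}, err
	}
	var out v1beta1.ShutdownGracePeriodByPodPriority
	if err := kubeletconfigv1beta1.Convert_config_ShutdownGracePeriodByPodPriority_To_v1beta1_ShutdownGracePeriodByPodPriority(&internal, &out, nil); err != nil {
		return v1beta1.ShutdownGracePeriodByPodPriority{}, err
	}
	return out, nil
}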
@@ -381,6 +391,7 @@ func autoConvert_v1beta1_KubeletConfiguration_To_config_KubeletConfiguration(in
 	}
 	out.ShutdownGracePeriod = in.ShutdownGracePeriod
 	out.ShutdownGracePeriodCriticalPods = in.ShutdownGracePeriodCriticalPods
+	out.ShutdownGracePeriodByPodPriority = *(*[]config.ShutdownGracePeriodByPodPriority)(unsafe.Pointer(&in.ShutdownGracePeriodByPodPriority))
 	out.ReservedMemory = *(*[]config.MemoryReservation)(unsafe.Pointer(&in.ReservedMemory))
 	if err := v1.Convert_Pointer_bool_To_bool(&in.EnableProfilingHandler, &out.EnableProfilingHandler, s); err != nil {
 		return err
@@ -556,6 +567,7 @@ func autoConvert_config_KubeletConfiguration_To_v1beta1_KubeletConfiguration(in
 	}
 	out.ShutdownGracePeriod = in.ShutdownGracePeriod
 	out.ShutdownGracePeriodCriticalPods = in.ShutdownGracePeriodCriticalPods
+	out.ShutdownGracePeriodByPodPriority = *(*[]v1beta1.ShutdownGracePeriodByPodPriority)(unsafe.Pointer(&in.ShutdownGracePeriodByPodPriority))
 	out.ReservedMemory = *(*[]v1beta1.MemoryReservation)(unsafe.Pointer(&in.ReservedMemory))
 	if err := v1.Convert_bool_To_Pointer_bool(&in.EnableProfilingHandler, &out.EnableProfilingHandler, s); err != nil {
 		return err
@@ -708,3 +720,25 @@ func autoConvert_config_SerializedNodeConfigSource_To_v1beta1_SerializedNodeConf
 func Convert_config_SerializedNodeConfigSource_To_v1beta1_SerializedNodeConfigSource(in *config.SerializedNodeConfigSource, out *v1beta1.SerializedNodeConfigSource, s conversion.Scope) error {
 	return autoConvert_config_SerializedNodeConfigSource_To_v1beta1_SerializedNodeConfigSource(in, out, s)
 }
+
+func autoConvert_v1beta1_ShutdownGracePeriodByPodPriority_To_config_ShutdownGracePeriodByPodPriority(in *v1beta1.ShutdownGracePeriodByPodPriority, out *config.ShutdownGracePeriodByPodPriority, s conversion.Scope) error {
+	out.Priority = in.Priority
+	out.ShutdownGracePeriodSeconds = in.ShutdownGracePeriodSeconds
+	return nil
+}
+
+// Convert_v1beta1_ShutdownGracePeriodByPodPriority_To_config_ShutdownGracePeriodByPodPriority is an autogenerated conversion function.
+func Convert_v1beta1_ShutdownGracePeriodByPodPriority_To_config_ShutdownGracePeriodByPodPriority(in *v1beta1.ShutdownGracePeriodByPodPriority, out *config.ShutdownGracePeriodByPodPriority, s conversion.Scope) error {
+	return autoConvert_v1beta1_ShutdownGracePeriodByPodPriority_To_config_ShutdownGracePeriodByPodPriority(in, out, s)
+}
+
+func autoConvert_config_ShutdownGracePeriodByPodPriority_To_v1beta1_ShutdownGracePeriodByPodPriority(in *config.ShutdownGracePeriodByPodPriority, out *v1beta1.ShutdownGracePeriodByPodPriority, s conversion.Scope) error {
+	out.Priority = in.Priority
+	out.ShutdownGracePeriodSeconds = in.ShutdownGracePeriodSeconds
+	return nil
+}
+
+// Convert_config_ShutdownGracePeriodByPodPriority_To_v1beta1_ShutdownGracePeriodByPodPriority is an autogenerated conversion function.
+func Convert_config_ShutdownGracePeriodByPodPriority_To_v1beta1_ShutdownGracePeriodByPodPriority(in *config.ShutdownGracePeriodByPodPriority, out *v1beta1.ShutdownGracePeriodByPodPriority, s conversion.Scope) error {
+	return autoConvert_config_ShutdownGracePeriodByPodPriority_To_v1beta1_ShutdownGracePeriodByPodPriority(in, out, s)
+}
pkg/kubelet/apis/config/zz_generated.deepcopy.go (generated, 21 changed lines)
@@ -283,6 +283,11 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) {
 	in.Logging.DeepCopyInto(&out.Logging)
 	out.ShutdownGracePeriod = in.ShutdownGracePeriod
 	out.ShutdownGracePeriodCriticalPods = in.ShutdownGracePeriodCriticalPods
+	if in.ShutdownGracePeriodByPodPriority != nil {
+		in, out := &in.ShutdownGracePeriodByPodPriority, &out.ShutdownGracePeriodByPodPriority
+		*out = make([]ShutdownGracePeriodByPodPriority, len(*in))
+		copy(*out, *in)
+	}
 	if in.ReservedMemory != nil {
 		in, out := &in.ReservedMemory, &out.ReservedMemory
 		*out = make([]MemoryReservation, len(*in))
@@ -438,3 +443,19 @@ func (in *SerializedNodeConfigSource) DeepCopyObject() runtime.Object {
 	}
 	return nil
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShutdownGracePeriodByPodPriority) DeepCopyInto(out *ShutdownGracePeriodByPodPriority) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShutdownGracePeriodByPodPriority.
+func (in *ShutdownGracePeriodByPodPriority) DeepCopy() *ShutdownGracePeriodByPodPriority {
+	if in == nil {
+		return nil
+	}
+	out := new(ShutdownGracePeriodByPodPriority)
+	in.DeepCopyInto(out)
+	return out
+}
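A short illustration (not in the commit; the import path is an assumption) of what the generated DeepCopy above guarantees: the copy shares no state with the original. For this struct that is trivially true, since it contains only scalar fields.

package example

import kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"

// copyIsIndependent mutates the copy and checks that the original is untouched.
func copyIsIndependent() bool {
	orig := &kubeletconfig.ShutdownGracePeriodByPodPriority{Priority: 10000, ShutdownGracePeriodSeconds: 20}
	cp := orig.DeepCopy()
	cp.ShutdownGracePeriodSeconds = 99
	return orig.ShutdownGracePeriodSeconds == 20 // true: the copy is independent
}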
@@ -989,6 +989,35 @@ type KubeletConfiguration struct {
 	// +featureGate=GracefulNodeShutdown
 	// +optional
 	ShutdownGracePeriodCriticalPods metav1.Duration `json:"shutdownGracePeriodCriticalPods,omitempty"`
+	// shutdownGracePeriodByPodPriority specifies the shutdown grace period for Pods based
+	// on their associated priority class value.
+	// When a shutdown request is received, the Kubelet will initiate shutdown on all pods
+	// running on the node with a grace period that depends on the priority of the pod,
+	// and then wait for all pods to exit.
+	// Each entry in the array represents the graceful shutdown time a pod with a priority
+	// class value that lies in the range of that value and the next higher entry in the
+	// list when the node is shutting down.
+	// For example, to allow critical pods 10s to shutdown, priority>=10000 pods 20s to
+	// shutdown, and all remaining pods 30s to shutdown.
+	//
+	// shutdownGracePeriodByPodPriority:
+	//   - priority: 2000000000
+	//     shutdownGracePeriodSeconds: 10
+	//   - priority: 10000
+	//     shutdownGracePeriodSeconds: 20
+	//   - priority: 0
+	//     shutdownGracePeriodSeconds: 30
+	//
+	// The time the Kubelet will wait before exiting will at most be the maximum of all
+	// shutdownGracePeriodSeconds for each priority class range represented on the node.
+	// When all pods have exited or reached their grace periods, the Kubelet will release
+	// the shutdown inhibit lock.
+	// Requires the GracefulNodeShutdown feature gate to be enabled.
+	// This configuration must be empty if either ShutdownGracePeriod or ShutdownGracePeriodCriticalPods is set.
+	// Default: nil
+	// +featureGate=GracefulNodeShutdownBasedOnPodPriority
+	// +optional
+	ShutdownGracePeriodByPodPriority []ShutdownGracePeriodByPodPriority `json:"shutdownGracePeriodByPodPriority,omitempty"`
 	// reservedMemory specifies a comma-separated list of memory reservations for NUMA nodes.
 	// The parameter makes sense only in the context of the memory manager feature.
 	// The memory manager will not allocate reserved memory for container workloads.
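To connect the YAML example in the comment above with its claim about the node's total wait, here is a tiny sketch (illustrative, not part of the commit): it writes the example as a Go literal of the new field and computes the upper bound on how long the kubelet holds the shutdown inhibit lock, i.e. the maximum shutdownGracePeriodSeconds across the entries.

package example

import "k8s.io/kubelet/config/v1beta1"

// The YAML example above, written as the equivalent Go value of the new field.
var example = []v1beta1.ShutdownGracePeriodByPodPriority{
	{Priority: 2000000000, ShutdownGracePeriodSeconds: 10},
	{Priority: 10000, ShutdownGracePeriodSeconds: 20},
	{Priority: 0, ShutdownGracePeriodSeconds: 30},
}

// maxShutdownWaitSeconds returns the bound described in the comment: the kubelet
// waits at most the largest grace period configured for any priority range.
// For the example above this is 30 seconds.
func maxShutdownWaitSeconds(entries []v1beta1.ShutdownGracePeriodByPodPriority) int64 {
	var max int64
	for _, e := range entries {
		if e.ShutdownGracePeriodSeconds > max {
			max = e.ShutdownGracePeriodSeconds
		}
	}
	return max
}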
@@ -1136,6 +1165,14 @@ type MemoryReservation struct {
 	Limits v1.ResourceList `json:"limits"`
 }
 
+// ShutdownGracePeriodByPodPriority specifies the shutdown grace period for Pods based on their associated priority class value
+type ShutdownGracePeriodByPodPriority struct {
+	// priority is the priority value associated with the shutdown grace period
+	Priority int32 `json:"priority"`
+	// shutdownGracePeriodSeconds is the shutdown grace period in seconds
+	ShutdownGracePeriodSeconds int64 `json:"shutdownGracePeriodSeconds"`
+}
+
 type MemorySwapConfiguration struct {
 	// swapBehavior configures swap memory available to container workloads. May be one of
 	// "", "LimitedSwap": workload combined memory and swap usage cannot exceed pod memory limit
@@ -318,6 +318,11 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) {
 	}
 	out.ShutdownGracePeriod = in.ShutdownGracePeriod
 	out.ShutdownGracePeriodCriticalPods = in.ShutdownGracePeriodCriticalPods
+	if in.ShutdownGracePeriodByPodPriority != nil {
+		in, out := &in.ShutdownGracePeriodByPodPriority, &out.ShutdownGracePeriodByPodPriority
+		*out = make([]ShutdownGracePeriodByPodPriority, len(*in))
+		copy(*out, *in)
+	}
 	if in.ReservedMemory != nil {
 		in, out := &in.ReservedMemory, &out.ReservedMemory
 		*out = make([]MemoryReservation, len(*in))
@@ -498,3 +503,19 @@ func (in *SerializedNodeConfigSource) DeepCopyObject() runtime.Object {
 	}
 	return nil
 }
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ShutdownGracePeriodByPodPriority) DeepCopyInto(out *ShutdownGracePeriodByPodPriority) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ShutdownGracePeriodByPodPriority.
+func (in *ShutdownGracePeriodByPodPriority) DeepCopy() *ShutdownGracePeriodByPodPriority {
+	if in == nil {
+		return nil
+	}
+	out := new(ShutdownGracePeriodByPodPriority)
+	in.DeepCopyInto(out)
+	return out
+}