Mirror of https://github.com/k3s-io/kubernetes.git
Added cpu-manager-reconcile-period config.
- Defaults to sync-frequency.

commit 50674ec614
parent 7c6e31617d
@@ -328,6 +328,7 @@ func AddKubeletConfigFlags(fs *pflag.FlagSet, c *kubeletconfig.KubeletConfigurat
 	fs.StringVar(&c.CgroupDriver, "cgroup-driver", c.CgroupDriver, "Driver that the kubelet uses to manipulate cgroups on the host. Possible values: 'cgroupfs', 'systemd'")
 	fs.StringVar(&c.CgroupRoot, "cgroup-root", c.CgroupRoot, "Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default.")
 	fs.StringVar(&c.CPUManagerPolicy, "cpu-manager-policy", c.CPUManagerPolicy, "<Warning: Alpha feature> CPU Manager policy to use. Possible values: 'none'. Default: 'none'")
+	fs.DurationVar(&c.CPUManagerReconcilePeriod.Duration, "cpu-manager-reconcile-period", c.CPUManagerReconcilePeriod.Duration, "<Warning: Alpha feature> CPU Manager reconciliation period. Examples: '10s', or '1m'. If not supplied, defaults to `NodeStatusUpdateFrequency`")
 	fs.StringVar(&c.ContainerRuntime, "container-runtime", c.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'.")
 	fs.DurationVar(&c.RuntimeRequestTimeout.Duration, "runtime-request-timeout", c.RuntimeRequestTimeout.Duration, "Timeout of all runtime requests except long running request - pull, logs, exec and attach. When timeout exceeded, kubelet will cancel the request, throw out an error and retry later.")
 	fs.StringVar(&c.LockFilePath, "lock-file", c.LockFilePath, "<Warning: Alpha feature> The path to file for kubelet to use as a lock file.")
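The hunk above only registers the new flag. As a rough standalone sketch, not part of this commit, of how a pflag duration flag of that shape parses values like '10s' or '1m' (the flag-set name and the 10s fallback below are made up for illustration; the real default is applied later in SetDefaults_KubeletConfiguration):

```go
package main

import (
	"fmt"
	"time"

	"github.com/spf13/pflag"
)

func main() {
	// Hypothetical flag set; the real kubelet wires this into its own FlagSet.
	fs := pflag.NewFlagSet("kubelet-sketch", pflag.ContinueOnError)

	var reconcilePeriod time.Duration
	// Same flag name as in the diff; the 10s default here is only an illustration.
	fs.DurationVar(&reconcilePeriod, "cpu-manager-reconcile-period", 10*time.Second,
		"CPU Manager reconciliation period")

	// Values use Go duration syntax, e.g. "10s" or "1m", as the help text says.
	if err := fs.Parse([]string{"--cpu-manager-reconcile-period=1m"}); err != nil {
		panic(err)
	}
	fmt.Println(reconcilePeriod) // 1m0s
}
```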
@@ -446,8 +446,9 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) {
 				SystemReserved:         systemReserved,
 				HardEvictionThresholds: hardEvictionThresholds,
 			},
-			ExperimentalQOSReserved:      *experimentalQOSReserved,
-			ExperimentalCPUManagerPolicy: s.CPUManagerPolicy,
+			ExperimentalQOSReserved:               *experimentalQOSReserved,
+			ExperimentalCPUManagerPolicy:          s.CPUManagerPolicy,
+			ExperimentalCPUManagerReconcilePeriod: s.CPUManagerReconcilePeriod.Duration,
 		},
 		s.FailSwapOn,
 		kubeDeps.Recorder)
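Side note, not from the commit: the kubelet config field is a metav1.Duration while NodeConfig wants a plain time.Duration, which is why the call site above passes s.CPUManagerReconcilePeriod.Duration. A minimal sketch of that unwrapping:

```go
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// metav1.Duration embeds time.Duration so it can (de)serialize as a string.
	period := metav1.Duration{Duration: 10 * time.Second}

	// Components that only need a plain time.Duration take the embedded value,
	// mirroring s.CPUManagerReconcilePeriod.Duration in the hunk above.
	var plain time.Duration = period.Duration
	fmt.Println(plain) // 10s
}
```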
@@ -212,6 +212,8 @@ type KubeletConfiguration struct {
 	ContainerRuntime string
 	// CPUManagerPolicy is the name of the policy to use.
 	CPUManagerPolicy string
+	// CPU Manager reconciliation period.
+	CPUManagerReconcilePeriod metav1.Duration
 	// remoteRuntimeEndpoint is the endpoint of remote runtime service
 	RemoteRuntimeEndpoint string
 	// remoteImageEndpoint is the endpoint of remote image service
@@ -88,9 +88,6 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
 	if obj.ContainerRuntime == "" {
 		obj.ContainerRuntime = "docker"
 	}
-	if obj.CPUManagerPolicy == "" {
-		obj.CPUManagerPolicy = "none"
-	}
 	if obj.RuntimeRequestTimeout == zeroDuration {
 		obj.RuntimeRequestTimeout = metav1.Duration{Duration: 2 * time.Minute}
 	}
@@ -174,6 +171,12 @@ func SetDefaults_KubeletConfiguration(obj *KubeletConfiguration) {
 	if obj.NodeStatusUpdateFrequency == zeroDuration {
 		obj.NodeStatusUpdateFrequency = metav1.Duration{Duration: 10 * time.Second}
 	}
+	if obj.CPUManagerPolicy == "" {
+		obj.CPUManagerPolicy = "none"
+	}
+	if obj.CPUManagerReconcilePeriod == zeroDuration {
+		obj.CPUManagerReconcilePeriod = obj.NodeStatusUpdateFrequency
+	}
 	if obj.OOMScoreAdj == nil {
 		temp := int32(qos.KubeletOOMScoreAdj)
 		obj.OOMScoreAdj = &temp
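The ordering above matters: NodeStatusUpdateFrequency is defaulted first, so a zero CPUManagerReconcilePeriod inherits whatever that field ends up as (10s if nothing was set). A rough standalone sketch of the same cascade, using plain time.Duration instead of metav1.Duration and made-up struct/function names:

```go
package main

import (
	"fmt"
	"time"
)

type config struct {
	NodeStatusUpdateFrequency time.Duration
	CPUManagerReconcilePeriod time.Duration
}

func setDefaults(c *config) {
	// Defaulted first, as in SetDefaults_KubeletConfiguration above.
	if c.NodeStatusUpdateFrequency == 0 {
		c.NodeStatusUpdateFrequency = 10 * time.Second
	}
	// A zero reconcile period then falls back to the (possibly defaulted) frequency.
	if c.CPUManagerReconcilePeriod == 0 {
		c.CPUManagerReconcilePeriod = c.NodeStatusUpdateFrequency
	}
}

func main() {
	empty := config{}
	setDefaults(&empty)
	fmt.Println(empty.CPUManagerReconcilePeriod) // 10s

	custom := config{NodeStatusUpdateFrequency: 4 * time.Second}
	setDefaults(&custom)
	fmt.Println(custom.CPUManagerReconcilePeriod) // 4s
}
```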
@@ -203,7 +203,9 @@ type KubeletConfiguration struct {
 	// containerRuntime is the container runtime to use.
 	ContainerRuntime string `json:"containerRuntime"`
 	// CPUManagerPolicy is the name of the policy to use.
-	CPUManagerPolicy string
+	CPUManagerPolicy string `json:"cpuManagerPolicy"`
+	// CPU Manager reconciliation period.
+	CPUManagerReconcilePeriod metav1.Duration `json:"cpuManagerReconcilePeriod"`
 	// remoteRuntimeEndpoint is the endpoint of remote runtime service
 	RemoteRuntimeEndpoint string `json:"remoteRuntimeEndpoint"`
 	// remoteImageEndpoint is the endpoint of remote image service
@@ -226,6 +226,7 @@ func autoConvert_v1alpha1_KubeletConfiguration_To_kubeletconfig_KubeletConfigura
 	out.CgroupDriver = in.CgroupDriver
 	out.ContainerRuntime = in.ContainerRuntime
 	out.CPUManagerPolicy = in.CPUManagerPolicy
+	out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod
 	out.RemoteRuntimeEndpoint = in.RemoteRuntimeEndpoint
 	out.RemoteImageEndpoint = in.RemoteImageEndpoint
 	out.RuntimeRequestTimeout = in.RuntimeRequestTimeout
@@ -390,6 +391,7 @@ func autoConvert_kubeletconfig_KubeletConfiguration_To_v1alpha1_KubeletConfigura
 	out.CgroupRoot = in.CgroupRoot
 	out.ContainerRuntime = in.ContainerRuntime
 	out.CPUManagerPolicy = in.CPUManagerPolicy
+	out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod
 	out.RemoteRuntimeEndpoint = in.RemoteRuntimeEndpoint
 	out.RemoteImageEndpoint = in.RemoteImageEndpoint
 	out.RuntimeRequestTimeout = in.RuntimeRequestTimeout
@@ -299,6 +299,7 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) {
 			**out = **in
 		}
 	}
+	out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod
 	out.RuntimeRequestTimeout = in.RuntimeRequestTimeout
 	if in.LockFilePath != nil {
 		in, out := &in.LockFilePath, &out.LockFilePath
@@ -164,6 +164,7 @@ func (in *KubeletConfiguration) DeepCopyInto(out *KubeletConfiguration) {
 	out.NodeStatusUpdateFrequency = in.NodeStatusUpdateFrequency
 	out.ImageMinimumGCAge = in.ImageMinimumGCAge
 	out.VolumeStatsAggPeriod = in.VolumeStatsAggPeriod
+	out.CPUManagerReconcilePeriod = in.CPUManagerReconcilePeriod
 	out.RuntimeRequestTimeout = in.RuntimeRequestTimeout
 	if in.RegisterWithTaints != nil {
 		in, out := &in.RegisterWithTaints, &out.RegisterWithTaints
@@ -17,6 +17,8 @@ limitations under the License.
 package cm
 
 import (
+	"time"
+
 	"k8s.io/apimachinery/pkg/util/sets"
 	// TODO: Migrate kubelet to either use its own internal objects or client library.
 	"k8s.io/api/core/v1"
@@ -82,8 +84,9 @@ type NodeConfig struct {
 	CgroupDriver          string
 	ProtectKernelDefaults bool
 	NodeAllocatableConfig
-	ExperimentalQOSReserved      map[v1.ResourceName]int64
-	ExperimentalCPUManagerPolicy string
+	ExperimentalQOSReserved               map[v1.ResourceName]int64
+	ExperimentalCPUManagerPolicy          string
+	ExperimentalCPUManagerReconcilePeriod time.Duration
 }
 
 type NodeAllocatableConfig struct {
@@ -274,6 +274,7 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
 	if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.CPUManager) {
 		cm.cpuManager, err = cpumanager.NewManager(
 			nodeConfig.ExperimentalCPUManagerPolicy,
+			nodeConfig.ExperimentalCPUManagerReconcilePeriod,
 			machineInfo,
 			cm.GetNodeAllocatableReservation(),
 		)
@@ -65,6 +65,9 @@ type manager struct {
 	sync.Mutex
 	policy Policy
 
+	// reconcilePeriod is the duration between calls to reconcileState.
+	reconcilePeriod time.Duration
+
 	// state allows pluggable CPU assignment policies while sharing a common
 	// representation of state for the system to inspect and reconcile.
 	state state.State
@@ -91,6 +94,7 @@ var _ Manager = &manager{}
 // NewManager creates new cpu manager based on provided policy
 func NewManager(
 	cpuPolicyName string,
+	reconcilePeriod time.Duration,
 	machineInfo *cadvisorapi.MachineInfo,
 	nodeAllocatableReservation v1.ResourceList,
 ) (Manager, error) {
@@ -108,6 +112,7 @@ func NewManager(
 
 	manager := &manager{
 		policy:                     policy,
+		reconcilePeriod:            reconcilePeriod,
 		state:                      state.NewMemoryState(),
 		machineInfo:                machineInfo,
 		nodeAllocatableReservation: nodeAllocatableReservation,
@@ -117,6 +122,7 @@ func NewManager(
 
 func (m *manager) Start(activePods ActivePodsFunc, podStatusProvider status.PodStatusProvider, containerRuntime runtimeService) {
 	glog.Infof("[cpumanger] starting with %s policy", m.policy.Name())
+	glog.Infof("[cpumanger] reconciling every %v", m.reconcilePeriod)
 
 	m.activePods = activePods
 	m.podStatusProvider = podStatusProvider
@@ -126,7 +132,7 @@ func (m *manager) Start(activePods ActivePodsFunc, podStatusProvider status.PodS
 	if m.policy.Name() == string(PolicyNone) {
 		return
 	}
-	go wait.Until(func() { m.reconcileState() }, time.Second, wait.NeverStop)
+	go wait.Until(func() { m.reconcileState() }, m.reconcilePeriod, wait.NeverStop)
 }
 
 func (m *manager) AddContainer(p *v1.Pod, c *v1.Container, containerID string) error {
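The only behavioural change in the last hunk is the loop period: reconcileState used to run every time.Second and now runs every m.reconcilePeriod. A standalone sketch of the wait.Until pattern used there (nothing kubelet-specific; the 2s period and the printed message are made up):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	reconcilePeriod := 2 * time.Second // stand-in for m.reconcilePeriod
	stop := make(chan struct{})

	// wait.Until calls the function, then waits for the period, until stop is closed.
	go wait.Until(func() { fmt.Println("reconcileState tick") }, reconcilePeriod, stop)

	time.Sleep(5 * time.Second)
	close(stop)
}
```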