Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-07-23 03:41:45 +00:00)
Merge pull request #116093 from swatisehgal/topologymanager-ga-graduation
node: topologymgr: Graduate Kubelet Topology Manager to GA
Commit: 8d5c96fed2
@@ -711,13 +711,11 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
     }

     var topologyManagerPolicyOptions map[string]string
-    if utilfeature.DefaultFeatureGate.Enabled(features.TopologyManager) {
-        if utilfeature.DefaultFeatureGate.Enabled(features.TopologyManagerPolicyOptions) {
-            topologyManagerPolicyOptions = s.TopologyManagerPolicyOptions
-        } else if s.TopologyManagerPolicyOptions != nil {
-            return fmt.Errorf("topology manager policy options %v require feature gates %q, %q enabled",
-                s.TopologyManagerPolicyOptions, features.TopologyManager, features.TopologyManagerPolicyOptions)
-        }
+    if utilfeature.DefaultFeatureGate.Enabled(features.TopologyManagerPolicyOptions) {
+        topologyManagerPolicyOptions = s.TopologyManagerPolicyOptions
+    } else if s.TopologyManagerPolicyOptions != nil {
+        return fmt.Errorf("topology manager policy options %v require feature gates %q enabled",
+            s.TopologyManagerPolicyOptions, features.TopologyManagerPolicyOptions)
     }

     kubeDeps.ContainerManager, err = cm.NewContainerManager(

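To make the new control flow in this hunk explicit: with TopologyManager always on, only the TopologyManagerPolicyOptions gate still guards the options map. A hedged, self-contained restatement of that check as a plain helper; the function name and the sample option value are illustrative, not code from this PR:

package main

import "fmt"

// resolveTopologyManagerPolicyOptions mirrors the simplified logic above:
// the options map is passed through only when the options gate is enabled,
// and supplying options with the gate off is a hard error.
func resolveTopologyManagerPolicyOptions(opts map[string]string, optionsGateEnabled bool) (map[string]string, error) {
    if optionsGateEnabled {
        return opts, nil
    }
    if opts != nil {
        return nil, fmt.Errorf("topology manager policy options %v require feature gates %q enabled",
            opts, "TopologyManagerPolicyOptions")
    }
    return nil, nil
}

func main() {
    // With the gate off, passing an option is rejected rather than silently dropped.
    opts, err := resolveTopologyManagerPolicyOptions(map[string]string{"prefer-closest-numa-nodes": "true"}, false)
    fmt.Println(opts, err)
}
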
@@ -751,8 +749,8 @@ func run(ctx context.Context, s *options.KubeletServer, kubeDeps *kubelet.Depend
             PodPidsLimit: s.PodPidsLimit,
             EnforceCPULimits: s.CPUCFSQuota,
             CPUCFSQuotaPeriod: s.CPUCFSQuotaPeriod.Duration,
-            ExperimentalTopologyManagerPolicy: s.TopologyManagerPolicy,
-            ExperimentalTopologyManagerScope: s.TopologyManagerScope,
+            TopologyManagerPolicy: s.TopologyManagerPolicy,
+            TopologyManagerScope: s.TopologyManagerScope,
             ExperimentalTopologyManagerPolicyOptions: topologyManagerPolicyOptions,
         },
         s.FailSwapOn,

@@ -759,9 +759,10 @@ const (
     // Enables topology aware hints for EndpointSlices
     TopologyAwareHints featuregate.Feature = "TopologyAwareHints"

-    // owner: @lmdaly
+    // owner: @lmdaly, @swatisehgal (for GA graduation)
     // alpha: v1.16
     // beta: v1.18
+    // GA: v1.27
     //
     // Enable resource managers to make NUMA aligned decisions
     TopologyManager featuregate.Feature = "TopologyManager"

@@ -1061,7 +1062,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
     TopologyAwareHints: {Default: true, PreRelease: featuregate.Beta},

-    TopologyManager: {Default: true, PreRelease: featuregate.Beta},
+    TopologyManager: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.27; remove in 1.29

     TopologyManagerPolicyAlphaOptions: {Default: false, PreRelease: featuregate.Alpha},

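The `LockToDefault: true` spec above means the gate can no longer be switched off once it is GA. A minimal sketch of that behavior using k8s.io/component-base/featuregate; the standalone gate instance here is purely illustrative and not part of this change:

package main

import (
    "fmt"

    "k8s.io/component-base/featuregate"
)

func main() {
    // Register the gate the same way the defaults table above does.
    gate := featuregate.NewFeatureGate()
    _ = gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
        "TopologyManager": {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
    })

    // With LockToDefault, an attempt to disable the gate (for example via
    // --feature-gates=TopologyManager=false) is rejected instead of honored.
    err := gate.Set("TopologyManager=false")
    fmt.Println(gate.Enabled("TopologyManager"), err)
}
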
pkg/generated/openapi/zz_generated.openapi.go (generated, 4 changed lines)
@@ -57695,14 +57695,14 @@ func schema_k8sio_kubelet_config_v1beta1_KubeletConfiguration(ref common.Referen
             },
             "topologyManagerPolicy": {
                 SchemaProps: spec.SchemaProps{
-                    Description: "topologyManagerPolicy is the name of the topology manager policy to use. Valid values include:\n\n- `restricted`: kubelet only allows pods with optimal NUMA node alignment for\n requested resources;\n- `best-effort`: kubelet will favor pods with NUMA alignment of CPU and device\n resources;\n- `none`: kubelet has no knowledge of NUMA alignment of a pod's CPU and device resources. - `single-numa-node`: kubelet only allows pods with a single NUMA alignment\n of CPU and device resources.\n\nPolicies other than \"none\" require the TopologyManager feature gate to be enabled. Default: \"none\"",
+                    Description: "topologyManagerPolicy is the name of the topology manager policy to use. Valid values include:\n\n- `restricted`: kubelet only allows pods with optimal NUMA node alignment for\n requested resources;\n- `best-effort`: kubelet will favor pods with NUMA alignment of CPU and device\n resources;\n- `none`: kubelet has no knowledge of NUMA alignment of a pod's CPU and device resources. - `single-numa-node`: kubelet only allows pods with a single NUMA alignment\n of CPU and device resources.\n\nDefault: \"none\"",
                     Type: []string{"string"},
                     Format: "",
                 },
             },
             "topologyManagerScope": {
                 SchemaProps: spec.SchemaProps{
-                    Description: "topologyManagerScope represents the scope of topology hint generation that topology manager requests and hint providers generate. Valid values include:\n\n- `container`: topology policy is applied on a per-container basis. - `pod`: topology policy is applied on a per-pod basis.\n\n\"pod\" scope requires the TopologyManager feature gate to be enabled. Default: \"container\"",
+                    Description: "topologyManagerScope represents the scope of topology hint generation that topology manager requests and hint providers generate. Valid values include:\n\n- `container`: topology policy is applied on a per-container basis. - `pod`: topology policy is applied on a per-pod basis.\n\nDefault: \"container\"",
                     Type: []string{"string"},
                     Format: "",
                 },

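Since the schema above no longer mentions the feature gate, a kubelet configuration only needs the two fields themselves. A rough sketch of building such a config in Go against the v1beta1 types described by the schema; the small program around it is illustrative only and not part of this PR:

package main

import (
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    kubeletv1beta1 "k8s.io/kubelet/config/v1beta1"
    "sigs.k8s.io/yaml"
)

func main() {
    cfg := kubeletv1beta1.KubeletConfiguration{
        TypeMeta: metav1.TypeMeta{
            APIVersion: "kubelet.config.k8s.io/v1beta1",
            Kind:       "KubeletConfiguration",
        },
        // No TopologyManager entry under featureGates is needed once the gate is GA.
        TopologyManagerPolicy: "single-numa-node",
        TopologyManagerScope:  "pod",
    }

    // Serialize to the YAML form the kubelet reads from --config.
    out, err := yaml.Marshal(cfg)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))
}
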
@@ -233,11 +233,9 @@ type KubeletConfiguration struct {
     // Requires the MemoryManager feature gate to be enabled.
     MemoryManagerPolicy string
     // TopologyManagerPolicy is the name of the policy to use.
-    // Policies other than "none" require the TopologyManager feature gate to be enabled.
     TopologyManagerPolicy string
     // TopologyManagerScope represents the scope of topology hint generation
     // that topology manager requests and hint providers generate.
-    // "pod" scope requires the TopologyManager feature gate to be enabled.
     // Default: "container"
     // +optional
     TopologyManagerScope string

@@ -131,9 +131,6 @@ func ValidateKubeletConfiguration(kc *kubeletconfig.KubeletConfiguration, featur
     if kc.ServerTLSBootstrap && !localFeatureGate.Enabled(features.RotateKubeletServerCertificate) {
         allErrors = append(allErrors, fmt.Errorf("invalid configuration: serverTLSBootstrap %v requires feature gate RotateKubeletServerCertificate", kc.ServerTLSBootstrap))
     }
-    if kc.TopologyManagerPolicy != kubeletconfig.NoneTopologyManagerPolicy && !localFeatureGate.Enabled(features.TopologyManager) {
-        allErrors = append(allErrors, fmt.Errorf("invalid configuration: topologyManagerPolicy %v requires feature gate TopologyManager", kc.TopologyManagerPolicy))
-    }

     for _, nodeTaint := range kc.RegisterWithTaints {
         if err := utiltaints.CheckTaintValidation(nodeTaint); err != nil {

@@ -152,9 +149,7 @@ func ValidateKubeletConfiguration(kc *kubeletconfig.KubeletConfiguration, featur
     default:
         allErrors = append(allErrors, fmt.Errorf("invalid configuration: topologyManagerPolicy (--topology-manager-policy) %q must be one of: %q", kc.TopologyManagerPolicy, []string{kubeletconfig.NoneTopologyManagerPolicy, kubeletconfig.BestEffortTopologyManagerPolicy, kubeletconfig.RestrictedTopologyManagerPolicy, kubeletconfig.SingleNumaNodeTopologyManagerPolicy}))
     }
-    if kc.TopologyManagerScope != kubeletconfig.ContainerTopologyManagerScope && !localFeatureGate.Enabled(features.TopologyManager) {
-        allErrors = append(allErrors, fmt.Errorf("invalid configuration: topologyManagerScope %v requires feature gate TopologyManager", kc.TopologyManagerScope))
-    }

     switch kc.TopologyManagerScope {
     case kubeletconfig.ContainerTopologyManagerScope:
     case kubeletconfig.PodTopologyManagerScope:

@@ -333,15 +333,6 @@ func TestValidateKubeletConfiguration(t *testing.T) {
         },
         errMsg: "invalid configuration: serverTLSBootstrap true requires feature gate RotateKubeletServerCertificate",
     },
-    {
-        name: "use SingleNumaNodeTopologyManagerPolicy without enabling TopologyManager",
-        configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration {
-            conf.FeatureGates = map[string]bool{"TopologyManager": false}
-            conf.TopologyManagerPolicy = kubeletconfig.SingleNumaNodeTopologyManagerPolicy
-            return conf
-        },
-        errMsg: "invalid configuration: topologyManagerPolicy single-numa-node requires feature gate TopologyManager",
-    },
     {
         name: "invalid TopologyManagerPolicy",
         configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration {

@@ -350,15 +341,6 @@ func TestValidateKubeletConfiguration(t *testing.T) {
         },
         errMsg: "invalid configuration: topologyManagerPolicy (--topology-manager-policy) \"invalid-policy\" must be one of: [\"none\" \"best-effort\" \"restricted\" \"single-numa-node\"]",
     },
-    {
-        name: "use PodTopologyManagerScope without enabling TopologyManager",
-        configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration {
-            conf.FeatureGates = map[string]bool{"TopologyManager": false}
-            conf.TopologyManagerScope = kubeletconfig.PodTopologyManagerScope
-            return conf
-        },
-        errMsg: "invalid configuration: topologyManagerScope pod requires feature gate TopologyManager",
-    },
     {
         name: "invalid TopologyManagerScope",
         configure: func(conf *kubeletconfig.KubeletConfiguration) *kubeletconfig.KubeletConfiguration {

@@ -148,14 +148,14 @@ type NodeConfig struct {
     QOSReserved map[v1.ResourceName]int64
     CPUManagerPolicy string
     CPUManagerPolicyOptions map[string]string
-    ExperimentalTopologyManagerScope string
+    TopologyManagerScope string
     CPUManagerReconcilePeriod time.Duration
     ExperimentalMemoryManagerPolicy string
     ExperimentalMemoryManagerReservedMemory []kubeletconfig.MemoryReservation
     PodPidsLimit int64
     EnforceCPULimits bool
     CPUCFSQuotaPeriod time.Duration
-    ExperimentalTopologyManagerPolicy string
+    TopologyManagerPolicy string
     ExperimentalTopologyManagerPolicyOptions map[string]string
 }

@@ -51,7 +51,6 @@ import (
     podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
-    kubefeatures "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/kubelet/cadvisor"
     "k8s.io/kubernetes/pkg/kubelet/cm/admission"
     "k8s.io/kubernetes/pkg/kubelet/cm/containermap"
     "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
     "k8s.io/kubernetes/pkg/kubelet/cm/devicemanager"

@@ -289,20 +288,15 @@ func NewContainerManager(mountUtil mount.Interface, cadvisorInterface cadvisor.I
         qosContainerManager: qosContainerManager,
     }

-    if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.TopologyManager) {
-        cm.topologyManager, err = topologymanager.NewManager(
-            machineInfo.Topology,
-            nodeConfig.ExperimentalTopologyManagerPolicy,
-            nodeConfig.ExperimentalTopologyManagerScope,
-            nodeConfig.ExperimentalTopologyManagerPolicyOptions,
-        )
-
-        if err != nil {
-            return nil, err
-        }
-
-    } else {
-        cm.topologyManager = topologymanager.NewFakeManager()
-    }
+    cm.topologyManager, err = topologymanager.NewManager(
+        machineInfo.Topology,
+        nodeConfig.TopologyManagerPolicy,
+        nodeConfig.TopologyManagerScope,
+        nodeConfig.ExperimentalTopologyManagerPolicyOptions,
+    )
+
+    if err != nil {
+        return nil, err
+    }

     klog.InfoS("Creating device plugin manager")

@@ -687,50 +681,7 @@ func (cm *containerManagerImpl) UpdatePluginResources(node *schedulerframework.N
 }

 func (cm *containerManagerImpl) GetAllocateResourcesPodAdmitHandler() lifecycle.PodAdmitHandler {
-    if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.TopologyManager) {
-        return cm.topologyManager
-    }
-    // TODO: we need to think about a better way to do this. This will work for
-    // now so long as we have only the cpuManager and deviceManager relying on
-    // allocations here. However, going forward it is not generalized enough to
-    // work as we add more and more hint providers that the TopologyManager
-    // needs to call Allocate() on (that may not be directly intstantiated
-    // inside this component).
-    return &resourceAllocator{cm.cpuManager, cm.memoryManager, cm.deviceManager, cm.draManager}
-}
-
-type resourceAllocator struct {
-    cpuManager cpumanager.Manager
-    memoryManager memorymanager.Manager
-    deviceManager devicemanager.Manager
-    draManager dra.Manager
-}
-
-func (m *resourceAllocator) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
-    pod := attrs.Pod
-
-    for _, container := range append(pod.Spec.InitContainers, pod.Spec.Containers...) {
-        err := m.deviceManager.Allocate(pod, &container)
-        if err != nil {
-            return admission.GetPodAdmitResult(err)
-        }
-
-        if m.cpuManager != nil {
-            err = m.cpuManager.Allocate(pod, &container)
-            if err != nil {
-                return admission.GetPodAdmitResult(err)
-            }
-        }
-
-        if m.memoryManager != nil {
-            err = m.memoryManager.Allocate(pod, &container)
-            if err != nil {
-                return admission.GetPodAdmitResult(err)
-            }
-        }
-    }
-
-    return admission.GetPodAdmitResult(nil)
+    return cm.topologyManager
 }

 func (cm *containerManagerImpl) SystemCgroupsLimit() v1.ResourceList {

@@ -18,9 +18,7 @@ package cm

 import (
     "k8s.io/api/core/v1"
-    utilfeature "k8s.io/apiserver/pkg/util/feature"
     runtimeapi "k8s.io/cri-api/pkg/apis/runtime/v1"
-    kubefeatures "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/kubelet/cm/cpumanager"
     "k8s.io/kubernetes/pkg/kubelet/cm/memorymanager"
     "k8s.io/kubernetes/pkg/kubelet/cm/topologymanager"

@@ -49,9 +47,8 @@ func (i *internalContainerLifecycleImpl) PreStartContainer(pod *v1.Pod, containe
         i.memoryManager.AddContainer(pod, container, containerID)
     }

-    if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.TopologyManager) {
-        i.topologyManager.AddContainer(pod, container, containerID)
-    }
+    i.topologyManager.AddContainer(pod, container, containerID)

     return nil
 }

@@ -60,11 +57,5 @@ func (i *internalContainerLifecycleImpl) PreStopContainer(containerID string) er
 }

 func (i *internalContainerLifecycleImpl) PostStopContainer(containerID string) error {
-    if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.TopologyManager) {
-        err := i.topologyManager.RemoveContainer(containerID)
-        if err != nil {
-            return err
-        }
-    }
-    return nil
+    return i.topologyManager.RemoveContainer(containerID)
 }

@@ -368,7 +368,6 @@ type KubeletConfiguration struct {
     // - `single-numa-node`: kubelet only allows pods with a single NUMA alignment
     //   of CPU and device resources.
     //
-    // Policies other than "none" require the TopologyManager feature gate to be enabled.
     // Default: "none"
     // +optional
     TopologyManagerPolicy string `json:"topologyManagerPolicy,omitempty"`

@@ -378,7 +377,6 @@ type KubeletConfiguration struct {
     // - `container`: topology policy is applied on a per-container basis.
     // - `pod`: topology policy is applied on a per-pod basis.
     //
-    // "pod" scope requires the TopologyManager feature gate to be enabled.
     // Default: "container"
     // +optional
     TopologyManagerScope string `json:"topologyManagerScope,omitempty"`

@@ -34,7 +34,7 @@ import (
     admissionapi "k8s.io/pod-security-admission/api"
 )

-var _ = SIGDescribe("Topology Manager Metrics [Serial][Feature:TopologyManager]", func() {
+var _ = SIGDescribe("Topology Manager Metrics [Serial][NodeFeature:TopologyManager]", func() {
     f := framework.NewDefaultFramework("topologymanager-metrics")
     f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged

@@ -199,8 +199,6 @@ func configureTopologyManagerInKubelet(oldCfg *kubeletconfig.KubeletConfiguratio
         newCfg.FeatureGates = make(map[string]bool)
     }

-    newCfg.FeatureGates["TopologyManager"] = true
-
     // Set the Topology Manager policy
     newCfg.TopologyManagerPolicy = policy

@@ -946,7 +944,7 @@ func hostPrecheck() (int, int) {
 }

 // Serial because the test updates kubelet configuration.
-var _ = SIGDescribe("Topology Manager [Serial] [Feature:TopologyManager][NodeFeature:TopologyManager]", func() {
+var _ = SIGDescribe("Topology Manager [Serial] [NodeFeature:TopologyManager]", func() {
     f := framework.NewDefaultFramework("topology-manager-test")
     f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged