Merge pull request #87650 from nolancon/beta-feature-gate

Update TopologyManager Feature Gate
Authored by Kubernetes Prow Robot on 2020-03-05 20:03:04 -08:00, committed by GitHub
commit 48541a0b16
3 changed files with 11 additions and 7 deletions


@@ -140,6 +140,7 @@ const (
 	// owner: @lmdaly
 	// alpha: v1.16
+	// beta: v1.18
 	//
 	// Enable resource managers to make NUMA aligned decisions
 	TopologyManager featuregate.Feature = "TopologyManager"
@@ -598,7 +599,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
 	AttachVolumeLimit: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.19
 	CPUManager: {Default: true, PreRelease: featuregate.Beta},
 	CPUCFSQuotaPeriod: {Default: false, PreRelease: featuregate.Alpha},
-	TopologyManager: {Default: false, PreRelease: featuregate.Alpha},
+	TopologyManager: {Default: true, PreRelease: featuregate.Beta},
 	ServiceNodeExclusion: {Default: false, PreRelease: featuregate.Alpha},
 	NodeDisruptionExclusion: {Default: false, PreRelease: featuregate.Alpha},
 	CSIDriverRegistry: {Default: true, PreRelease: featuregate.Beta},
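For context on what the new default means in practice, here is a minimal sketch that is not part of the PR: it assumes only the public k8s.io/component-base/featuregate API (NewFeatureGate, Add, Set, Enabled) and an override string of the kind accepted by the kubelet's --feature-gates flag. Once the gate defaults to true in Beta, TopologyManager is on unless an operator explicitly turns it off.

package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

const TopologyManager featuregate.Feature = "TopologyManager"

func main() {
	gate := featuregate.NewFeatureGate()

	// Register the gate with the new Beta default introduced by this commit.
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		TopologyManager: {Default: true, PreRelease: featuregate.Beta},
	}); err != nil {
		panic(err)
	}
	fmt.Println(gate.Enabled(TopologyManager)) // true: enabled by default in Beta

	// Equivalent of an explicit opt-out such as --feature-gates=TopologyManager=false.
	if err := gate.Set("TopologyManager=false"); err != nil {
		panic(err)
	}
	fmt.Println(gate.Enabled(TopologyManager)) // false: the Beta default can still be overridden
}

Because the spec does not set LockToDefault, the Beta gate remains overridable, so clusters that are not ready for NUMA-aligned admission can still disable the Topology Manager explicitly.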


@@ -53,7 +53,6 @@ func TestValidateKubeletConfiguration(t *testing.T) {
 	HairpinMode: kubeletconfig.PromiscuousBridge,
 	NodeLeaseDurationSeconds: 1,
 	CPUCFSQuotaPeriod: metav1.Duration{Duration: 100 * time.Millisecond},
-	TopologyManagerPolicy: "none",
 }
 if allErrors := ValidateKubeletConfiguration(successCase1); allErrors != nil {
 	t.Errorf("expect no errors, got %v", allErrors)
@@ -86,7 +85,6 @@ func TestValidateKubeletConfiguration(t *testing.T) {
 	HairpinMode: kubeletconfig.PromiscuousBridge,
 	NodeLeaseDurationSeconds: 1,
 	CPUCFSQuotaPeriod: metav1.Duration{Duration: 100 * time.Millisecond},
-	TopologyManagerPolicy: "none",
 	ReservedSystemCPUs: "0-3",
 }
 if allErrors := ValidateKubeletConfiguration(successCase2); allErrors != nil {
@@ -118,9 +116,8 @@ func TestValidateKubeletConfiguration(t *testing.T) {
 	HairpinMode: "foo",
 	NodeLeaseDurationSeconds: -1,
 	CPUCFSQuotaPeriod: metav1.Duration{Duration: 0},
-	TopologyManagerPolicy: "",
 }
-const numErrsErrorCase1 = 26
+const numErrsErrorCase1 = 25
 if allErrors := ValidateKubeletConfiguration(errorCase1); len(allErrors.(utilerrors.Aggregate).Errors()) != numErrsErrorCase1 {
 	t.Errorf("expect %d errors, got %v", numErrsErrorCase1, len(allErrors.(utilerrors.Aggregate).Errors()))
 }
@@ -152,7 +149,6 @@ func TestValidateKubeletConfiguration(t *testing.T) {
 	HairpinMode: kubeletconfig.PromiscuousBridge,
 	NodeLeaseDurationSeconds: 1,
 	CPUCFSQuotaPeriod: metav1.Duration{Duration: 100 * time.Millisecond},
-	TopologyManagerPolicy: "none",
 	ReservedSystemCPUs: "0-3",
 }
 const numErrsErrorCase2 = 1
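These test configs no longer set TopologyManagerPolicy explicitly, and the expected error count for errorCase1 drops from 26 to 25 to match the validator's updated behaviour. The validation code itself is not in this diff; as a rough illustration only (validateTopologyManagerPolicy is a hypothetical helper, not the real function), the policy check amounts to restricting the field to the kubelet's documented Topology Manager policies:

package validation // hypothetical package name for this sketch

import "fmt"

// validateTopologyManagerPolicy is illustrative; the real check lives in the
// kubelet's config validation and may be structured differently.
func validateTopologyManagerPolicy(policy string) error {
	switch policy {
	case "none", "best-effort", "restricted", "single-numa-node":
		// Recognized Topology Manager policies.
		return nil
	default:
		return fmt.Errorf("invalid TopologyManagerPolicy %q", policy)
	}
}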


@@ -18,6 +18,7 @@ package topologymanager
 import (
 	"fmt"
+	"sync"

 	"k8s.io/api/core/v1"
 	"k8s.io/klog"
@@ -54,6 +55,7 @@ type Manager interface {
 }

 type manager struct {
+	mutex sync.Mutex
 	//The list of components registered with the Manager
 	hintProviders []HintProvider
 	//Mapping of a Pods mapping of Containers and their TopologyHints
@@ -203,13 +205,18 @@ func (m *manager) AddHintProvider(h HintProvider) {
 }

 func (m *manager) AddContainer(pod *v1.Pod, containerID string) error {
+	m.mutex.Lock()
+	defer m.mutex.Unlock()
 	m.podMap[containerID] = string(pod.UID)
 	return nil
 }

 func (m *manager) RemoveContainer(containerID string) error {
+	m.mutex.Lock()
+	defer m.mutex.Unlock()
 	klog.Infof("[topologymanager] RemoveContainer - Container ID: %v", containerID)
 	podUIDString := m.podMap[containerID]
 	delete(m.podMap, containerID)
 	if _, exists := m.podTopologyHints[podUIDString]; exists {
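The new mutex matters because AddContainer and RemoveContainer can be called from different kubelet goroutines, and concurrent writes to a plain Go map such as podMap are a data race. A minimal standalone sketch of the same pattern, using simplified stand-in types rather than the manager's real fields:

package main

import "sync"

// podTracker is a stand-in for the manager's bookkeeping: a map guarded by a mutex.
type podTracker struct {
	mutex  sync.Mutex
	podMap map[string]string // containerID -> pod UID
}

func (t *podTracker) add(containerID, podUID string) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	t.podMap[containerID] = podUID
}

func (t *podTracker) remove(containerID string) {
	t.mutex.Lock()
	defer t.mutex.Unlock()
	delete(t.podMap, containerID)
}

func main() {
	t := &podTracker{podMap: map[string]string{}}
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(2)
		go func() { defer wg.Done(); t.add("ctr", "pod") }()
		go func() { defer wg.Done(); t.remove("ctr") }()
	}
	// With the mutex, the -race detector reports no data race for these
	// concurrent add/remove calls; without it, the same pattern would fail.
	wg.Wait()
}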