diff --git a/pkg/kubelet/eviction/BUILD b/pkg/kubelet/eviction/BUILD
index 65617ac5790..06b695d89a4 100644
--- a/pkg/kubelet/eviction/BUILD
+++ b/pkg/kubelet/eviction/BUILD
@@ -54,7 +54,6 @@ go_test(
         "//pkg/api/v1:go_default_library",
         "//pkg/kubelet/api/v1alpha1/stats:go_default_library",
         "//pkg/kubelet/lifecycle:go_default_library",
-        "//pkg/kubelet/types:go_default_library",
         "//pkg/quota:go_default_library",
         "//vendor:k8s.io/apimachinery/pkg/api/resource",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
diff --git a/pkg/kubelet/eviction/eviction_manager.go b/pkg/kubelet/eviction/eviction_manager.go
index 8d55989faa3..4726df19fa7 100644
--- a/pkg/kubelet/eviction/eviction_manager.go
+++ b/pkg/kubelet/eviction/eviction_manager.go
@@ -33,7 +33,6 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
 	"k8s.io/kubernetes/pkg/kubelet/qos"
 	"k8s.io/kubernetes/pkg/kubelet/server/stats"
-	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/kubelet/util/format"
 )

@@ -110,7 +109,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd
 	// the node has memory pressure, admit if not best-effort
 	if hasNodeCondition(m.nodeConditions, v1.NodeMemoryPressure) {
 		notBestEffort := v1.PodQOSBestEffort != qos.GetPodQOS(attrs.Pod)
-		if notBestEffort || kubetypes.IsCriticalPod(attrs.Pod) {
+		if notBestEffort {
 			return lifecycle.PodAdmitResult{Admit: true}
 		}
 	}
diff --git a/pkg/kubelet/eviction/eviction_manager_test.go b/pkg/kubelet/eviction/eviction_manager_test.go
index e441b4204bb..f6e74bbe077 100644
--- a/pkg/kubelet/eviction/eviction_manager_test.go
+++ b/pkg/kubelet/eviction/eviction_manager_test.go
@@ -28,7 +28,8 @@ import (
 	"k8s.io/kubernetes/pkg/api/v1"
 	statsapi "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
 	"k8s.io/kubernetes/pkg/kubelet/lifecycle"
-	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
+	"k8s.io/kubernetes/pkg/types"
+	"k8s.io/kubernetes/pkg/util/clock"
 )

 // mockPodKiller is used to testing which pod is killed
@@ -212,8 +213,6 @@ func TestMemoryPressure(t *testing.T) {
 	// create a best effort pod to test admission
 	bestEffortPodToAdmit, _ := podMaker("best-admit", newResourceList("", ""), newResourceList("", ""), "0Gi")
 	burstablePodToAdmit, _ := podMaker("burst-admit", newResourceList("100m", "100Mi"), newResourceList("200m", "200Mi"), "0Gi")
-	criticalBestEffortPodToAdmit, _ := podMaker("critical-best-admit", newResourceList("", ""), newResourceList("", ""), "0Gi")
-	criticalBestEffortPodToAdmit.ObjectMeta.Annotations = map[string]string{kubetypes.CriticalPodAnnotationKey: ""}

 	// synchronize
 	manager.synchronize(diskInfoProvider, activePodsFunc)
@@ -224,8 +223,8 @@ func TestMemoryPressure(t *testing.T) {
 	}

 	// try to admit our pods (they should succeed)
-	expected := []bool{true, true, true}
-	for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit, criticalBestEffortPodToAdmit} {
+	expected := []bool{true, true}
+	for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
 		if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
 			t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
 		}
@@ -300,10 +299,9 @@ func TestMemoryPressure(t *testing.T) {
 		t.Errorf("Manager chose to kill pod with incorrect grace period. Expected: %d, actual: %d", 0, observedGracePeriod)
 	}

-	// the best-effort pod without critical annotation should not admit,
-	// burstable and critical pods should
-	expected = []bool{false, true, true}
-	for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit, criticalBestEffortPodToAdmit} {
+	// the best-effort pod should not admit, burstable should
+	expected = []bool{false, true}
+	for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
 		if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
 			t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
 		}
@@ -325,9 +323,9 @@ func TestMemoryPressure(t *testing.T) {
 		t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod.Name)
 	}

-	// the best-effort pod should not admit, burstable and critical pods should
-	expected = []bool{false, true, true}
-	for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit, criticalBestEffortPodToAdmit} {
+	// the best-effort pod should not admit, burstable should
+	expected = []bool{false, true}
+	for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
 		if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
 			t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
 		}
@@ -350,8 +348,8 @@ func TestMemoryPressure(t *testing.T) {
 	}

 	// all pods should admit now
-	expected = []bool{true, true, true}
-	for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit, criticalBestEffortPodToAdmit} {
+	expected = []bool{true, true}
+	for i, pod := range []*v1.Pod{bestEffortPodToAdmit, burstablePodToAdmit} {
 		if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: pod}); expected[i] != result.Admit {
 			t.Errorf("Admit pod: %v, expected: %v, actual: %v", pod, expected[i], result.Admit)
 		}
diff --git a/pkg/kubelet/pod/pod_manager.go b/pkg/kubelet/pod/pod_manager.go
index dd069b0fa3e..39d9e3c4791 100644
--- a/pkg/kubelet/pod/pod_manager.go
+++ b/pkg/kubelet/pod/pod_manager.go
@@ -23,6 +23,7 @@ import (
 	"k8s.io/kubernetes/pkg/api/v1"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	"k8s.io/kubernetes/pkg/kubelet/secret"
+	"k8s.io/kubernetes/pkg/types"
 )

 // Manager stores and manages access to pods, maintaining the mappings
diff --git a/pkg/kubelet/types/pod_update.go b/pkg/kubelet/types/pod_update.go
index c91077c1c23..b99ec9de6ae 100644
--- a/pkg/kubelet/types/pod_update.go
+++ b/pkg/kubelet/types/pod_update.go
@@ -28,15 +28,6 @@ const ConfigMirrorAnnotationKey = "kubernetes.io/config.mirror"
 const ConfigFirstSeenAnnotationKey = "kubernetes.io/config.seen"
 const ConfigHashAnnotationKey = "kubernetes.io/config.hash"

-// This key needs to sync with the key used by the rescheduler, which currently
-// lives in contrib. Its presence indicates 2 things, as far as the kubelet is
-// concerned:
-// 1. Resource related admission checks will prioritize the admission of
-//    pods bearing the key, over pods without the key, regardless of QoS.
-// 2. The OOM score of pods bearing the key will be <= pods without
-//    the key (where the <= part is determied by QoS).
-const CriticalPodAnnotationKey = "scheduler.alpha.kubernetes.io/critical-pod"
-
 // PodOperation defines what changes will be made on a pod configuration.
 type PodOperation int
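
For reference, the net effect of the `Admit` change above can be sketched in isolation: with the critical-pod exemption removed, a node under memory pressure admits a pod based only on its QoS class. The snippet below is a minimal, self-contained model of that decision; `qosClass` and `admitUnderMemoryPressure` are illustrative names for this sketch, not part of the kubelet API.

```go
package main

import "fmt"

// qosClass is a simplified stand-in for the kubelet's QoS classification.
type qosClass string

const (
	bestEffort qosClass = "BestEffort"
	burstable  qosClass = "Burstable"
	guaranteed qosClass = "Guaranteed"
)

// admitUnderMemoryPressure models the check in eviction_manager.go after this
// change: under memory pressure, only non-best-effort pods are admitted.
// There is no longer a special case for pods carrying the critical-pod
// annotation.
func admitUnderMemoryPressure(hasMemoryPressure bool, qos qosClass) bool {
	if !hasMemoryPressure {
		return true
	}
	return qos != bestEffort
}

func main() {
	for _, qos := range []qosClass{bestEffort, burstable, guaranteed} {
		fmt.Printf("memory pressure, %s pod admitted: %v\n",
			qos, admitUnderMemoryPressure(true, qos))
	}
}
```

This matches the updated test expectations: under memory pressure the best-effort pod is rejected and the burstable pod is admitted, and once pressure clears all pods admit again.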