Merge pull request #36190 from dashpole/revert_node_inode_pressure_split

Automatic merge from submit-queue

We now report only DiskPressure to users, and no longer report InodePressure.

See #36180 for more information on why #33218 was reverted.
Kubernetes Submit Queue authored on 2016-11-06 03:00:34 -08:00; committed by GitHub
commit 4b1e36f970
12 changed files with 21 additions and 288 deletions
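For orientation before the per-file diffs: after this revert, both inode signals (imagefs and nodefs inodesFree) map to the existing NodeDiskPressure condition in signalToNodeCondition, and the separate InodePressure condition, IsUnderInodePressure(), setNodeInodePressureCondition, and the CheckNodeInodePressure scheduler predicate are removed. The short Go program below is an illustrative sketch of the resulting mapping only; it is not part of this diff, and the type names and signal string values are assumptions rather than code copied from the tree.

package main

import "fmt"

// Signal and NodeConditionType stand in for the eviction Signal and
// api.NodeConditionType types; the string values are illustrative.
type Signal string
type NodeConditionType string

const (
	SignalImageFsAvailable  Signal = "imagefs.available"
	SignalImageFsInodesFree Signal = "imagefs.inodesFree"
	SignalNodeFsAvailable   Signal = "nodefs.available"
	SignalNodeFsInodesFree  Signal = "nodefs.inodesFree"

	NodeDiskPressure NodeConditionType = "DiskPressure"
)

func main() {
	// After this revert, inode-exhaustion signals feed the DiskPressure
	// condition instead of a dedicated InodePressure condition.
	signalToNodeCondition := map[Signal]NodeConditionType{
		SignalImageFsAvailable:  NodeDiskPressure,
		SignalImageFsInodesFree: NodeDiskPressure,
		SignalNodeFsAvailable:   NodeDiskPressure,
		SignalNodeFsInodesFree:  NodeDiskPressure,
	}
	for signal, condition := range signalToNodeCondition {
		fmt.Printf("%-22s -> %s\n", signal, condition)
	}
}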

View File

@@ -2578,8 +2578,6 @@ const (
NodeDiskPressure NodeConditionType = "DiskPressure"
// NodeNetworkUnavailable means that network for the node is not correctly configured.
NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
// NodeInodePressure means the kubelet is under pressure due to insufficient available inodes.
NodeInodePressure NodeConditionType = "InodePressure"
)
type NodeCondition struct {

View File

@@ -17,6 +17,7 @@ limitations under the License.
package eviction
import (
"fmt"
"sort"
"sync"
"time"
@@ -113,7 +114,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAdmitResult {
return lifecycle.PodAdmitResult{
Admit: false,
Reason: reason,
Message: message,
Message: fmt.Sprintf(message, m.nodeConditions),
}
}
@@ -138,13 +139,6 @@ func (m *managerImpl) IsUnderDiskPressure() bool {
return hasNodeCondition(m.nodeConditions, api.NodeDiskPressure)
}
// IsUnderInodePressure returns true if the node is under inode pressure.
func (m *managerImpl) IsUnderInodePressure() bool {
m.RLock()
defer m.RUnlock()
return hasNodeCondition(m.nodeConditions, api.NodeInodePressure)
}
// synchronize is the main control loop that enforces eviction thresholds.
func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc) {
// if we have nothing to do, just return
@@ -261,11 +255,11 @@ func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc) {
pod := activePods[i]
status := api.PodStatus{
Phase: api.PodFailed,
Message: message,
Message: fmt.Sprintf(message, resourceToReclaim),
Reason: reason,
}
// record that we are evicting the pod
m.recorder.Eventf(pod, api.EventTypeWarning, reason, message)
m.recorder.Eventf(pod, api.EventTypeWarning, reason, fmt.Sprintf(message, resourceToReclaim))
gracePeriodOverride := int64(0)
if softEviction {
gracePeriodOverride = m.config.MaxPodGracePeriodSeconds

View File

@@ -1023,8 +1023,8 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
// synchronize
manager.synchronize(diskInfoProvider, activePodsFunc)
// we should not have inode pressure
if manager.IsUnderInodePressure() {
// we should not have disk pressure
if manager.IsUnderDiskPressure() {
t.Errorf("Manager should not report inode pressure")
}
@@ -1038,8 +1038,8 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
summaryProvider.result = summaryStatsMaker("1.5Mi", "4Mi", podStats)
manager.synchronize(diskInfoProvider, activePodsFunc)
// we should have inode pressure
if !manager.IsUnderInodePressure() {
// we should have disk pressure
if !manager.IsUnderDiskPressure() {
t.Errorf("Manager should report inode pressure since soft threshold was met")
}
@@ -1053,8 +1053,8 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
summaryProvider.result = summaryStatsMaker("1.5Mi", "4Mi", podStats)
manager.synchronize(diskInfoProvider, activePodsFunc)
// we should have inode pressure
if !manager.IsUnderInodePressure() {
// we should have disk pressure
if !manager.IsUnderDiskPressure() {
t.Errorf("Manager should report inode pressure since soft threshold was met")
}
@@ -1078,8 +1078,8 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
summaryProvider.result = summaryStatsMaker("3Mi", "4Mi", podStats)
manager.synchronize(diskInfoProvider, activePodsFunc)
// we should not have inode pressure
if manager.IsUnderInodePressure() {
// we should not have disk pressure
if manager.IsUnderDiskPressure() {
t.Errorf("Manager should not report inode pressure")
}
@@ -1088,8 +1088,8 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
summaryProvider.result = summaryStatsMaker("0.5Mi", "4Mi", podStats)
manager.synchronize(diskInfoProvider, activePodsFunc)
// we should have inode pressure
if !manager.IsUnderInodePressure() {
// we should have disk pressure
if !manager.IsUnderDiskPressure() {
t.Errorf("Manager should report inode pressure")
}
@@ -1113,8 +1113,8 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
podKiller.pod = nil // reset state
manager.synchronize(diskInfoProvider, activePodsFunc)
// we should have inode pressure (because transition period not yet met)
if !manager.IsUnderInodePressure() {
// we should have disk pressure (because transition period not yet met)
if !manager.IsUnderDiskPressure() {
t.Errorf("Manager should report inode pressure")
}
@@ -1134,8 +1134,8 @@ func TestInodePressureNodeFsInodes(t *testing.T) {
podKiller.pod = nil // reset state
manager.synchronize(diskInfoProvider, activePodsFunc)
// we should not have inode pressure (because transition period met)
if manager.IsUnderInodePressure() {
// we should not have disk pressure (because transition period met)
if manager.IsUnderDiskPressure() {
t.Errorf("Manager should not report inode pressure")
}

View File

@@ -38,7 +38,7 @@ const (
// the reason reported back in status.
reason = "Evicted"
// the message associated with the reason.
message = "The node was low on compute resources."
message = "The node was low on resource: %v."
// disk, in bytes. internal to this module, used to account for local disk usage.
resourceDisk api.ResourceName = "disk"
// inodes, number. internal to this module, used to account for local disk inode consumption.
@@ -68,8 +68,8 @@ func init() {
signalToNodeCondition[SignalMemoryAvailable] = api.NodeMemoryPressure
signalToNodeCondition[SignalImageFsAvailable] = api.NodeDiskPressure
signalToNodeCondition[SignalNodeFsAvailable] = api.NodeDiskPressure
signalToNodeCondition[SignalImageFsInodesFree] = api.NodeInodePressure
signalToNodeCondition[SignalNodeFsInodesFree] = api.NodeInodePressure
signalToNodeCondition[SignalImageFsInodesFree] = api.NodeDiskPressure
signalToNodeCondition[SignalNodeFsInodesFree] = api.NodeDiskPressure
// map signals to resources (and vice-versa)
signalToResource = map[Signal]api.ResourceName{}

View File

@@ -105,9 +105,6 @@ type Manager interface {
// IsUnderDiskPressure returns true if the node is under disk pressure.
IsUnderDiskPressure() bool
// IsUnderInodePressure returns true if the node is under inode pressure.
IsUnderInodePressure() bool
}
// DiskInfoProvider is responsible for informing the manager how disk is configured.

View File

@@ -766,65 +766,6 @@ func (kl *Kubelet) setNodeDiskPressureCondition(node *api.Node) {
}
}
// setNodeInodePressureCondition for the node.
// TODO: this needs to move somewhere centralized...
func (kl *Kubelet) setNodeInodePressureCondition(node *api.Node) {
currentTime := unversioned.NewTime(kl.clock.Now())
var condition *api.NodeCondition
// Check if NodeInodePressure condition already exists and if it does, just pick it up for update.
for i := range node.Status.Conditions {
if node.Status.Conditions[i].Type == api.NodeInodePressure {
condition = &node.Status.Conditions[i]
}
}
newCondition := false
// If the NodeInodePressure condition doesn't exist, create one
if condition == nil {
condition = &api.NodeCondition{
Type: api.NodeInodePressure,
Status: api.ConditionUnknown,
}
// cannot be appended to node.Status.Conditions here because it gets
// copied to the slice. So if we append to the slice here none of the
// updates we make below are reflected in the slice.
newCondition = true
}
// Update the heartbeat time
condition.LastHeartbeatTime = currentTime
// Note: The conditions below take care of the case when a new NodeInodePressure condition is
// created and as well as the case when the condition already exists. When a new condition
// is created its status is set to api.ConditionUnknown which matches either
// condition.Status != api.ConditionTrue or
// condition.Status != api.ConditionFalse in the conditions below depending on whether
// the kubelet is under inode pressure or not.
if kl.evictionManager.IsUnderInodePressure() {
if condition.Status != api.ConditionTrue {
condition.Status = api.ConditionTrue
condition.Reason = "KubeletHasInodePressure"
condition.Message = "kubelet has inode pressure"
condition.LastTransitionTime = currentTime
kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasInodePressure")
}
} else {
if condition.Status != api.ConditionFalse {
condition.Status = api.ConditionFalse
condition.Reason = "KubeletHasNoInodePressure"
condition.Message = "kubelet has no inode pressure"
condition.LastTransitionTime = currentTime
kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasNoInodePressure")
}
}
if newCondition {
node.Status.Conditions = append(node.Status.Conditions, *condition)
}
}
// Set OODcondition for the node.
func (kl *Kubelet) setNodeOODCondition(node *api.Node) {
currentTime := unversioned.NewTime(kl.clock.Now())
@@ -943,7 +884,6 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*api.Node) error {
withoutError(kl.setNodeOODCondition),
withoutError(kl.setNodeMemoryPressureCondition),
withoutError(kl.setNodeDiskPressureCondition),
withoutError(kl.setNodeInodePressureCondition),
withoutError(kl.setNodeReadyCondition),
withoutError(kl.setNodeVolumesInUseStatus),
withoutError(kl.recordNodeSchedulableEvent),

View File

@@ -149,14 +149,6 @@ func TestUpdateNewNodeStatus(t *testing.T) {
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{
Type: api.NodeInodePressure,
Status: api.ConditionFalse,
Reason: "KubeletHasNoInodePressure",
Message: fmt.Sprintf("kubelet has no inode pressure"),
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{
Type: api.NodeReady,
Status: api.ConditionTrue,
@@ -349,14 +341,6 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
{
Type: api.NodeInodePressure,
Status: api.ConditionFalse,
Reason: "KubeletHasSufficientInode",
Message: fmt.Sprintf("kubelet has sufficient inodes available"),
LastHeartbeatTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
},
{
Type: api.NodeReady,
Status: api.ConditionTrue,
@@ -429,14 +413,6 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{
Type: api.NodeInodePressure,
Status: api.ConditionFalse,
Reason: "KubeletHasSufficientInode",
Message: fmt.Sprintf("kubelet has sufficient inodes available"),
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{
Type: api.NodeReady,
Status: api.ConditionTrue,
@@ -742,14 +718,6 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{
Type: api.NodeInodePressure,
Status: api.ConditionFalse,
Reason: "KubeletHasNoInodePressure",
Message: fmt.Sprintf("kubelet has no inode pressure"),
LastHeartbeatTime: unversioned.Time{},
LastTransitionTime: unversioned.Time{},
},
{}, //placeholder
},
NodeInfo: api.NodeSystemInfo{

View File

@@ -37,7 +37,6 @@ var (
ErrMaxVolumeCountExceeded = newPredicateFailureError("MaxVolumeCount")
ErrNodeUnderMemoryPressure = newPredicateFailureError("NodeUnderMemoryPressure")
ErrNodeUnderDiskPressure = newPredicateFailureError("NodeUnderDiskPressure")
ErrNodeUnderInodePressure = newPredicateFailureError("NodeUnderInodePressure")
// ErrFakePredicate is used for test only. The fake predicates returning false also returns error
// as ErrFakePredicate.
ErrFakePredicate = newPredicateFailureError("FakePredicateError")

View File

@@ -1219,21 +1219,3 @@ func CheckNodeDiskPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
return true, nil, nil
}
// CheckNodeInodePressurePredicate checks if a pod can be scheduled on a node
// reporting inode pressure condition.
func CheckNodeInodePressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
node := nodeInfo.Node()
if node == nil {
return false, nil, fmt.Errorf("node not found")
}
// is node under pressure?
for _, cond := range node.Status.Conditions {
if cond.Type == api.NodeInodePressure && cond.Status == api.ConditionTrue {
return false, []algorithm.PredicateFailureReason{ErrNodeUnderInodePressure}, nil
}
}
return true, nil, nil
}

View File

@@ -3142,75 +3142,3 @@ func TestPodSchedulesOnNodeWithDiskPressureCondition(t *testing.T) {
}
}
}
func TestPodSchedulesOnNodeWithInodePressureCondition(t *testing.T) {
pod := &api.Pod{
Spec: api.PodSpec{
Containers: []api.Container{
{
Name: "container",
Image: "image",
ImagePullPolicy: "Always",
},
},
},
}
// specify a node with no inode pressure condition on
noPressureNode := &api.Node{
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
{
Type: api.NodeReady,
Status: api.ConditionTrue,
},
},
},
}
// specify a node with pressure condition on
pressureNode := &api.Node{
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
{
Type: api.NodeInodePressure,
Status: api.ConditionTrue,
},
},
},
}
tests := []struct {
pod *api.Pod
nodeInfo *schedulercache.NodeInfo
fits bool
name string
}{
{
pod: pod,
nodeInfo: makeEmptyNodeInfo(noPressureNode),
fits: true,
name: "pod schedulable on node without inode pressure condition on",
},
{
pod: pod,
nodeInfo: makeEmptyNodeInfo(pressureNode),
fits: false,
name: "pod not schedulable on node with inode pressure condition on",
},
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeUnderInodePressure}
for _, test := range tests {
fits, reasons, err := CheckNodeInodePressurePredicate(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
if err != nil {
t.Errorf("%s: unexpected error: %v", test.name, err)
}
if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.name, reasons, expectedFailureReasons)
}
if fits != test.fits {
t.Errorf("%s: expected %v got %v", test.name, test.fits, fits)
}
}
}

View File

@@ -306,76 +306,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
},
},
},
// Do not change this JSON after the corresponding release has been tagged.
// A failure indicates backwards compatibility with the specified release was broken.
"1.5": {
JSON: `{
"kind": "Policy",
"apiVersion": "v1",
"predicates": [
{"name": "MatchNodeSelector"},
{"name": "PodFitsResources"},
{"name": "PodFitsHostPorts"},
{"name": "HostName"},
{"name": "NoDiskConflict"},
{"name": "NoVolumeZoneConflict"},
{"name": "PodToleratesNodeTaints"},
{"name": "CheckNodeMemoryPressure"},
{"name": "CheckNodeDiskPressure"},
{"name": "CheckNodeInodePressure"},
{"name": "MaxEBSVolumeCount"},
{"name": "MaxGCEPDVolumeCount"},
{"name": "MatchInterPodAffinity"},
{"name": "GeneralPredicates"},
{"name": "TestServiceAffinity", "argument": {"serviceAffinity" : {"labels" : ["region"]}}},
{"name": "TestLabelsPresence", "argument": {"labelsPresence" : {"labels" : ["foo"], "presence":true}}}
],"priorities": [
{"name": "EqualPriority", "weight": 2},
{"name": "ImageLocalityPriority", "weight": 2},
{"name": "LeastRequestedPriority", "weight": 2},
{"name": "BalancedResourceAllocation", "weight": 2},
{"name": "SelectorSpreadPriority", "weight": 2},
{"name": "NodePreferAvoidPodsPriority", "weight": 2},
{"name": "NodeAffinityPriority", "weight": 2},
{"name": "TaintTolerationPriority", "weight": 2},
{"name": "InterPodAffinityPriority", "weight": 2},
{"name": "MostRequestedPriority", "weight": 2}
]
}`,
ExpectedPolicy: schedulerapi.Policy{
Predicates: []schedulerapi.PredicatePolicy{
{Name: "MatchNodeSelector"},
{Name: "PodFitsResources"},
{Name: "PodFitsHostPorts"},
{Name: "HostName"},
{Name: "NoDiskConflict"},
{Name: "NoVolumeZoneConflict"},
{Name: "PodToleratesNodeTaints"},
{Name: "CheckNodeMemoryPressure"},
{Name: "CheckNodeDiskPressure"},
{Name: "CheckNodeInodePressure"},
{Name: "MaxEBSVolumeCount"},
{Name: "MaxGCEPDVolumeCount"},
{Name: "MatchInterPodAffinity"},
{Name: "GeneralPredicates"},
{Name: "TestServiceAffinity", Argument: &schedulerapi.PredicateArgument{ServiceAffinity: &schedulerapi.ServiceAffinity{Labels: []string{"region"}}}},
{Name: "TestLabelsPresence", Argument: &schedulerapi.PredicateArgument{LabelsPresence: &schedulerapi.LabelsPresence{Labels: []string{"foo"}, Presence: true}}},
},
Priorities: []schedulerapi.PriorityPolicy{
{Name: "EqualPriority", Weight: 2},
{Name: "ImageLocalityPriority", Weight: 2},
{Name: "LeastRequestedPriority", Weight: 2},
{Name: "BalancedResourceAllocation", Weight: 2},
{Name: "SelectorSpreadPriority", Weight: 2},
{Name: "NodePreferAvoidPodsPriority", Weight: 2},
{Name: "NodeAffinityPriority", Weight: 2},
{Name: "TaintTolerationPriority", Weight: 2},
{Name: "InterPodAffinityPriority", Weight: 2},
{Name: "MostRequestedPriority", Weight: 2},
},
},
},
}
registeredPredicates := sets.NewString(factory.ListRegisteredFitPredicates()...)

View File

@@ -158,9 +158,6 @@ func defaultPredicates() sets.String {
// Fit is determined by node disk pressure condition.
factory.RegisterFitPredicate("CheckNodeDiskPressure", predicates.CheckNodeDiskPressurePredicate),
// Fit is determined by node inode pressure condition.
factory.RegisterFitPredicate("CheckNodeInodePressure", predicates.CheckNodeInodePressurePredicate),
)
}