Mirror of https://github.com/k3s-io/kubernetes.git (synced 2025-08-02 08:17:26 +00:00)
Merge pull request #33218 from dashpole/NodeInodePressure_type
Automatic merge from submit-queue

Split NodeDiskPressure into NodeInodePressure and NodeDiskPressure

Adds NodeInodePressure as a NodeConditionType; SignalImageFsInodesFree and SignalNodeFsInodesFree now signal this condition instead of DiskPressure. Also adds a scheduler predicate so that InodePressure is taken into account when scheduling pods.
This commit is contained in commit 092f9edb06.
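For orientation before the diffs, here is a minimal, hedged sketch (plain Go with simplified stand-in types, not the real Kubernetes API) of the flow this change wires up: an inode eviction signal maps to the new InodePressure node condition, and the scheduler predicate then keeps new pods off a node reporting that condition.

package main

import "fmt"

// Simplified stand-ins for the types this PR touches; the real definitions
// live in the Kubernetes api, eviction, and scheduler packages.
type Signal string
type NodeConditionType string
type ConditionStatus string

const (
	SignalNodeFsInodesFree  Signal            = "nodefs.inodesFree"
	SignalImageFsInodesFree Signal            = "imagefs.inodesFree"
	NodeInodePressure       NodeConditionType = "InodePressure"
	ConditionTrue           ConditionStatus   = "True"
)

type NodeCondition struct {
	Type   NodeConditionType
	Status ConditionStatus
}

// signalToNodeCondition mirrors the remapping below: the two inode signals
// now raise InodePressure instead of DiskPressure.
var signalToNodeCondition = map[Signal]NodeConditionType{
	SignalNodeFsInodesFree:  NodeInodePressure,
	SignalImageFsInodesFree: NodeInodePressure,
}

// fitsOnNode sketches the scheduler-side check: a pod does not fit on a node
// whose InodePressure condition is true.
func fitsOnNode(conditions []NodeCondition) bool {
	for _, cond := range conditions {
		if cond.Type == NodeInodePressure && cond.Status == ConditionTrue {
			return false
		}
	}
	return true
}

func main() {
	// The eviction manager observes an inode signal and reports the mapped condition...
	cond := NodeCondition{Type: signalToNodeCondition[SignalNodeFsInodesFree], Status: ConditionTrue}
	// ...and the scheduler predicate rejects new pods for that node.
	fmt.Println(fitsOnNode([]NodeCondition{cond})) // false
}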
@@ -2224,6 +2224,8 @@ const (
 	NodeDiskPressure NodeConditionType = "DiskPressure"
 	// NodeNetworkUnavailable means that network for the node is not correctly configured.
 	NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable"
+	// NodeInodePressure means the kubelet is under pressure due to insufficient available inodes.
+	NodeInodePressure NodeConditionType = "InodePressure"
 )

 type NodeCondition struct {
@@ -136,6 +136,13 @@ func (m *managerImpl) IsUnderDiskPressure() bool {
 	return hasNodeCondition(m.nodeConditions, api.NodeDiskPressure)
 }

+// IsUnderInodePressure returns true if the node is under inode pressure.
+func (m *managerImpl) IsUnderInodePressure() bool {
+	m.RLock()
+	defer m.RUnlock()
+	return hasNodeCondition(m.nodeConditions, api.NodeInodePressure)
+}
+
 // synchronize is the main control loop that enforces eviction thresholds.
 func (m *managerImpl) synchronize(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc) {
 	// if we have nothing to do, just return
@@ -916,7 +916,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 }

 func TestDiskPressureNodeFsInodes(t *testing.T) {
-	// TODO: we need to know inodes used when cadvisor supports per container stats
+	// TODO(dashpole): we need to know inodes used when cadvisor supports per container stats
 	podMaker := func(name string, requests api.ResourceList, limits api.ResourceList) (*api.Pod, statsapi.PodStats) {
 		pod := newPod(name, []api.Container{
 			newContainer(name, requests, limits),
@@ -943,7 +943,7 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
 		}
 		return result
 	}
-	// TODO: pass inodes used in future when supported by cadvisor.
+	// TODO(dashpole): pass inodes used in future when supported by cadvisor.
 	podsToMake := []struct {
 		name     string
 		requests api.ResourceList
@@ -1013,9 +1013,9 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
 	// synchronize
 	manager.synchronize(diskInfoProvider, activePodsFunc)

-	// we should not have disk pressure
-	if manager.IsUnderDiskPressure() {
-		t.Errorf("Manager should not report disk pressure")
+	// we should not have inode pressure
+	if manager.IsUnderInodePressure() {
+		t.Errorf("Manager should not report inode pressure")
 	}

 	// try to admit our pod (should succeed)
@@ -1028,9 +1028,9 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
 	summaryProvider.result = summaryStatsMaker("1.5Mi", "4Mi", podStats)
 	manager.synchronize(diskInfoProvider, activePodsFunc)

-	// we should have disk pressure
-	if !manager.IsUnderDiskPressure() {
-		t.Errorf("Manager should report disk pressure since soft threshold was met")
+	// we should have inode pressure
+	if !manager.IsUnderInodePressure() {
+		t.Errorf("Manager should report inode pressure since soft threshold was met")
 	}

 	// verify no pod was yet killed because there has not yet been enough time passed.
@@ -1043,9 +1043,9 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
 	summaryProvider.result = summaryStatsMaker("1.5Mi", "4Mi", podStats)
 	manager.synchronize(diskInfoProvider, activePodsFunc)

-	// we should have disk pressure
-	if !manager.IsUnderDiskPressure() {
-		t.Errorf("Manager should report disk pressure since soft threshold was met")
+	// we should have inode pressure
+	if !manager.IsUnderInodePressure() {
+		t.Errorf("Manager should report inode pressure since soft threshold was met")
 	}

 	// verify the right pod was killed with the right grace period.
@@ -1063,24 +1063,24 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
 	podKiller.pod = nil
 	podKiller.gracePeriodOverride = nil

-	// remove disk pressure
+	// remove inode pressure
 	fakeClock.Step(20 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("3Mi", "4Mi", podStats)
 	manager.synchronize(diskInfoProvider, activePodsFunc)

-	// we should not have disk pressure
-	if manager.IsUnderDiskPressure() {
-		t.Errorf("Manager should not report disk pressure")
+	// we should not have inode pressure
+	if manager.IsUnderInodePressure() {
+		t.Errorf("Manager should not report inode pressure")
 	}

-	// induce disk pressure!
+	// induce inode pressure!
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("0.5Mi", "4Mi", podStats)
 	manager.synchronize(diskInfoProvider, activePodsFunc)

-	// we should have disk pressure
-	if !manager.IsUnderDiskPressure() {
-		t.Errorf("Manager should report disk pressure")
+	// we should have inode pressure
+	if !manager.IsUnderInodePressure() {
+		t.Errorf("Manager should report inode pressure")
 	}

 	// check the right pod was killed
@@ -1097,15 +1097,15 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
 		t.Errorf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, false, result.Admit)
 	}

-	// reduce disk pressure
+	// reduce inode pressure
 	fakeClock.Step(1 * time.Minute)
 	summaryProvider.result = summaryStatsMaker("3Mi", "4Mi", podStats)
 	podKiller.pod = nil // reset state
 	manager.synchronize(diskInfoProvider, activePodsFunc)

-	// we should have disk pressure (because transition period not yet met)
-	if !manager.IsUnderDiskPressure() {
-		t.Errorf("Manager should report disk pressure")
+	// we should have inode pressure (because transition period not yet met)
+	if !manager.IsUnderInodePressure() {
+		t.Errorf("Manager should report inode pressure")
 	}

 	// no pod should have been killed
@@ -1124,9 +1124,9 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
 	podKiller.pod = nil // reset state
 	manager.synchronize(diskInfoProvider, activePodsFunc)

-	// we should not have disk pressure (because transition period met)
-	if manager.IsUnderDiskPressure() {
-		t.Errorf("Manager should not report disk pressure")
+	// we should not have inode pressure (because transition period met)
+	if manager.IsUnderInodePressure() {
+		t.Errorf("Manager should not report inode pressure")
 	}

 	// no pod should have been killed
@@ -68,8 +68,8 @@ func init() {
 	signalToNodeCondition[SignalMemoryAvailable] = api.NodeMemoryPressure
 	signalToNodeCondition[SignalImageFsAvailable] = api.NodeDiskPressure
 	signalToNodeCondition[SignalNodeFsAvailable] = api.NodeDiskPressure
-	signalToNodeCondition[SignalImageFsInodesFree] = api.NodeDiskPressure
-	signalToNodeCondition[SignalNodeFsInodesFree] = api.NodeDiskPressure
+	signalToNodeCondition[SignalImageFsInodesFree] = api.NodeInodePressure
+	signalToNodeCondition[SignalNodeFsInodesFree] = api.NodeInodePressure

 	// map signals to resources (and vice-versa)
 	signalToResource = map[Signal]api.ResourceName{}
@@ -104,6 +104,9 @@ type Manager interface {

 	// IsUnderDiskPressure returns true if the node is under disk pressure.
 	IsUnderDiskPressure() bool
+
+	// IsUnderInodePressure returns true if the node is under inode pressure.
+	IsUnderInodePressure() bool
 }

 // DiskInfoProvider is responsible for informing the manager how disk is configured.
@@ -742,6 +742,65 @@ func (kl *Kubelet) setNodeDiskPressureCondition(node *api.Node) {
 	}
 }

+// setNodeInodePressureCondition for the node.
+// TODO: this needs to move somewhere centralized...
+func (kl *Kubelet) setNodeInodePressureCondition(node *api.Node) {
+	currentTime := unversioned.NewTime(kl.clock.Now())
+	var condition *api.NodeCondition
+
+	// Check if NodeInodePressure condition already exists and if it does, just pick it up for update.
+	for i := range node.Status.Conditions {
+		if node.Status.Conditions[i].Type == api.NodeInodePressure {
+			condition = &node.Status.Conditions[i]
+		}
+	}
+
+	newCondition := false
+	// If the NodeInodePressure condition doesn't exist, create one
+	if condition == nil {
+		condition = &api.NodeCondition{
+			Type:   api.NodeInodePressure,
+			Status: api.ConditionUnknown,
+		}
+		// cannot be appended to node.Status.Conditions here because it gets
+		// copied to the slice. So if we append to the slice here none of the
+		// updates we make below are reflected in the slice.
+		newCondition = true
+	}

+	// Update the heartbeat time
+	condition.LastHeartbeatTime = currentTime
+
+	// Note: The conditions below take care of the case when a new NodeInodePressure condition is
+	// created and as well as the case when the condition already exists. When a new condition
+	// is created its status is set to api.ConditionUnknown which matches either
+	// condition.Status != api.ConditionTrue or
+	// condition.Status != api.ConditionFalse in the conditions below depending on whether
+	// the kubelet is under inode pressure or not.
+	if kl.evictionManager.IsUnderInodePressure() {
+		if condition.Status != api.ConditionTrue {
+			condition.Status = api.ConditionTrue
+			condition.Reason = "KubeletHasInodePressure"
+			condition.Message = "kubelet has inode pressure"
+			condition.LastTransitionTime = currentTime
+			kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasInodePressure")
+		}
+	} else {
+		if condition.Status != api.ConditionFalse {
+			condition.Status = api.ConditionFalse
+			condition.Reason = "KubeletHasNoInodePressure"
+			condition.Message = "kubelet has no inode pressure"
+			condition.LastTransitionTime = currentTime
+			kl.recordNodeStatusEvent(api.EventTypeNormal, "NodeHasNoInodePressure")
+		}
+	}
+
+	if newCondition {
+		node.Status.Conditions = append(node.Status.Conditions, *condition)
+	}
+
+}
+
 // Set OODcondition for the node.
 func (kl *Kubelet) setNodeOODCondition(node *api.Node) {
 	currentTime := unversioned.NewTime(kl.clock.Now())
@@ -856,6 +915,7 @@ func (kl *Kubelet) defaultNodeStatusFuncs() []func(*api.Node) error {
 		withoutError(kl.setNodeOODCondition),
 		withoutError(kl.setNodeMemoryPressureCondition),
 		withoutError(kl.setNodeDiskPressureCondition),
+		withoutError(kl.setNodeInodePressureCondition),
 		withoutError(kl.setNodeReadyCondition),
 		withoutError(kl.setNodeVolumesInUseStatus),
 		withoutError(kl.recordNodeSchedulableEvent),
@@ -149,6 +149,14 @@ func TestUpdateNewNodeStatus(t *testing.T) {
 					LastHeartbeatTime:  unversioned.Time{},
 					LastTransitionTime: unversioned.Time{},
 				},
+				{
+					Type:               api.NodeInodePressure,
+					Status:             api.ConditionFalse,
+					Reason:             "KubeletHasNoInodePressure",
+					Message:            fmt.Sprintf("kubelet has no inode pressure"),
+					LastHeartbeatTime:  unversioned.Time{},
+					LastTransitionTime: unversioned.Time{},
+				},
 				{
 					Type:   api.NodeReady,
 					Status: api.ConditionTrue,
@@ -340,6 +348,14 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 					LastHeartbeatTime:  unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
 					LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
 				},
+				{
+					Type:               api.NodeInodePressure,
+					Status:             api.ConditionFalse,
+					Reason:             "KubeletHasSufficientInode",
+					Message:            fmt.Sprintf("kubelet has sufficient inodes available"),
+					LastHeartbeatTime:  unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+					LastTransitionTime: unversioned.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
+				},
 				{
 					Type:   api.NodeReady,
 					Status: api.ConditionTrue,
@@ -412,6 +428,14 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
 					LastHeartbeatTime:  unversioned.Time{},
 					LastTransitionTime: unversioned.Time{},
 				},
+				{
+					Type:               api.NodeInodePressure,
+					Status:             api.ConditionFalse,
+					Reason:             "KubeletHasSufficientInode",
+					Message:            fmt.Sprintf("kubelet has sufficient inodes available"),
+					LastHeartbeatTime:  unversioned.Time{},
+					LastTransitionTime: unversioned.Time{},
+				},
 				{
 					Type:   api.NodeReady,
 					Status: api.ConditionTrue,
@@ -716,6 +740,14 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
 					LastHeartbeatTime:  unversioned.Time{},
 					LastTransitionTime: unversioned.Time{},
 				},
+				{
+					Type:               api.NodeInodePressure,
+					Status:             api.ConditionFalse,
+					Reason:             "KubeletHasNoInodePressure",
+					Message:            fmt.Sprintf("kubelet has no inode pressure"),
+					LastHeartbeatTime:  unversioned.Time{},
+					LastTransitionTime: unversioned.Time{},
+				},
 				{}, //placeholder
 			},
 			NodeInfo: api.NodeSystemInfo{
@@ -37,6 +37,7 @@ var (
 	ErrMaxVolumeCountExceeded  = newPredicateFailureError("MaxVolumeCount")
 	ErrNodeUnderMemoryPressure = newPredicateFailureError("NodeUnderMemoryPressure")
 	ErrNodeUnderDiskPressure   = newPredicateFailureError("NodeUnderDiskPressure")
+	ErrNodeUnderInodePressure  = newPredicateFailureError("NodeUnderInodePressure")
 	// ErrFakePredicate is used for test only. The fake predicates returning false also returns error
 	// as ErrFakePredicate.
 	ErrFakePredicate = newPredicateFailureError("FakePredicateError")
@@ -1168,3 +1168,21 @@ func CheckNodeDiskPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *sc
 	}

 	return true, nil, nil
 }
+
+// CheckNodeInodePressurePredicate checks if a pod can be scheduled on a node
+// reporting inode pressure condition.
+func CheckNodeInodePressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+	node := nodeInfo.Node()
+	if node == nil {
+		return false, nil, fmt.Errorf("node not found")
+	}
+
+	// is node under pressure?
+	for _, cond := range node.Status.Conditions {
+		if cond.Type == api.NodeInodePressure && cond.Status == api.ConditionTrue {
+			return false, []algorithm.PredicateFailureReason{ErrNodeUnderInodePressure}, nil
+		}
+	}
+
+	return true, nil, nil
+}
@@ -3028,3 +3028,75 @@ func TestPodSchedulesOnNodeWithDiskPressureCondition(t *testing.T) {
 		}
 	}
 }
+
+func TestPodSchedulesOnNodeWithInodePressureCondition(t *testing.T) {
+	pod := &api.Pod{
+		Spec: api.PodSpec{
+			Containers: []api.Container{
+				{
+					Name:            "container",
+					Image:           "image",
+					ImagePullPolicy: "Always",
+				},
+			},
+		},
+	}
+
+	// specify a node with no inode pressure condition on
+	noPressureNode := &api.Node{
+		Status: api.NodeStatus{
+			Conditions: []api.NodeCondition{
+				{
+					Type:   api.NodeReady,
+					Status: api.ConditionTrue,
+				},
+			},
+		},
+	}
+
+	// specify a node with pressure condition on
+	pressureNode := &api.Node{
+		Status: api.NodeStatus{
+			Conditions: []api.NodeCondition{
+				{
+					Type:   api.NodeInodePressure,
+					Status: api.ConditionTrue,
+				},
+			},
+		},
+	}
+
+	tests := []struct {
+		pod      *api.Pod
+		nodeInfo *schedulercache.NodeInfo
+		fits     bool
+		name     string
+	}{
+		{
+			pod:      pod,
+			nodeInfo: makeEmptyNodeInfo(noPressureNode),
+			fits:     true,
+			name:     "pod schedulable on node without inode pressure condition on",
+		},
+		{
+			pod:      pod,
+			nodeInfo: makeEmptyNodeInfo(pressureNode),
+			fits:     false,
+			name:     "pod not schedulable on node with inode pressure condition on",
+		},
+	}
+	expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeUnderInodePressure}
+
+	for _, test := range tests {
+		fits, reasons, err := CheckNodeInodePressurePredicate(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo)
+		if err != nil {
+			t.Errorf("%s: unexpected error: %v", test.name, err)
+		}
+		if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
+			t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.name, reasons, expectedFailureReasons)
+		}
+		if fits != test.fits {
+			t.Errorf("%s: expected %v got %v", test.name, test.fits, fits)
+		}
+	}
+}
@@ -306,6 +306,76 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				},
 			},
 		},
+
+		// Do not change this JSON after the corresponding release has been tagged.
+		// A failure indicates backwards compatibility with the specified release was broken.
+		"1.5": {
+			JSON: `{
+		"kind": "Policy",
+		"apiVersion": "v1",
+		"predicates": [
+			{"name": "MatchNodeSelector"},
+			{"name": "PodFitsResources"},
+			{"name": "PodFitsHostPorts"},
+			{"name": "HostName"},
+			{"name": "NoDiskConflict"},
+			{"name": "NoVolumeZoneConflict"},
+			{"name": "PodToleratesNodeTaints"},
+			{"name": "CheckNodeMemoryPressure"},
+			{"name": "CheckNodeDiskPressure"},
+			{"name": "CheckNodeInodePressure"},
+			{"name": "MaxEBSVolumeCount"},
+			{"name": "MaxGCEPDVolumeCount"},
+			{"name": "MatchInterPodAffinity"},
+			{"name": "GeneralPredicates"},
+			{"name": "TestServiceAffinity", "argument": {"serviceAffinity" : {"labels" : ["region"]}}},
+			{"name": "TestLabelsPresence", "argument": {"labelsPresence" : {"labels" : ["foo"], "presence":true}}}
+		],"priorities": [
+			{"name": "EqualPriority", "weight": 2},
+			{"name": "ImageLocalityPriority", "weight": 2},
+			{"name": "LeastRequestedPriority", "weight": 2},
+			{"name": "BalancedResourceAllocation", "weight": 2},
+			{"name": "SelectorSpreadPriority", "weight": 2},
+			{"name": "NodePreferAvoidPodsPriority", "weight": 2},
+			{"name": "NodeAffinityPriority", "weight": 2},
+			{"name": "TaintTolerationPriority", "weight": 2},
+			{"name": "InterPodAffinityPriority", "weight": 2},
+			{"name": "MostRequestedPriority", "weight": 2}
+		]
+	}`,
+			ExpectedPolicy: schedulerapi.Policy{
+				Predicates: []schedulerapi.PredicatePolicy{
+					{Name: "MatchNodeSelector"},
+					{Name: "PodFitsResources"},
+					{Name: "PodFitsHostPorts"},
+					{Name: "HostName"},
+					{Name: "NoDiskConflict"},
+					{Name: "NoVolumeZoneConflict"},
+					{Name: "PodToleratesNodeTaints"},
+					{Name: "CheckNodeMemoryPressure"},
+					{Name: "CheckNodeDiskPressure"},
+					{Name: "CheckNodeInodePressure"},
+					{Name: "MaxEBSVolumeCount"},
+					{Name: "MaxGCEPDVolumeCount"},
+					{Name: "MatchInterPodAffinity"},
+					{Name: "GeneralPredicates"},
+					{Name: "TestServiceAffinity", Argument: &schedulerapi.PredicateArgument{ServiceAffinity: &schedulerapi.ServiceAffinity{Labels: []string{"region"}}}},
+					{Name: "TestLabelsPresence", Argument: &schedulerapi.PredicateArgument{LabelsPresence: &schedulerapi.LabelsPresence{Labels: []string{"foo"}, Presence: true}}},
+				},
+				Priorities: []schedulerapi.PriorityPolicy{
+					{Name: "EqualPriority", Weight: 2},
+					{Name: "ImageLocalityPriority", Weight: 2},
+					{Name: "LeastRequestedPriority", Weight: 2},
+					{Name: "BalancedResourceAllocation", Weight: 2},
+					{Name: "SelectorSpreadPriority", Weight: 2},
+					{Name: "NodePreferAvoidPodsPriority", Weight: 2},
+					{Name: "NodeAffinityPriority", Weight: 2},
+					{Name: "TaintTolerationPriority", Weight: 2},
+					{Name: "InterPodAffinityPriority", Weight: 2},
+					{Name: "MostRequestedPriority", Weight: 2},
+				},
+			},
+		},
 	}

 	registeredPredicates := sets.NewString(factory.ListRegisteredFitPredicates()...)
@@ -158,6 +158,9 @@ func defaultPredicates() sets.String {
 		// Fit is determined by node disk pressure condition.
 		factory.RegisterFitPredicate("CheckNodeDiskPressure", predicates.CheckNodeDiskPressurePredicate),

+		// Fit is determined by node inode pressure condition.
+		factory.RegisterFitPredicate("CheckNodeInodePressure", predicates.CheckNodeInodePressurePredicate),
+
 		// Fit is determined by inter-pod affinity.
 		factory.RegisterFitPredicateFactory(
 			"MatchInterPodAffinity",