addressed comments
@@ -916,7 +916,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 }
 
 func TestDiskPressureNodeFsInodes(t *testing.T) {
-    // TODO: we need to know inodes used when cadvisor supports per container stats
+    // TODO(dashpole): we need to know inodes used when cadvisor supports per container stats
     podMaker := func(name string, requests api.ResourceList, limits api.ResourceList) (*api.Pod, statsapi.PodStats) {
         pod := newPod(name, []api.Container{
             newContainer(name, requests, limits),
@@ -943,7 +943,7 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
         }
         return result
     }
-    // TODO: pass inodes used in future when supported by cadvisor.
+    // TODO(dashpole): pass inodes used in future when supported by cadvisor.
     podsToMake := []struct {
         name     string
         requests api.ResourceList
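Side note on the quantity strings this test uses: summaryStatsMaker takes resource.Quantity strings ("1.5Mi", "4Mi", and so on) standing in for free inodes and inode capacity. A minimal standalone sketch of how such a string resolves to a count, assuming the pkg/api/resource import path of this era of the tree:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api/resource"
)

func main() {
    // "1.5Mi" uses a binary suffix, so it parses to 1.5 * 1024 * 1024.
    free := resource.MustParse("1.5Mi")
    capacity := resource.MustParse("4Mi")
    fmt.Println(free.Value(), capacity.Value()) // 1572864 4194304
}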
@@ -1013,9 +1013,9 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
     // synchronize
     manager.synchronize(diskInfoProvider, activePodsFunc)
 
-    // we should not have disk pressure
+    // we should not have inode pressure
     if manager.IsUnderInodePressure() {
-        t.Errorf("Manager should not report disk pressure")
+        t.Errorf("Manager should not report inode pressure")
     }
 
     // try to admit our pod (should succeed)
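The "(should succeed)" comment refers to an admission check against the eviction manager. A hedged fragment of that pattern; the lifecycle.PodAdmitAttributes shape is an assumption based on the kubelet lifecycle interfaces, not something shown in this diff:

// Fragment, not standalone: manager, podToAdmit, and t come from the test setup.
if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); !result.Admit {
    t.Errorf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, true, result.Admit)
}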
@@ -1028,9 +1028,9 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
     summaryProvider.result = summaryStatsMaker("1.5Mi", "4Mi", podStats)
     manager.synchronize(diskInfoProvider, activePodsFunc)
 
-    // we should have disk pressure
+    // we should have inode pressure
     if !manager.IsUnderInodePressure() {
-        t.Errorf("Manager should report disk pressure since soft threshold was met")
+        t.Errorf("Manager should report inode pressure since soft threshold was met")
     }
 
     // verify no pod was yet killed because there has not yet been enough time passed.
@@ -1043,9 +1043,9 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
     summaryProvider.result = summaryStatsMaker("1.5Mi", "4Mi", podStats)
     manager.synchronize(diskInfoProvider, activePodsFunc)
 
-    // we should have disk pressure
+    // we should have inode pressure
    if !manager.IsUnderInodePressure() {
-        t.Errorf("Manager should report disk pressure since soft threshold was met")
+        t.Errorf("Manager should report inode pressure since soft threshold was met")
     }
 
     // verify the right pod was killed with the right grace period.
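The two hunks above (-1028 and -1043) exercise the soft threshold: pressure is reported as soon as the signal crosses the threshold, but nothing is evicted until the grace period elapses. A simplified standalone model of that rule, with assumed names rather than the kubelet's actual types:

package main

import (
    "fmt"
    "time"
)

// softThreshold is a toy stand-in for the manager's soft-eviction logic:
// eviction triggers only once the signal has been past the threshold for
// the whole grace period.
type softThreshold struct {
    gracePeriod   time.Duration
    firstObserved time.Time
}

func (s *softThreshold) shouldEvict(now time.Time, underPressure bool) bool {
    if !underPressure {
        s.firstObserved = time.Time{} // signal recovered; reset
        return false
    }
    if s.firstObserved.IsZero() {
        s.firstObserved = now // first observation of pressure
    }
    return now.Sub(s.firstObserved) >= s.gracePeriod
}

func main() {
    th := &softThreshold{gracePeriod: time.Minute}
    start := time.Now()
    fmt.Println(th.shouldEvict(start, true))                    // false: not enough time passed
    fmt.Println(th.shouldEvict(start.Add(2*time.Minute), true)) // true: grace period met
}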
@@ -1063,24 +1063,24 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
     podKiller.pod = nil
     podKiller.gracePeriodOverride = nil
 
-    // remove disk pressure
+    // remove inode pressure
     fakeClock.Step(20 * time.Minute)
     summaryProvider.result = summaryStatsMaker("3Mi", "4Mi", podStats)
     manager.synchronize(diskInfoProvider, activePodsFunc)
 
-    // we should not have disk pressure
+    // we should not have inode pressure
     if manager.IsUnderInodePressure() {
-        t.Errorf("Manager should not report disk pressure")
+        t.Errorf("Manager should not report inode pressure")
     }
 
-    // induce disk pressure!
+    // induce inode pressure!
     fakeClock.Step(1 * time.Minute)
     summaryProvider.result = summaryStatsMaker("0.5Mi", "4Mi", podStats)
     manager.synchronize(diskInfoProvider, activePodsFunc)
 
-    // we should have disk pressure
+    // we should have inode pressure
     if !manager.IsUnderInodePressure() {
-        t.Errorf("Manager should report disk pressure")
+        t.Errorf("Manager should report inode pressure")
     }
 
     // check the right pod was killed
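The fakeClock.Step calls above advance the manager's view of time without sleeping. A minimal standalone sketch of the same pattern, assuming the pkg/util/clock package of this era of the tree:

package main

import (
    "fmt"
    "time"

    "k8s.io/kubernetes/pkg/util/clock"
)

func main() {
    fakeClock := clock.NewFakeClock(time.Now())
    before := fakeClock.Now()
    fakeClock.Step(20 * time.Minute)         // jump forward instantly
    fmt.Println(fakeClock.Now().Sub(before)) // 20m0s
}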
@@ -1097,15 +1097,15 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
         t.Errorf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, false, result.Admit)
     }
 
-    // reduce disk pressure
+    // reduce inode pressure
     fakeClock.Step(1 * time.Minute)
     summaryProvider.result = summaryStatsMaker("3Mi", "4Mi", podStats)
     podKiller.pod = nil // reset state
     manager.synchronize(diskInfoProvider, activePodsFunc)
 
-    // we should have disk pressure (because transition period not yet met)
+    // we should have inode pressure (because transition period not yet met)
     if !manager.IsUnderInodePressure() {
-        t.Errorf("Manager should report disk pressure")
+        t.Errorf("Manager should report inode pressure")
     }
 
     // no pod should have been killed
@@ -1124,9 +1124,9 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
     podKiller.pod = nil // reset state
     manager.synchronize(diskInfoProvider, activePodsFunc)
 
-    // we should not have disk pressure (because transition period met)
+    // we should not have inode pressure (because transition period met)
     if manager.IsUnderInodePressure() {
-        t.Errorf("Manager should not report disk pressure")
+        t.Errorf("Manager should not report inode pressure")
     }
 
     // no pod should have been killed
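The last two hunks (-1097 and -1124) cover the pressure transition period: even after the signal recovers, the condition is held true until a configured duration has passed, which keeps the node condition from flapping. A simplified standalone model with assumed names, not the manager's real implementation:

package main

import (
    "fmt"
    "time"
)

// pressureState holds the condition true for transitionPeriod after the
// last observation of pressure, mirroring the test's expectations.
type pressureState struct {
    transitionPeriod time.Duration
    lastObserved     time.Time
}

func (p *pressureState) isUnderPressure(now time.Time, signalBad bool) bool {
    if signalBad {
        p.lastObserved = now
    }
    return !p.lastObserved.IsZero() && now.Sub(p.lastObserved) < p.transitionPeriod
}

func main() {
    p := &pressureState{transitionPeriod: 5 * time.Minute}
    start := time.Now()
    fmt.Println(p.isUnderPressure(start, true))                      // true: signal is bad
    fmt.Println(p.isUnderPressure(start.Add(time.Minute), false))    // true: transition period not yet met
    fmt.Println(p.isUnderPressure(start.Add(10*time.Minute), false)) // false: transition period met
}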
@@ -1169,8 +1169,8 @@ func CheckNodeDiskPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *sc
     return true, nil, nil
 }
 
-// CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node
-// reporting disk pressure condition.
+// CheckNodeInodePressurePredicate checks if a pod can be scheduled on a node
+// reporting inode pressure condition.
 func CheckNodeInodePressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
     node := nodeInfo.Node()
     if node == nil {
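The hunk above cuts off at "if node == nil {". A plausible completion of the new predicate, modeled on the neighbouring CheckNodeDiskPressurePredicate and on the ErrNodeUnderInodePressure failure reason used in the tests below; the condition-scan loop is an assumption, not code shown in this commit:

// Sketch (package fragment, not standalone): api, algorithm, schedulercache,
// and fmt come from the predicates package's existing imports.
func CheckNodeInodePressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
    node := nodeInfo.Node()
    if node == nil {
        return false, nil, fmt.Errorf("node not found")
    }
    // Refuse to schedule onto a node currently reporting InodePressure=True.
    for _, cond := range node.Status.Conditions {
        if cond.Type == api.NodeInodePressure && cond.Status == api.ConditionTrue {
            return false, []algorithm.PredicateFailureReason{ErrNodeUnderInodePressure}, nil
        }
    }
    return true, nil, nil
}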
@@ -3047,8 +3047,8 @@ func TestPodSchedulesOnNodeWithInodePressureCondition(t *testing.T) {
         Status: api.NodeStatus{
             Conditions: []api.NodeCondition{
                 {
-                    Type:   "Ready",
-                    Status: "True",
+                    Type:   api.NodeReady,
+                    Status: api.ConditionTrue,
                 },
             },
         },
@@ -3059,8 +3059,8 @@ func TestPodSchedulesOnNodeWithInodePressureCondition(t *testing.T) {
         Status: api.NodeStatus{
             Conditions: []api.NodeCondition{
                 {
-                    Type:   "InodePressure",
-                    Status: "True",
+                    Type:   api.NodeInodePressure,
+                    Status: api.ConditionTrue,
                 },
             },
         },
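The two hunks above swap string literals for the api package's typed constants. The underlying values are unchanged; the gain is compile-time checking, as this standalone model with local stand-in types shows:

package main

import "fmt"

// Local stand-ins for the api package's types; the real constants carry
// the same underlying strings as the literals they replace.
type NodeConditionType string
type ConditionStatus string

const (
    NodeReady         NodeConditionType = "Ready"
    NodeInodePressure NodeConditionType = "InodePressure"
    ConditionTrue     ConditionStatus   = "True"
)

type NodeCondition struct {
    Type   NodeConditionType
    Status ConditionStatus
}

func main() {
    // A typo in a typed constant fails to compile; a typo in a raw
    // string literal would not.
    c := NodeCondition{Type: NodeInodePressure, Status: ConditionTrue}
    fmt.Println(c)
}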
@@ -3076,13 +3076,13 @@ func TestPodSchedulesOnNodeWithInodePressureCondition(t *testing.T) {
             pod:      pod,
             nodeInfo: makeEmptyNodeInfo(noPressureNode),
             fits:     true,
-            name:     "pod schedulable on node without pressure condition on",
+            name:     "pod schedulable on node without inode pressure condition on",
         },
         {
             pod:      pod,
             nodeInfo: makeEmptyNodeInfo(pressureNode),
             fits:     false,
-            name:     "pod not schedulable on node with pressure condition on",
+            name:     "pod not schedulable on node with inode pressure condition on",
         },
     }
     expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeUnderInodePressure}
@@ -158,7 +158,7 @@ func defaultPredicates() sets.String {
         // Fit is determined by node disk pressure condition.
         factory.RegisterFitPredicate("CheckNodeDiskPressure", predicates.CheckNodeDiskPressurePredicate),
 
-        // Fit is determined by node disk pressure condition.
+        // Fit is determined by node inode pressure condition.
         factory.RegisterFitPredicate("CheckNodeInodePressure", predicates.CheckNodeInodePressurePredicate),
 
         // Fit is determined by inter-pod affinity.
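For context on the hunk above: RegisterFitPredicate returns the name it was given, and defaultPredicates() collects those names into a set, so "CheckNodeInodePressure" becomes part of the default predicate set. A sketch of that surrounding pattern, assuming the factory API of this era rather than quoting it:

// Fragment (not standalone): factory, predicates, and sets come from the
// scheduler's existing imports.
func defaultPredicates() sets.String {
    return sets.NewString(
        // Fit is determined by node disk pressure condition.
        factory.RegisterFitPredicate("CheckNodeDiskPressure", predicates.CheckNodeDiskPressurePredicate),
        // Fit is determined by node inode pressure condition.
        factory.RegisterFitPredicate("CheckNodeInodePressure", predicates.CheckNodeInodePressurePredicate),
        // ... remaining registrations elided ...
    )
}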