Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-08-31 08:36:16 +00:00
addressed comments
@@ -916,7 +916,7 @@ func TestNodeReclaimFuncs(t *testing.T) {
 }
 
 func TestDiskPressureNodeFsInodes(t *testing.T) {
-    // TODO: we need to know inodes used when cadvisor supports per container stats
+    // TODO(dashpole): we need to know inodes used when cadvisor supports per container stats
     podMaker := func(name string, requests api.ResourceList, limits api.ResourceList) (*api.Pod, statsapi.PodStats) {
         pod := newPod(name, []api.Container{
             newContainer(name, requests, limits),
@@ -943,7 +943,7 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
         }
         return result
     }
-    // TODO: pass inodes used in future when supported by cadvisor.
+    // TODO(dashpole): pass inodes used in future when supported by cadvisor.
     podsToMake := []struct {
         name     string
         requests api.ResourceList
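Note: the podsToMake table above presumably feeds the podMaker helper from the previous hunk to build the test fixtures. A minimal sketch of that wiring, assuming the loop body, the limits field, and the activePodsFunc name (none of which appear in this diff):

pods := []*api.Pod{}
podStats := map[*api.Pod]statsapi.PodStats{}
for _, podToMake := range podsToMake {
    // build one pod plus its fake per-pod stats from each table entry
    pod, stats := podMaker(podToMake.name, podToMake.requests, podToMake.limits)
    pods = append(pods, pod)
    podStats[pod] = stats
}
// the eviction manager re-queries the active pods on every synchronize pass
activePodsFunc := func() []*api.Pod { return pods }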
@@ -1013,9 +1013,9 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
     // synchronize
     manager.synchronize(diskInfoProvider, activePodsFunc)

-    // we should not have disk pressure
+    // we should not have inode pressure
     if manager.IsUnderInodePressure() {
-        t.Errorf("Manager should not report disk pressure")
+        t.Errorf("Manager should not report inode pressure")
     }

     // try to admit our pod (should succeed)
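Note: the "try to admit our pod" step referenced above sits outside the hunk. A sketch of what it plausibly looks like, assuming the manager is exercised through the kubelet lifecycle admission interface and that podToAdmit is a fixture defined earlier in the test:

// with no inode pressure the manager, acting as a pod admit handler, should accept the pod
if result := manager.Admit(&lifecycle.PodAdmitAttributes{Pod: podToAdmit}); !result.Admit {
    t.Errorf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, true, result.Admit)
}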
@@ -1028,9 +1028,9 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
     summaryProvider.result = summaryStatsMaker("1.5Mi", "4Mi", podStats)
     manager.synchronize(diskInfoProvider, activePodsFunc)

-    // we should have disk pressure
+    // we should have inode pressure
     if !manager.IsUnderInodePressure() {
-        t.Errorf("Manager should report disk pressure since soft threshold was met")
+        t.Errorf("Manager should report inode pressure since soft threshold was met")
     }

     // verify no pod was yet killed because there has not yet been enough time passed.
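Note: summaryStatsMaker("1.5Mi", "4Mi", podStats) is a helper defined outside these hunks; in this inode-flavored test it presumably maps the two quantities onto the node filesystem's free and total inodes. A sketch under that assumption (the FsStats field names follow the kubelet summary stats API and are an assumption here):

summaryStatsMaker := func(inodesFree, inodes string, podStats map[*api.Pod]statsapi.PodStats) *statsapi.Summary {
    freeQty := resource.MustParse(inodesFree)
    capQty := resource.MustParse(inodes)
    free := uint64(freeQty.Value())
    capacity := uint64(capQty.Value())
    summary := &statsapi.Summary{
        Node: statsapi.NodeStats{
            Fs: &statsapi.FsStats{
                InodesFree: &free,     // drives the inode eviction signal
                Inodes:     &capacity, // total inodes on the node filesystem
            },
        },
    }
    for _, stats := range podStats {
        summary.Pods = append(summary.Pods, stats)
    }
    return summary
}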
@@ -1043,9 +1043,9 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
     summaryProvider.result = summaryStatsMaker("1.5Mi", "4Mi", podStats)
     manager.synchronize(diskInfoProvider, activePodsFunc)

-    // we should have disk pressure
+    // we should have inode pressure
     if !manager.IsUnderInodePressure() {
-        t.Errorf("Manager should report disk pressure since soft threshold was met")
+        t.Errorf("Manager should report inode pressure since soft threshold was met")
     }

     // verify the right pod was killed with the right grace period.
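Note: the grace-period verification referenced above is outside the hunk. A sketch of the likely check, assuming the fake podKiller records its last victim and grace-period override, that pods[0] is the expected victim, and that the manager's config exposes MaxPodGracePeriodSeconds:

if podKiller.pod != pods[0] {
    t.Errorf("Manager chose to kill pod: %v, but should have chosen %v", podKiller.pod, pods[0])
}
// a soft eviction should honor the configured maximum pod grace period
observedGracePeriod := *podKiller.gracePeriodOverride
if observedGracePeriod != manager.config.MaxPodGracePeriodSeconds {
    t.Errorf("Manager chose grace period: %d, expected: %d", observedGracePeriod, manager.config.MaxPodGracePeriodSeconds)
}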
@@ -1063,24 +1063,24 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
     podKiller.pod = nil
     podKiller.gracePeriodOverride = nil

-    // remove disk pressure
+    // remove inode pressure
     fakeClock.Step(20 * time.Minute)
     summaryProvider.result = summaryStatsMaker("3Mi", "4Mi", podStats)
     manager.synchronize(diskInfoProvider, activePodsFunc)

-    // we should not have disk pressure
+    // we should not have inode pressure
     if manager.IsUnderInodePressure() {
-        t.Errorf("Manager should not report disk pressure")
+        t.Errorf("Manager should not report inode pressure")
     }

-    // induce disk pressure!
+    // induce inode pressure!
     fakeClock.Step(1 * time.Minute)
     summaryProvider.result = summaryStatsMaker("0.5Mi", "4Mi", podStats)
     manager.synchronize(diskInfoProvider, activePodsFunc)

-    // we should have disk pressure
+    // we should have inode pressure
     if !manager.IsUnderInodePressure() {
-        t.Errorf("Manager should report disk pressure")
+        t.Errorf("Manager should report inode pressure")
     }

     // check the right pod was killed
@@ -1097,15 +1097,15 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
         t.Errorf("Admit pod: %v, expected: %v, actual: %v", podToAdmit, false, result.Admit)
     }

-    // reduce disk pressure
+    // reduce inode pressure
     fakeClock.Step(1 * time.Minute)
     summaryProvider.result = summaryStatsMaker("3Mi", "4Mi", podStats)
     podKiller.pod = nil // reset state
     manager.synchronize(diskInfoProvider, activePodsFunc)

-    // we should have disk pressure (because transition period not yet met)
+    // we should have inode pressure (because transition period not yet met)
     if !manager.IsUnderInodePressure() {
-        t.Errorf("Manager should report disk pressure")
+        t.Errorf("Manager should report inode pressure")
     }

     // no pod should have been killed
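Note: the "no pod should have been killed" assertion also lives outside the hunk; it presumably just checks the fake podKiller, roughly:

if podKiller.pod != nil {
    t.Errorf("Manager chose to kill pod: %v when no pod should have been killed", podKiller.pod)
}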
@@ -1124,9 +1124,9 @@ func TestDiskPressureNodeFsInodes(t *testing.T) {
     podKiller.pod = nil // reset state
     manager.synchronize(diskInfoProvider, activePodsFunc)

-    // we should not have disk pressure (because transition period met)
+    // we should not have inode pressure (because transition period met)
     if manager.IsUnderInodePressure() {
-        t.Errorf("Manager should not report disk pressure")
+        t.Errorf("Manager should not report inode pressure")
     }

     // no pod should have been killed
@@ -1169,8 +1169,8 @@ func CheckNodeDiskPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
     return true, nil, nil
 }

-// CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node
-// reporting disk pressure condition.
+// CheckNodeInodePressurePredicate checks if a pod can be scheduled on a node
+// reporting inode pressure condition.
 func CheckNodeInodePressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
     node := nodeInfo.Node()
     if node == nil {
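Note: only the head of the renamed predicate appears in this hunk. The rest of the body presumably mirrors CheckNodeDiskPressurePredicate, scanning the node's conditions for the new InodePressure type; a sketch under that assumption:

func CheckNodeInodePressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
    node := nodeInfo.Node()
    if node == nil {
        return false, nil, fmt.Errorf("node not found")
    }

    // reject the pod if the node currently reports the InodePressure condition
    for _, cond := range node.Status.Conditions {
        if cond.Type == api.NodeInodePressure && cond.Status == api.ConditionTrue {
            return false, []algorithm.PredicateFailureReason{ErrNodeUnderInodePressure}, nil
        }
    }
    return true, nil, nil
}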
@@ -3047,8 +3047,8 @@ func TestPodSchedulesOnNodeWithInodePressureCondition(t *testing.T) {
         Status: api.NodeStatus{
             Conditions: []api.NodeCondition{
                 {
-                    Type:   "Ready",
-                    Status: "True",
+                    Type:   api.NodeReady,
+                    Status: api.ConditionTrue,
                 },
             },
         },
@@ -3059,8 +3059,8 @@ func TestPodSchedulesOnNodeWithInodePressureCondition(t *testing.T) {
         Status: api.NodeStatus{
             Conditions: []api.NodeCondition{
                 {
-                    Type:   "InodePressure",
-                    Status: "True",
+                    Type:   api.NodeInodePressure,
+                    Status: api.ConditionTrue,
                 },
             },
         },
@@ -3076,13 +3076,13 @@ func TestPodSchedulesOnNodeWithInodePressureCondition(t *testing.T) {
             pod:      pod,
             nodeInfo: makeEmptyNodeInfo(noPressureNode),
             fits:     true,
-            name:     "pod schedulable on node without pressure condition on",
+            name:     "pod schedulable on node without inode pressure condition on",
         },
         {
             pod:      pod,
             nodeInfo: makeEmptyNodeInfo(pressureNode),
             fits:     false,
-            name:     "pod not schedulable on node with pressure condition on",
+            name:     "pod not schedulable on node with inode pressure condition on",
         },
     }
     expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeUnderInodePressure}
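Note: the fixture table above is presumably driven by a loop like the one used by the sibling memory- and disk-pressure predicate tests; a sketch, assuming the slice is named tests and passing nil predicate metadata:

for _, test := range tests {
    fits, reasons, err := CheckNodeInodePressurePredicate(test.pod, nil, test.nodeInfo)
    if err != nil {
        t.Errorf("%s: unexpected error: %v", test.name, err)
    }
    if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
        t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.name, reasons, expectedFailureReasons)
    }
    if fits != test.fits {
        t.Errorf("%s: expected %v got %v", test.name, test.fits, fits)
    }
}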
@@ -158,7 +158,7 @@ func defaultPredicates() sets.String {
         // Fit is determined by node disk pressure condition.
         factory.RegisterFitPredicate("CheckNodeDiskPressure", predicates.CheckNodeDiskPressurePredicate),

-        // Fit is determined by node disk pressure condition.
+        // Fit is determined by node inode pressure condition.
         factory.RegisterFitPredicate("CheckNodeInodePressure", predicates.CheckNodeInodePressurePredicate),

         // Fit is determined by inter-pod affinity.