Mirror of https://github.com/k3s-io/kubernetes.git
Scheduler does not place pods on nodes that have disk pressure
parent c3324b88a0
commit 9604b47c13
@@ -49,6 +49,7 @@ The purpose of filtering the nodes is to filter out the nodes that do not meet c
- `MaxEBSVolumeCount`: Ensure that the number of attached ElasticBlockStore volumes does not exceed a maximum value (by default, 39, since Amazon recommends a maximum of 40 with one of those 40 reserved for the root volume -- see [Amazon's documentation](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/volume_limits.html#linux-specific-volume-limits)). The maximum value can be controlled by setting the `KUBE_MAX_PD_VOLS` environment variable (see the sketch after this list).
- `MaxGCEPDVolumeCount`: Ensure that the number of attached GCE PersistentDisk volumes does not exceed a maximum value (by default, 16, which is the maximum GCE allows -- see [GCE's documentation](https://cloud.google.com/compute/docs/disks/persistent-disks#limits_for_predefined_machine_types)). The maximum value can be controlled by setting the `KUBE_MAX_PD_VOLS` environment variable.
- `CheckNodeMemoryPressure`: Check if a pod can be scheduled on a node reporting a memory pressure condition. Currently, no `BestEffort` pods should be placed on a node under memory pressure, as they would be automatically evicted by the kubelet.
- `CheckNodeDiskPressure`: Check if a pod can be scheduled on a node reporting a disk pressure condition. Currently, no pods should be placed on a node under disk pressure, as they would be automatically evicted by the kubelet.
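Both volume-count predicates honor the same `KUBE_MAX_PD_VOLS` override. As a rough illustration of how such an override can be resolved, here is a minimal, self-contained Go sketch; the helper name `maxVolumeCount` is hypothetical and stands in for whatever the scheduler uses internally:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// maxVolumeCount is a hypothetical helper showing how a volume-count
// predicate could resolve its limit: KUBE_MAX_PD_VOLS, when set to a
// positive integer, overrides the per-cloud default (39 for EBS, 16 for
// GCE PD).
func maxVolumeCount(defaultVal int) int {
	raw := os.Getenv("KUBE_MAX_PD_VOLS")
	if raw == "" {
		return defaultVal
	}
	parsed, err := strconv.Atoi(raw)
	if err != nil || parsed <= 0 {
		// An unparsable or non-positive override is ignored.
		return defaultVal
	}
	return parsed
}

func main() {
	fmt.Println(maxVolumeCount(39)) // prints 39 unless KUBE_MAX_PD_VOLS is set
}
```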
The details of the above predicates can be found in [plugin/pkg/scheduler/algorithm/predicates/predicates.go](http://releases.k8s.io/HEAD/plugin/pkg/scheduler/algorithm/predicates/predicates.go). All predicates mentioned above can be used in combination to perform a sophisticated filtering policy. Kubernetes uses some, but not all, of these predicates by default. You can see which ones are used by default in [plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go](http://releases.k8s.io/HEAD/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go).
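As a rough picture of how predicates combine, the sketch below runs a pod through a list of predicate functions and keeps only the nodes on which all of them pass. The types and the `filterNodes` helper are simplified stand-ins, not the scheduler's real ones (the real predicate signature, visible in the diff below, also takes predicate metadata and cached node info):

```go
package main

import "fmt"

// Simplified stand-ins for the scheduler's api.Pod and api.Node types.
type Pod struct{ Name string }
type Node struct {
	Name              string
	UnderDiskPressure bool
}

// FitPredicate mirrors the shape of the predicates above: true means the
// node passes the filter; false comes with an error naming the reason.
type FitPredicate func(pod *Pod, node *Node) (bool, error)

func checkNodeDiskPressure(pod *Pod, node *Node) (bool, error) {
	if node.UnderDiskPressure {
		return false, fmt.Errorf("node %s is under disk pressure", node.Name)
	}
	return true, nil
}

// filterNodes keeps only the nodes on which every predicate passes, which
// is how individual predicates combine into one filtering policy.
func filterNodes(pod *Pod, nodes []*Node, preds []FitPredicate) []*Node {
	var fits []*Node
	for _, node := range nodes {
		ok := true
		for _, pred := range preds {
			if passed, _ := pred(pod, node); !passed {
				ok = false
				break
			}
		}
		if ok {
			fits = append(fits, node)
		}
	}
	return fits
}

func main() {
	nodes := []*Node{{Name: "a"}, {Name: "b", UnderDiskPressure: true}}
	for _, n := range filterNodes(&Pod{Name: "p"}, nodes, []FitPredicate{checkNodeDiskPressure}) {
		fmt.Println(n.Name) // only "a" survives the filter
	}
}
```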
@@ -39,6 +39,7 @@ var (
	ErrServiceAffinityViolated = newPredicateFailureError("CheckServiceAffinity")
	ErrMaxVolumeCountExceeded  = newPredicateFailureError("MaxVolumeCount")
	ErrNodeUnderMemoryPressure = newPredicateFailureError("NodeUnderMemoryPressure")
	ErrNodeUnderDiskPressure   = newPredicateFailureError("NodeUnderDiskPressure")
	// ErrFakePredicate is used for tests only. Fake predicates that return false
	// also return ErrFakePredicate as the error.
	ErrFakePredicate = newPredicateFailureError("FakePredicateError")
@@ -1041,3 +1041,21 @@ func CheckNodeMemoryPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
	return true, nil
}

// CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node
// reporting a disk pressure condition.
func CheckNodeDiskPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, error) {
	node := nodeInfo.Node()
	if node == nil {
		return false, fmt.Errorf("node not found")
	}

	// is the node under disk pressure?
	for _, cond := range node.Status.Conditions {
		if cond.Type == api.NodeDiskPressure && cond.Status == api.ConditionTrue {
			return false, ErrNodeUnderDiskPressure
		}
	}

	return true, nil
}
@@ -2906,3 +2906,72 @@ func TestPodSchedulesOnNodeWithMemoryPressureCondition(t *testing.T) {
		}
	}
}

func TestPodSchedulesOnNodeWithDiskPressureCondition(t *testing.T) {
	pod := &api.Pod{
		Spec: api.PodSpec{
			Containers: []api.Container{
				{
					Name:            "container",
					Image:           "image",
					ImagePullPolicy: "Always",
				},
			},
		},
	}

	// specify a node with no disk pressure condition on
	noPressureNode := &api.Node{
		Status: api.NodeStatus{
			Conditions: []api.NodeCondition{
				{
					Type:   "Ready",
					Status: "True",
				},
			},
		},
	}

	// specify a node with disk pressure condition on
	pressureNode := &api.Node{
		Status: api.NodeStatus{
			Conditions: []api.NodeCondition{
				{
					Type:   "DiskPressure",
					Status: "True",
				},
			},
		},
	}

	tests := []struct {
		pod      *api.Pod
		nodeInfo *schedulercache.NodeInfo
		fits     bool
		name     string
	}{
		{
			pod:      pod,
			nodeInfo: makeEmptyNodeInfo(noPressureNode),
			fits:     true,
			name:     "pod schedulable on node without pressure condition on",
		},
		{
			pod:      pod,
			nodeInfo: makeEmptyNodeInfo(pressureNode),
			fits:     false,
			name:     "pod not schedulable on node with pressure condition on",
		},
	}

	for _, test := range tests {
		// the predicate ignores the meta argument, so nil suffices here
		fits, err := CheckNodeDiskPressurePredicate(test.pod, nil, test.nodeInfo)
		if fits != test.fits {
			t.Errorf("%s: expected %v got %v", test.name, test.fits, fits)
		}

		if err != nil && err != ErrNodeUnderDiskPressure {
			t.Errorf("%s: unexpected error: %v", test.name, err)
		}
	}
}
@@ -151,6 +151,9 @@ func defaultPredicates() sets.String {

		// Fit is determined by node memory pressure condition.
		factory.RegisterFitPredicate("CheckNodeMemoryPressure", predicates.CheckNodeMemoryPressurePredicate),

		// Fit is determined by node disk pressure condition.
		factory.RegisterFitPredicate("CheckNodeDiskPressure", predicates.CheckNodeDiskPressurePredicate),
	)
}