From a9aea022ce1d7dbbda4e6097c21f91536b8ed490 Mon Sep 17 00:00:00 2001
From: Dave Chen
Date: Mon, 17 Jun 2019 17:45:37 +0800
Subject: [PATCH] Update the comments on how to check disk conflict

Signed-off-by: Dave Chen
---
 pkg/scheduler/algorithm/predicates/predicates.go | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pkg/scheduler/algorithm/predicates/predicates.go b/pkg/scheduler/algorithm/predicates/predicates.go
index be61a41902c..bd33b81e95a 100644
--- a/pkg/scheduler/algorithm/predicates/predicates.go
+++ b/pkg/scheduler/algorithm/predicates/predicates.go
@@ -279,11 +279,11 @@ func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool {
 // NoDiskConflict evaluates if a pod can fit due to the volumes it requests, and those that
 // are already mounted. If there is already a volume mounted on that node, another pod that uses the same volume
 // can't be scheduled there.
-// This is GCE, Amazon EBS, and Ceph RBD specific for now:
+// This is GCE, Amazon EBS, ISCSI and Ceph RBD specific for now:
 // - GCE PD allows multiple mounts as long as they're all read-only
 // - AWS EBS forbids any two pods mounting the same volume ID
-// - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image.
-// - ISCSI forbids if any two pods share at least same IQN, LUN and Target
+// - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image, and the image is read-only
+// - ISCSI forbids if any two pods share at least same IQN and ISCSI volume is read-only
 // TODO: migrate this into some per-volume specific code?
 func NoDiskConflict(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {
 	for _, v := range pod.Spec.Volumes {
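
Note (not part of the patch): the updated comment reads more easily next to a sketch of the conflict check it describes. The snippet below is a minimal, illustrative interpretation, assuming the usual reading that a shared RBD image or a shared ISCSI IQN conflicts unless all mounts of it are read-only. The helper names volumesConflict and haveOverlap are hypothetical; the field names (ISCSI.IQN, ISCSI.ReadOnly, RBD.CephMonitors, RBD.RBDPool, RBD.RBDImage, RBD.ReadOnly) come from the k8s.io/api/core/v1 types.

// Illustrative sketch only; not the patched predicates.go.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// haveOverlap reports whether two monitor lists share at least one entry.
func haveOverlap(a, b []string) bool {
	seen := make(map[string]bool, len(a))
	for _, m := range a {
		seen[m] = true
	}
	for _, m := range b {
		if seen[m] {
			return true
		}
	}
	return false
}

// volumesConflict applies the read-only rules described in the updated comment:
//   - RBD: volumes sharing at least one monitor with matching pool and image
//     conflict unless both mounts are read-only
//   - ISCSI: volumes sharing the same IQN conflict unless both mounts are read-only
func volumesConflict(a, b v1.Volume) bool {
	if a.RBD != nil && b.RBD != nil {
		if haveOverlap(a.RBD.CephMonitors, b.RBD.CephMonitors) &&
			a.RBD.RBDPool == b.RBD.RBDPool && a.RBD.RBDImage == b.RBD.RBDImage &&
			!(a.RBD.ReadOnly && b.RBD.ReadOnly) {
			return true
		}
	}
	if a.ISCSI != nil && b.ISCSI != nil {
		if a.ISCSI.IQN == b.ISCSI.IQN && !(a.ISCSI.ReadOnly && b.ISCSI.ReadOnly) {
			return true
		}
	}
	return false
}

func main() {
	// Two pods pointing at the same hypothetical ISCSI target, one read-write and one read-only.
	rw := v1.Volume{VolumeSource: v1.VolumeSource{ISCSI: &v1.ISCSIVolumeSource{IQN: "iqn.2019-06.example:target", ReadOnly: false}}}
	ro := v1.Volume{VolumeSource: v1.VolumeSource{ISCSI: &v1.ISCSIVolumeSource{IQN: "iqn.2019-06.example:target", ReadOnly: true}}}
	fmt.Println(volumesConflict(rw, ro)) // true: only one of the mounts is read-only
	fmt.Println(volumesConflict(ro, ro)) // false: both mounts are read-only
}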