From d2dfb4906f0c9d09f0bc2727523dc9c554ce1f5e Mon Sep 17 00:00:00 2001 From: Xiang Li Date: Wed, 9 Dec 2015 11:19:57 -0800 Subject: [PATCH 1/2] scheduler: clean up NoDiskConflict code in predicates.go --- .../algorithm/predicates/predicates.go | 60 +++++++------------ 1 file changed, 22 insertions(+), 38 deletions(-) diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go index f93ea54a52f..a885637d23b 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go @@ -73,46 +73,31 @@ func (c *CachedNodeInfo) GetNodeInfo(id string) (*api.Node, error) { } func isVolumeConflict(volume api.Volume, pod *api.Pod) bool { - if volume.GCEPersistentDisk != nil { - disk := volume.GCEPersistentDisk + for _, existingVolume := range pod.Spec.Volumes { + // Same GCE Disk can be mounted as read-only by multiple pod simultaneously. + // Or they conflicts + if volume.GCEPersistentDisk != nil && existingVolume.GCEPersistentDisk != nil { + disk, existingDisk := volume.GCEPersistentDisk, existingVolume.GCEPersistentDisk + if disk.PDName == existingDisk.PDName && !(disk.ReadOnly && existingDisk.ReadOnly) { + return true + } + } - existingPod := &(pod.Spec) - for ix := range existingPod.Volumes { - if existingPod.Volumes[ix].GCEPersistentDisk != nil && - existingPod.Volumes[ix].GCEPersistentDisk.PDName == disk.PDName && - !(existingPod.Volumes[ix].GCEPersistentDisk.ReadOnly && disk.ReadOnly) { + if volume.AWSElasticBlockStore != nil && existingVolume.AWSElasticBlockStore != nil { + if volume.AWSElasticBlockStore.VolumeID == existingVolume.AWSElasticBlockStore.VolumeID { + return true + } + } + + if volume.RBD != nil && existingVolume.RBD != nil { + mon, pool, image := volume.RBD.CephMonitors, volume.RBD.RBDPool, volume.RBD.RBDImage + emon, epool, eimage := existingVolume.RBD.CephMonitors, existingVolume.RBD.RBDPool, existingVolume.RBD.RBDImage + if 
haveSame(mon, emon) && pool == epool && image == eimage { return true } } } - if volume.AWSElasticBlockStore != nil { - volumeID := volume.AWSElasticBlockStore.VolumeID - existingPod := &(pod.Spec) - for ix := range existingPod.Volumes { - if existingPod.Volumes[ix].AWSElasticBlockStore != nil && - existingPod.Volumes[ix].AWSElasticBlockStore.VolumeID == volumeID { - return true - } - } - } - if volume.RBD != nil { - mon := volume.RBD.CephMonitors - pool := volume.RBD.RBDPool - image := volume.RBD.RBDImage - - existingPod := &(pod.Spec) - for ix := range existingPod.Volumes { - if existingPod.Volumes[ix].RBD != nil { - mon_m := existingPod.Volumes[ix].RBD.CephMonitors - pool_m := existingPod.Volumes[ix].RBD.RBDPool - image_m := existingPod.Volumes[ix].RBD.RBDImage - if haveSame(mon, mon_m) && pool_m == pool && image_m == image { - return true - } - } - } - } return false } @@ -125,10 +110,9 @@ func isVolumeConflict(volume api.Volume, pod *api.Pod) bool { // - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image. // TODO: migrate this into some per-volume specific code? 
func NoDiskConflict(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) { - podSpec := &(pod.Spec) - for ix := range podSpec.Volumes { - for podIx := range existingPods { - if isVolumeConflict(podSpec.Volumes[ix], existingPods[podIx]) { + for _, v := range pod.Spec.Volumes { + for _, ev := range existingPods { + if isVolumeConflict(v, ev) { return false, nil } } From a0e6d68026eb39bdf0b8e1c6917136cbe3238f7b Mon Sep 17 00:00:00 2001 From: Xiang Li Date: Wed, 9 Dec 2015 11:45:56 -0800 Subject: [PATCH 2/2] scheduler: fast check when there are no conflicts --- plugin/pkg/scheduler/algorithm/predicates/predicates.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go index a885637d23b..4755a9d48ce 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go @@ -73,9 +73,13 @@ func (c *CachedNodeInfo) GetNodeInfo(id string) (*api.Node, error) { } func isVolumeConflict(volume api.Volume, pod *api.Pod) bool { + // fast path if there are no conflict checking targets. + if volume.GCEPersistentDisk == nil && volume.AWSElasticBlockStore == nil && volume.RBD == nil { + return false + } + for _, existingVolume := range pod.Spec.Volumes { - // Same GCE Disk can be mounted as read-only by multiple pod simultaneously. - // Or they conflicts + // Same GCE disk mounted by multiple pods conflicts unless all pods mount it read-only. if volume.GCEPersistentDisk != nil && existingVolume.GCEPersistentDisk != nil { disk, existingDisk := volume.GCEPersistentDisk, existingVolume.GCEPersistentDisk if disk.PDName == existingDisk.PDName && !(disk.ReadOnly && existingDisk.ReadOnly) {