Merge pull request #18452 from xiang90/refactor_nodiskconflicts

scheduler: clean up NoDiskConflict code in predicates.go
Alex Mohr committed 2016-01-21 10:58:41 -08:00
commit fd9310fa6e


@@ -82,46 +82,35 @@ func (c *CachedNodeInfo) GetNodeInfo(id string) (*api.Node, error) {
 }
 
 func isVolumeConflict(volume api.Volume, pod *api.Pod) bool {
-	if volume.GCEPersistentDisk != nil {
-		disk := volume.GCEPersistentDisk
-
-		existingPod := &(pod.Spec)
-		for ix := range existingPod.Volumes {
-			if existingPod.Volumes[ix].GCEPersistentDisk != nil &&
-				existingPod.Volumes[ix].GCEPersistentDisk.PDName == disk.PDName &&
-				!(existingPod.Volumes[ix].GCEPersistentDisk.ReadOnly && disk.ReadOnly) {
-				return true
-			}
-		}
-	}
-	if volume.AWSElasticBlockStore != nil {
-		volumeID := volume.AWSElasticBlockStore.VolumeID
-
-		existingPod := &(pod.Spec)
-		for ix := range existingPod.Volumes {
-			if existingPod.Volumes[ix].AWSElasticBlockStore != nil &&
-				existingPod.Volumes[ix].AWSElasticBlockStore.VolumeID == volumeID {
-				return true
-			}
-		}
-	}
-	if volume.RBD != nil {
-		mon := volume.RBD.CephMonitors
-		pool := volume.RBD.RBDPool
-		image := volume.RBD.RBDImage
-
-		existingPod := &(pod.Spec)
-		for ix := range existingPod.Volumes {
-			if existingPod.Volumes[ix].RBD != nil {
-				mon_m := existingPod.Volumes[ix].RBD.CephMonitors
-				pool_m := existingPod.Volumes[ix].RBD.RBDPool
-				image_m := existingPod.Volumes[ix].RBD.RBDImage
-				if haveSame(mon, mon_m) && pool_m == pool && image_m == image {
-					return true
-				}
-			}
-		}
-	}
+	// fast path if there is no conflict checking targets.
+	if volume.GCEPersistentDisk == nil && volume.AWSElasticBlockStore == nil && volume.RBD == nil {
+		return false
+	}
+
+	for _, existingVolume := range pod.Spec.Volumes {
+		// Same GCE disk mounted by multiple pods conflicts unless all pods mount it read-only.
+		if volume.GCEPersistentDisk != nil && existingVolume.GCEPersistentDisk != nil {
+			disk, existingDisk := volume.GCEPersistentDisk, existingVolume.GCEPersistentDisk
+			if disk.PDName == existingDisk.PDName && !(disk.ReadOnly && existingDisk.ReadOnly) {
+				return true
+			}
+		}
+
+		if volume.AWSElasticBlockStore != nil && existingVolume.AWSElasticBlockStore != nil {
+			if volume.AWSElasticBlockStore.VolumeID == existingVolume.AWSElasticBlockStore.VolumeID {
+				return true
+			}
+		}
+
+		if volume.RBD != nil && existingVolume.RBD != nil {
+			mon, pool, image := volume.RBD.CephMonitors, volume.RBD.RBDPool, volume.RBD.RBDImage
+			emon, epool, eimage := existingVolume.RBD.CephMonitors, existingVolume.RBD.RBDPool, existingVolume.RBD.RBDImage
+			if haveSame(mon, emon) && pool == epool && image == eimage {
+				return true
+			}
+		}
+	}
+
 	return false
 }
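
The RBD branch above relies on the haveSame helper, which this commit does not touch. For reference, a minimal sketch of the check it performs (any Ceph monitor shared between the two lists counts as a match); the body below is a plausible reconstruction from how the diff uses it, not code quoted from predicates.go:

// haveSame reports whether the two string slices share at least one element.
// Sketch only: reconstructed from the call site in isVolumeConflict above.
func haveSame(a1, a2 []string) bool {
	for _, val1 := range a1 {
		for _, val2 := range a2 {
			if val1 == val2 {
				return true
			}
		}
	}
	return false
}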
@@ -134,10 +123,9 @@ func isVolumeConflict(volume api.Volume, pod *api.Pod) bool {
 // - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image.
 // TODO: migrate this into some per-volume specific code?
 func NoDiskConflict(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
-	podSpec := &(pod.Spec)
-	for ix := range podSpec.Volumes {
-		for podIx := range existingPods {
-			if isVolumeConflict(podSpec.Volumes[ix], existingPods[podIx]) {
+	for _, v := range pod.Spec.Volumes {
+		for _, ev := range existingPods {
+			if isVolumeConflict(v, ev) {
 				return false, nil
 			}
 		}
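
As a usage illustration (not part of the commit): a pod that mounts the same GCE persistent disk read-write as an already-scheduled pod should fail the predicate. The pod shapes assume the api types of this era, and the volume and node names are hypothetical:

// Hypothetical example: both pods mount GCE PD "disk-1" read-write,
// so the predicate reports a conflict and the node does not fit.
pdVolume := api.Volume{
	Name: "data",
	VolumeSource: api.VolumeSource{
		GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{PDName: "disk-1"},
	},
}
newPod := &api.Pod{Spec: api.PodSpec{Volumes: []api.Volume{pdVolume}}}
existingPod := &api.Pod{Spec: api.PodSpec{Volumes: []api.Volume{pdVolume}}}

fits, err := NoDiskConflict(newPod, []*api.Pod{existingPod}, "node-1")
// fits == false, err == nil: the scheduler rejects this node for newPod.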