rbd: support NoDiskConflicts scheduler predicates

Signed-off-by: Huamin Chen <hchen@redhat.com>
Huamin Chen 2015-10-20 13:24:23 -04:00
parent 0888c2ef38
commit bd10664851
2 changed files with 85 additions and 1 deletion

View File

@@ -76,13 +76,30 @@ func isVolumeConflict(volume api.Volume, pod *api.Pod) bool {
			}
		}
	}
	if volume.RBD != nil {
		mon := volume.RBD.CephMonitors
		pool := volume.RBD.RBDPool
		image := volume.RBD.RBDImage
		manifest := &(pod.Spec)
		for ix := range manifest.Volumes {
			if manifest.Volumes[ix].RBD != nil {
				mon_m := manifest.Volumes[ix].RBD.CephMonitors
				pool_m := manifest.Volumes[ix].RBD.RBDPool
				image_m := manifest.Volumes[ix].RBD.RBDImage
				if haveSame(mon, mon_m) && pool_m == pool && image_m == image {
					return true
				}
			}
		}
	}
	return false
}
// NoDiskConflict evaluates if a pod can fit due to the volumes it requests, and those that
// are already mounted. Some types of volumes are mounted onto node machines. For now, these mounts
// are exclusive, so if there is already a volume mounted on that node, another pod can't schedule
// there. This is GCE and Amazon EBS specific for now.
// there. This is GCE, Amazon EBS, and Ceph RBD specific for now.
// TODO: migrate this into some per-volume specific code?
func NoDiskConflict(pod *api.Pod, existingPods []*api.Pod, node string) (bool, error) {
	manifest := &(pod.Spec)
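
The comment above summarizes the predicate's shape: a pod fits a node only if none of its volumes conflicts with a volume requested by a pod already assigned there. Below is a minimal standalone sketch of that structure, using illustrative stand-in types rather than the real Kubernetes API (volume, pod, conflicts, and noDiskConflict are hypothetical names, not identifiers from this tree):

package main

import "fmt"

// volume and pod are illustrative stand-ins for api.Volume and api.Pod;
// only the fields the conflict check needs are kept.
type volume struct {
	Pool, Image string
}

type pod struct {
	Volumes []volume
}

// conflicts is a placeholder for isVolumeConflict: here two volumes clash
// when they name the same pool and image.
func conflicts(v volume, p pod) bool {
	for _, other := range p.Volumes {
		if other.Pool == v.Pool && other.Image == v.Image {
			return true
		}
	}
	return false
}

// noDiskConflict mirrors the predicate's structure: every volume of the
// candidate pod is checked against every pod already placed on the node.
func noDiskConflict(candidate pod, existing []pod) bool {
	for _, v := range candidate.Volumes {
		for _, p := range existing {
			if conflicts(v, p) {
				return false // a shared exclusive disk: the pod does not fit
			}
		}
	}
	return true // no conflicts: the pod can be scheduled on this node
}

func main() {
	a := pod{Volumes: []volume{{Pool: "foo", Image: "bar"}}}
	b := pod{Volumes: []volume{{Pool: "foo", Image: "bar"}}}
	c := pod{Volumes: []volume{{Pool: "foo", Image: "baz"}}}
	fmt.Println(noDiskConflict(a, []pod{b})) // false: same disk already on the node
	fmt.Println(noDiskConflict(a, []pod{c})) // true: different image, no conflict
}

The real predicate returns (bool, error) and inspects each api.Volume source type in turn; the sketch keeps only the nested-loop shape.
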
@@ -412,3 +429,15 @@ func MapPodsToMachines(lister algorithm.PodLister) (map[string][]*api.Pod, error
	}
	return machineToPods, nil
}
// haveSame searches two string slices and returns true if they have at least one element in common; it returns false otherwise.
func haveSame(a1, a2 []string) bool {
	for _, val1 := range a1 {
		for _, val2 := range a2 {
			if val1 == val2 {
				return true
			}
		}
	}
	return false
}
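
The new RBD branch treats two volumes as the same disk only when the pool and image match and haveSame finds at least one Ceph monitor in common, so volumes with disjoint (or empty) monitor lists never conflict through this path. A standalone sketch of just that rule, using a trimmed-down rbdVolume stand-in rather than api.RBDVolumeSource (the struct and function names are illustrative):

package main

import "fmt"

// rbdVolume is an illustrative stand-in for the api.RBDVolumeSource fields
// the conflict check reads.
type rbdVolume struct {
	CephMonitors []string
	RBDPool      string
	RBDImage     string
}

// haveSame reports whether two string slices share at least one element,
// as in the helper added above.
func haveSame(a1, a2 []string) bool {
	for _, v1 := range a1 {
		for _, v2 := range a2 {
			if v1 == v2 {
				return true
			}
		}
	}
	return false
}

// rbdConflict mirrors the new RBD branch of isVolumeConflict: same pool,
// same image, and overlapping monitor lists.
func rbdConflict(a, b rbdVolume) bool {
	return haveSame(a.CephMonitors, b.CephMonitors) &&
		a.RBDPool == b.RBDPool &&
		a.RBDImage == b.RBDImage
}

func main() {
	v1 := rbdVolume{CephMonitors: []string{"a", "b"}, RBDPool: "foo", RBDImage: "bar"}
	v2 := rbdVolume{CephMonitors: []string{"b", "c"}, RBDPool: "foo", RBDImage: "bar"}
	v3 := rbdVolume{CephMonitors: []string{"c", "d"}, RBDPool: "foo", RBDImage: "bar"}
	fmt.Println(rbdConflict(v1, v2)) // true: monitors overlap on "b", same pool and image
	fmt.Println(rbdConflict(v1, v3)) // false: disjoint monitor lists, so no conflict
}

The v1/v3 pair corresponds to the "different state" case in the test below: identical pool and image but disjoint monitors, hence no conflict.
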

View File

@@ -435,6 +435,61 @@ func TestAWSDiskConflicts(t *testing.T) {
	}
}
func TestRBDDiskConflicts(t *testing.T) {
	volState := api.PodSpec{
		Volumes: []api.Volume{
			{
				VolumeSource: api.VolumeSource{
					RBD: &api.RBDVolumeSource{
						CephMonitors: []string{"a", "b"},
						RBDPool:      "foo",
						RBDImage:     "bar",
						FSType:       "ext4",
					},
				},
			},
		},
	}
	volState2 := api.PodSpec{
		Volumes: []api.Volume{
			{
				VolumeSource: api.VolumeSource{
					RBD: &api.RBDVolumeSource{
						CephMonitors: []string{"c", "d"},
						RBDPool:      "foo",
						RBDImage:     "bar",
						FSType:       "ext4",
					},
				},
			},
		},
	}
	tests := []struct {
		pod          *api.Pod
		existingPods []*api.Pod
		isOk         bool
		test         string
	}{
		{&api.Pod{}, []*api.Pod{}, true, "nothing"},
		{&api.Pod{}, []*api.Pod{{Spec: volState}}, true, "one state"},
		{&api.Pod{Spec: volState}, []*api.Pod{{Spec: volState}}, false, "same state"},
		{&api.Pod{Spec: volState2}, []*api.Pod{{Spec: volState}}, true, "different state"},
	}
	for _, test := range tests {
		ok, err := NoDiskConflict(test.pod, test.existingPods, "machine")
		if err != nil {
			t.Fatalf("unexpected error: %v", err)
		}
		if test.isOk && !ok {
			t.Errorf("expected ok, got none. %v %v %s", test.pod, test.existingPods, test.test)
		}
		if !test.isOk && ok {
			t.Errorf("expected no ok, got one. %v %v %s", test.pod, test.existingPods, test.test)
		}
	}
}
func TestPodFitsSelector(t *testing.T) {
	tests := []struct {
		pod *api.Pod