From 8f7fcd5adc9c8fd8dfd6c932b76076c870af5487 Mon Sep 17 00:00:00 2001
From: Seungcheol Ko
Date: Tue, 11 Sep 2018 11:54:52 +0000
Subject: [PATCH 1/2] Add VolumeScheduling support for Cinder

---
 pkg/volume/cinder/cinder.go      | 19 +++++++++++++--
 pkg/volume/cinder/cinder_test.go | 40 ++++++++++++++++++++++++++++++--
 pkg/volume/cinder/cinder_util.go | 16 +++++++++----
 3 files changed, 67 insertions(+), 8 deletions(-)

diff --git a/pkg/volume/cinder/cinder.go b/pkg/volume/cinder/cinder.go
index 7086cc06f80..f1274a7b86b 100644
--- a/pkg/volume/cinder/cinder.go
+++ b/pkg/volume/cinder/cinder.go
@@ -276,7 +276,7 @@ type cdManager interface {
 	// Detaches the disk from the kubelet's host machine.
 	DetachDisk(unmounter *cinderVolumeUnmounter) error
 	// Creates a volume
-	CreateVolume(provisioner *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error)
+	CreateVolume(provisioner *cinderVolumeProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error)
 	// Deletes a volume
 	DeleteVolume(deleter *cinderVolumeDeleter) error
 }
@@ -507,7 +507,7 @@ func (c *cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopolo
 		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
 	}
 
-	volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c)
+	volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c, selectedNode, allowedTopologies)
 	if err != nil {
 		return nil, err
 	}
@@ -550,6 +550,21 @@ func (c *cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopolo
 		pv.Spec.AccessModes = c.plugin.GetAccessModes()
 	}
 
+	if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
+		requirements := make([]v1.NodeSelectorRequirement, 0)
+		for k, v := range labels {
+			if v != "" {
+				requirements = append(requirements, v1.NodeSelectorRequirement{Key: k, Operator: v1.NodeSelectorOpIn, Values: []string{v}})
+			}
+		}
+		if len(requirements) > 0 {
+			pv.Spec.NodeAffinity = new(v1.VolumeNodeAffinity)
+			pv.Spec.NodeAffinity.Required = new(v1.NodeSelector)
+			pv.Spec.NodeAffinity.Required.NodeSelectorTerms = make([]v1.NodeSelectorTerm, 1)
+			pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions = requirements
+		}
+	}
+
 	return pv, nil
 }
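Note on the hunk above: every non-empty label returned by CreateVolume becomes one NodeSelectorRequirement, and all requirements land in a single NodeSelectorTerm, so a node must satisfy all of them at once. A minimal standalone sketch of that translation follows; the buildNodeAffinity helper and the example zone value are illustrative, not part of this patch.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

// buildNodeAffinity mirrors the Provision hunk: each non-empty label
// becomes an In requirement, and all requirements share one
// NodeSelectorTerm, so they are ANDed together.
func buildNodeAffinity(labels map[string]string) *v1.VolumeNodeAffinity {
	requirements := make([]v1.NodeSelectorRequirement, 0, len(labels))
	for k, v := range labels {
		if v != "" {
			requirements = append(requirements, v1.NodeSelectorRequirement{
				Key:      k,
				Operator: v1.NodeSelectorOpIn,
				Values:   []string{v},
			})
		}
	}
	if len(requirements) == 0 {
		return nil
	}
	return &v1.VolumeNodeAffinity{
		Required: &v1.NodeSelector{
			NodeSelectorTerms: []v1.NodeSelectorTerm{{MatchExpressions: requirements}},
		},
	}
}

func main() {
	// "nova" is the zone the fake manager in cinder_test.go reports.
	affinity := buildNodeAffinity(map[string]string{
		"failure-domain.beta.kubernetes.io/zone": "nova",
	})
	fmt.Printf("%+v\n", affinity)
}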
diff --git a/pkg/volume/cinder/cinder_test.go b/pkg/volume/cinder/cinder_test.go
index d16b3d09c9f..409cedcf23c 100644
--- a/pkg/volume/cinder/cinder_test.go
+++ b/pkg/volume/cinder/cinder_test.go
@@ -26,6 +26,7 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	utiltesting "k8s.io/client-go/util/testing"
+	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"
@@ -116,8 +117,10 @@ func (fake *fakePDManager) DetachDisk(c *cinderVolumeUnmounter) error {
 	return nil
 }
 
-func (fake *fakePDManager) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) {
-	return "test-volume-name", 1, nil, "", nil
+func (fake *fakePDManager) CreateVolume(c *cinderVolumeProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) {
+	labels = make(map[string]string)
+	labels[kubeletapis.LabelZoneFailureDomain] = "nova"
+	return "test-volume-name", 1, labels, "", nil
 }
 
 func (fake *fakePDManager) DeleteVolume(cd *cinderVolumeDeleter) error {
@@ -210,6 +213,39 @@ func TestPlugin(t *testing.T) {
 		t.Errorf("Provision() returned unexpected volume size: %v", size)
 	}
 
+	// check nodeaffinity members
+	if persistentSpec.Spec.NodeAffinity == nil {
+		t.Errorf("Provision() returned unexpected nil NodeAffinity")
+	}
+
+	if persistentSpec.Spec.NodeAffinity.Required == nil {
+		t.Errorf("Provision() returned unexpected nil NodeAffinity.Required")
+	}
+
+	n := len(persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms)
+	if n != 1 {
+		t.Errorf("Provision() returned unexpected number of NodeSelectorTerms %d. Expected %d", n, 1)
+	}
+
+	n = len(persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions)
+	if n != 1 {
+		t.Errorf("Provision() returned unexpected number of MatchExpressions %d. Expected %d", n, 1)
+	}
+
+	req := persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0]
+
+	if req.Key != kubeletapis.LabelZoneFailureDomain {
+		t.Errorf("Provision() returned unexpected requirement key in NodeAffinity %v", req.Key)
+	}
+
+	if req.Operator != v1.NodeSelectorOpIn {
+		t.Errorf("Provision() returned unexpected requirement operator in NodeAffinity %v", req.Operator)
+	}
+
+	if len(req.Values) != 1 || req.Values[0] != "nova" {
+		t.Errorf("Provision() returned unexpected requirement value in NodeAffinity %v", req.Values)
+	}
+
 	// Test Deleter
 	volSpec := &volume.Spec{
 		PersistentVolume: persistentSpec,
diff --git a/pkg/volume/cinder/cinder_util.go b/pkg/volume/cinder/cinder_util.go
index ab115f83add..accc40a2a6e 100644
--- a/pkg/volume/cinder/cinder_util.go
+++ b/pkg/volume/cinder/cinder_util.go
@@ -162,7 +162,7 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) {
 }
 
 // CreateVolume uses the cloud provider entrypoint for creating a volume
-func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) {
+func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) {
 	cloud, err := c.plugin.getCloudProvider()
 	if err != nil {
 		return "", 0, nil, "", err
@@ -207,7 +207,11 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string,
 		// if we did not get any zones, lets leave it blank and gophercloud will
 		// use zone "nova" as default
 		if len(zones) > 0 {
-			availability = volutil.ChooseZoneForVolume(zones, c.options.PVC.Name)
+			availability, err = volutil.SelectZoneForVolume(false, false, "", nil, zones, node, allowedTopologies, c.options.PVC.Name)
+			if err != nil {
+				glog.V(2).Infof("error selecting zone for volume: %v", err)
+				return "", 0, nil, "", err
+			}
 		}
 	}
 
@@ -221,8 +225,12 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string,
 	// these are needed that pod is spawning to same AZ
 	volumeLabels = make(map[string]string)
 	if IgnoreVolumeAZ == false {
-		volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ
-		volumeLabels[kubeletapis.LabelZoneRegion] = volumeRegion
+		if volumeAZ != "" {
+			volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ
+		}
+		if volumeRegion != "" {
+			volumeLabels[kubeletapis.LabelZoneRegion] = volumeRegion
+		}
 	}
 	return volumeID, volSizeGiB, volumeLabels, fstype, nil
 }
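The behavioural change in cinder_util.go is the switch from ChooseZoneForVolume, which only spread volumes across zones by hashing the PVC name, to SelectZoneForVolume, which first honours the scheduler-selected node and any StorageClass allowedTopologies. A rough sketch of that precedence, under stated assumptions: this is a simplification, not the actual helper from pkg/volume/util, and selectZone and hash are made-up names.

package main

import (
	"errors"
	"fmt"
	"hash/fnv"
	"sort"
)

// selectZone illustrates the decision order this patch relies on:
// a scheduler-selected node wins, then allowedTopologies, and only
// then the old behaviour of spreading volumes across zones that
// have ready nodes by hashing the PVC name.
func selectZone(zonesWithNodes map[string]bool, nodeZone string, topologyZones []string, pvcName string) (string, error) {
	// 1. Delayed binding: the scheduler already picked a node, so
	// provision in that node's zone.
	if nodeZone != "" {
		return nodeZone, nil
	}
	// 2. allowedTopologies on the StorageClass narrows the candidates.
	if len(topologyZones) > 0 {
		sort.Strings(topologyZones)
		return topologyZones[hash(pvcName)%uint32(len(topologyZones))], nil
	}
	// 3. Fall back to hash-based spreading over all zones with ready
	// nodes, which is what ChooseZoneForVolume did before this patch.
	zones := make([]string, 0, len(zonesWithNodes))
	for z := range zonesWithNodes {
		zones = append(zones, z)
	}
	if len(zones) == 0 {
		return "", errors.New("no zone with ready nodes available")
	}
	sort.Strings(zones)
	return zones[hash(pvcName)%uint32(len(zones))], nil
}

func hash(s string) uint32 {
	h := fnv.New32a()
	h.Write([]byte(s))
	return h.Sum32()
}

func main() {
	zone, err := selectZone(map[string]bool{"nova": true, "nova-2": true}, "", nil, "pvc-claim-1")
	fmt.Println(zone, err)
}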
From 4b3e50b61cf1c27bd9d83ef11d6e52fe3b4ae47e Mon Sep 17 00:00:00 2001
From: Seungcheol Ko
Date: Wed, 12 Sep 2018 07:27:33 +0000
Subject: [PATCH 2/2] fix verify-bazel error

---
 pkg/volume/cinder/BUILD | 1 +
 1 file changed, 1 insertion(+)

diff --git a/pkg/volume/cinder/BUILD b/pkg/volume/cinder/BUILD
index c37c3f20881..d332afb01d0 100644
--- a/pkg/volume/cinder/BUILD
+++ b/pkg/volume/cinder/BUILD
@@ -50,6 +50,7 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/cloudprovider:go_default_library",
+        "//pkg/kubelet/apis:go_default_library",
         "//pkg/util/mount:go_default_library",
         "//pkg/volume:go_default_library",
         "//pkg/volume/testing:go_default_library",
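For completeness: the zone label the provisioner writes onto the PV is what the VolumeScheduling predicate later checks against node labels, using standard NodeSelector semantics (terms are ORed, requirements inside a term are ANDed). A minimal sketch of that match, assuming the single-term NodeSelector built in Provision; nodeMatchesVolume and termMatches are illustrative helpers, not scheduler code.

package main

import (
	"fmt"

	"k8s.io/api/core/v1"
)

// nodeMatchesVolume reports whether a node's labels satisfy the
// PV's required node affinity.
func nodeMatchesVolume(pv *v1.PersistentVolume, nodeLabels map[string]string) bool {
	if pv.Spec.NodeAffinity == nil || pv.Spec.NodeAffinity.Required == nil {
		return true // no affinity: volume is usable from any node
	}
	for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms {
		if termMatches(term, nodeLabels) {
			return true // terms are ORed
		}
	}
	return false
}

func termMatches(term v1.NodeSelectorTerm, nodeLabels map[string]string) bool {
	for _, req := range term.MatchExpressions {
		if req.Operator != v1.NodeSelectorOpIn {
			return false // this provisioner only emits In requirements
		}
		matched := false
		for _, val := range req.Values {
			if nodeLabels[req.Key] == val {
				matched = true
				break
			}
		}
		if !matched {
			return false // requirements within a term are ANDed
		}
	}
	return true
}

func main() {
	pv := &v1.PersistentVolume{}
	pv.Spec.NodeAffinity = &v1.VolumeNodeAffinity{
		Required: &v1.NodeSelector{
			NodeSelectorTerms: []v1.NodeSelectorTerm{{
				MatchExpressions: []v1.NodeSelectorRequirement{{
					Key:      "failure-domain.beta.kubernetes.io/zone",
					Operator: v1.NodeSelectorOpIn,
					Values:   []string{"nova"},
				}},
			}},
		},
	}
	fmt.Println(nodeMatchesVolume(pv, map[string]string{"failure-domain.beta.kubernetes.io/zone": "nova"}))  // true
	fmt.Println(nodeMatchesVolume(pv, map[string]string{"failure-domain.beta.kubernetes.io/zone": "other"})) // false
}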