Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-30 06:54:01 +00:00
Merge pull request #68536 from sngchlko/affinity-for-cinder
Add VolumeScheduling support for Cinder
This commit is contained in: commit 7db813178d
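
Note (not part of the commit): topology-aware provisioning via the VolumeScheduling feature is normally driven by a StorageClass with volumeBindingMode WaitForFirstConsumer, which is how Provision() ends up receiving a selected node and allowed topologies. Below is a minimal Go sketch of such a StorageClass object, assuming current upstream API types; the class name "cinder-topology-aware" is illustrative.

// Illustrative sketch only (not from this commit): a Cinder StorageClass with
// delayed binding, the setup under which the provisioner receives a non-nil
// selectedNode and allowedTopologies.
package main

import (
	"fmt"

	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	mode := storagev1.VolumeBindingWaitForFirstConsumer
	sc := storagev1.StorageClass{
		ObjectMeta:        metav1.ObjectMeta{Name: "cinder-topology-aware"}, // illustrative name
		Provisioner:       "kubernetes.io/cinder",                           // in-tree Cinder provisioner
		VolumeBindingMode: &mode,
	}
	fmt.Printf("%s: provisioner=%s, bindingMode=%s\n", sc.Name, sc.Provisioner, *sc.VolumeBindingMode)
}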
@@ -50,6 +50,7 @@ go_test(
     embed = [":go_default_library"],
     deps = [
         "//pkg/cloudprovider:go_default_library",
+        "//pkg/kubelet/apis:go_default_library",
         "//pkg/util/mount:go_default_library",
         "//pkg/volume:go_default_library",
         "//pkg/volume/testing:go_default_library",
@@ -276,7 +276,7 @@ type cdManager interface {
 	// Detaches the disk from the kubelet's host machine.
 	DetachDisk(unmounter *cinderVolumeUnmounter) error
 	// Creates a volume
-	CreateVolume(provisioner *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error)
+	CreateVolume(provisioner *cinderVolumeProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error)
 	// Deletes a volume
 	DeleteVolume(deleter *cinderVolumeDeleter) error
 }
@@ -507,7 +507,7 @@ func (c *cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopolo
 		return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
 	}
 
-	volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c)
+	volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c, selectedNode, allowedTopologies)
 	if err != nil {
 		return nil, err
 	}
@@ -550,6 +550,21 @@ func (c *cinderVolumeProvisioner) Provision(selectedNode *v1.Node, allowedTopolo
 		pv.Spec.AccessModes = c.plugin.GetAccessModes()
 	}
 
+	if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
+		requirements := make([]v1.NodeSelectorRequirement, 0)
+		for k, v := range labels {
+			if v != "" {
+				requirements = append(requirements, v1.NodeSelectorRequirement{Key: k, Operator: v1.NodeSelectorOpIn, Values: []string{v}})
+			}
+		}
+		if len(requirements) > 0 {
+			pv.Spec.NodeAffinity = new(v1.VolumeNodeAffinity)
+			pv.Spec.NodeAffinity.Required = new(v1.NodeSelector)
+			pv.Spec.NodeAffinity.Required.NodeSelectorTerms = make([]v1.NodeSelectorTerm, 1)
+			pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions = requirements
+		}
+	}
+
 	return pv, nil
 }
 
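
The hunk above (in the Cinder provisioner, presumably pkg/volume/cinder/cinder.go) turns every non-empty volume label into requirements inside a single NodeSelectorTerm on the provisioned PV. Here is a standalone sketch of that transformation, assuming only k8s.io/api/core/v1; the helper name volumeNodeAffinityFromLabels is hypothetical, since the real change inlines this logic in Provision behind the VolumeScheduling feature gate.

// Standalone sketch, not part of the commit: build a VolumeNodeAffinity from
// zone/region labels the way the new Provision code does.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// volumeNodeAffinityFromLabels is a hypothetical helper name for illustration.
func volumeNodeAffinityFromLabels(labels map[string]string) *v1.VolumeNodeAffinity {
	requirements := make([]v1.NodeSelectorRequirement, 0)
	for k, v := range labels {
		if v != "" {
			requirements = append(requirements, v1.NodeSelectorRequirement{
				Key:      k,
				Operator: v1.NodeSelectorOpIn,
				Values:   []string{v},
			})
		}
	}
	if len(requirements) == 0 {
		// No usable labels: leave pv.Spec.NodeAffinity unset, as the diff does.
		return nil
	}
	return &v1.VolumeNodeAffinity{
		Required: &v1.NodeSelector{
			NodeSelectorTerms: []v1.NodeSelectorTerm{
				{MatchExpressions: requirements},
			},
		},
	}
}

func main() {
	labels := map[string]string{
		"failure-domain.beta.kubernetes.io/zone":   "nova",
		"failure-domain.beta.kubernetes.io/region": "", // empty values are skipped
	}
	fmt.Printf("%+v\n", volumeNodeAffinityFromLabels(labels))
}

Design note: keeping all requirements in one NodeSelectorTerm means they are ANDed, so the PV only binds to nodes that match both the zone and the region labels.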
@@ -26,6 +26,7 @@ import (
 	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/types"
 	utiltesting "k8s.io/client-go/util/testing"
+	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	"k8s.io/kubernetes/pkg/util/mount"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetest "k8s.io/kubernetes/pkg/volume/testing"
@@ -116,8 +117,10 @@ func (fake *fakePDManager) DetachDisk(c *cinderVolumeUnmounter) error {
 	return nil
 }
 
-func (fake *fakePDManager) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) {
-	return "test-volume-name", 1, nil, "", nil
+func (fake *fakePDManager) CreateVolume(c *cinderVolumeProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) {
+	labels = make(map[string]string)
+	labels[kubeletapis.LabelZoneFailureDomain] = "nova"
+	return "test-volume-name", 1, labels, "", nil
 }
 
 func (fake *fakePDManager) DeleteVolume(cd *cinderVolumeDeleter) error {
@@ -210,6 +213,39 @@ func TestPlugin(t *testing.T) {
 		t.Errorf("Provision() returned unexpected volume size: %v", size)
 	}
 
+	// check nodeaffinity members
+	if persistentSpec.Spec.NodeAffinity == nil {
+		t.Errorf("Provision() returned unexpected nil NodeAffinity")
+	}
+
+	if persistentSpec.Spec.NodeAffinity.Required == nil {
+		t.Errorf("Provision() returned unexpected nil NodeAffinity.Required")
+	}
+
+	n := len(persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms)
+	if n != 1 {
+		t.Errorf("Provision() returned unexpected number of NodeSelectorTerms %d. Expected %d", n, 1)
+	}
+
+	n = len(persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions)
+	if n != 1 {
+		t.Errorf("Provision() returned unexpected number of MatchExpressions %d. Expected %d", n, 1)
+	}
+
+	req := persistentSpec.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions[0]
+
+	if req.Key != kubeletapis.LabelZoneFailureDomain {
+		t.Errorf("Provision() returned unexpected requirement key in NodeAffinity %v", req.Key)
+	}
+
+	if req.Operator != v1.NodeSelectorOpIn {
+		t.Errorf("Provision() returned unexpected requirement operator in NodeAffinity %v", req.Operator)
+	}
+
+	if len(req.Values) != 1 || req.Values[0] != "nova" {
+		t.Errorf("Provision() returned unexpected requirement value in NodeAffinity %v", req.Values)
+	}
+
 	// Test Deleter
 	volSpec := &volume.Spec{
 		PersistentVolume: persistentSpec,
@@ -162,7 +162,7 @@ func getZonesFromNodes(kubeClient clientset.Interface) (sets.String, error) {
 }
 
 // CreateVolume uses the cloud provider entrypoint for creating a volume
-func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) {
+func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner, node *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (volumeID string, volumeSizeGB int, volumeLabels map[string]string, fstype string, err error) {
 	cloud, err := c.plugin.getCloudProvider()
 	if err != nil {
 		return "", 0, nil, "", err
@@ -207,7 +207,11 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string,
 		// if we did not get any zones, lets leave it blank and gophercloud will
 		// use zone "nova" as default
 		if len(zones) > 0 {
-			availability = volutil.ChooseZoneForVolume(zones, c.options.PVC.Name)
+			availability, err = volutil.SelectZoneForVolume(false, false, "", nil, zones, node, allowedTopologies, c.options.PVC.Name)
+			if err != nil {
+				glog.V(2).Infof("error selecting zone for volume: %v", err)
+				return "", 0, nil, "", err
+			}
 		}
 	}
 
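
A reading note on the new zone-selection call (my interpretation, not stated in the diff): the leading false, false, "", nil arguments correspond to explicit "zone"/"zones" StorageClass parameters, which this code path does not use, so SelectZoneForVolume chooses among the zones that actually have nodes while honoring the scheduler-selected node and any allowedTopologies. The sketch below simply forwards the same arguments; the wrapper name selectCinderZone and the sample inputs in main are invented for illustration.

// Hypothetical wrapper, for illustration only: how DiskUtil.CreateVolume now
// delegates zone selection when no availability zone was pinned by the class.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	volutil "k8s.io/kubernetes/pkg/volume/util"
)

func selectCinderZone(zonesWithNodes sets.String, node *v1.Node,
	allowedTopologies []v1.TopologySelectorTerm, pvcName string) (string, error) {
	// No explicit "zone"/"zones" StorageClass parameters on this path,
	// hence the leading false, false, "", nil (same values as in the diff).
	return volutil.SelectZoneForVolume(false, false, "", nil,
		zonesWithNodes, node, allowedTopologies, pvcName)
}

func main() {
	node := &v1.Node{}
	node.Labels = map[string]string{"failure-domain.beta.kubernetes.io/zone": "nova"}
	zone, err := selectCinderZone(sets.NewString("nova", "nova2"), node, nil, "claim-1")
	fmt.Println(zone, err)
}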
@@ -221,8 +225,12 @@ func (util *DiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID string,
 	// these are needed that pod is spawning to same AZ
 	volumeLabels = make(map[string]string)
 	if IgnoreVolumeAZ == false {
-		volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ
-		volumeLabels[kubeletapis.LabelZoneRegion] = volumeRegion
+		if volumeAZ != "" {
+			volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ
+		}
+		if volumeRegion != "" {
+			volumeLabels[kubeletapis.LabelZoneRegion] = volumeRegion
+		}
 	}
 	return volumeID, volSizeGiB, volumeLabels, fstype, nil
 }
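
The final hunk (presumably pkg/volume/cinder/cinder_util.go) stops writing empty-valued zone/region labels. This matters now that Provision() converts labels into NodeAffinity requirements: empty values would be skipped there anyway, but omitting them at the source keeps the PV labels clean. A small standalone sketch of the new guard; buildVolumeLabels is a hypothetical helper, and the literal keys mirror the kubeletapis constants.

// Sketch only: build PV labels from the cloud's AZ/region, skipping empty values
// as the changed code does.
package main

import "fmt"

const (
	// These mirror kubeletapis.LabelZoneFailureDomain / LabelZoneRegion.
	labelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone"
	labelZoneRegion        = "failure-domain.beta.kubernetes.io/region"
)

func buildVolumeLabels(ignoreVolumeAZ bool, volumeAZ, volumeRegion string) map[string]string {
	volumeLabels := make(map[string]string)
	if !ignoreVolumeAZ {
		if volumeAZ != "" {
			volumeLabels[labelZoneFailureDomain] = volumeAZ
		}
		if volumeRegion != "" {
			volumeLabels[labelZoneRegion] = volumeRegion
		}
	}
	return volumeLabels
}

func main() {
	// Region unset on the cloud side: previously this produced an empty-valued
	// label; with the change it is simply omitted.
	fmt.Println(buildVolumeLabels(false, "nova", ""))
}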