Merge pull request #53523 from zetaab/ignore_volume_label

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add an option to ignore the volume zone label in dynamic provisioning

**What this PR does / why we need it**: this is needed when the OpenStack Cinder zone names do not match the compute zone names, for instance when there is only one Cinder zone but many compute zones; in that situation the volume's zone label would pin pods to a compute zone name that may not exist. A configuration sketch is shown below.
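
As an illustration only (a minimal sketch using the `[BlockStorage]` keys touched by this PR; all other cloud.conf sections are omitted and the values are placeholders), the option would be enabled in the OpenStack cloud provider configuration like this:

```
[BlockStorage]
bs-version = auto
trust-device-path = no
ignore-volume-az = yes
```

With `ignore-volume-az = yes`, dynamically provisioned Cinder volumes are not labeled with the volume's availability zone, so the scheduler does not require a compute zone with the same name.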

**Which issue this PR fixes**: fixes #53488

**Special notes for your reviewer**: 

```release-note
NONE
```
Authored by Kubernetes Submit Queue on 2017-10-09 09:17:04 -07:00, committed by GitHub
commit 92210a7d76
6 changed files with 19 additions and 12 deletions

View File

@@ -93,6 +93,7 @@ type LoadBalancerOpts struct {
 type BlockStorageOpts struct {
     BSVersion       string `gcfg:"bs-version"`        // overrides autodetection. v1 or v2. Defaults to auto
     TrustDevicePath bool   `gcfg:"trust-device-path"` // See Issue #33128
+    IgnoreVolumeAZ  bool   `gcfg:"ignore-volume-az"`
 }
 type RouterOpts struct {
@@ -187,6 +188,7 @@ func readConfig(config io.Reader) (Config, error) {
     // Set default values for config params
     cfg.BlockStorage.BSVersion = "auto"
     cfg.BlockStorage.TrustDevicePath = false
+    cfg.BlockStorage.IgnoreVolumeAZ = false
     cfg.Metadata.SearchOrder = fmt.Sprintf("%s,%s", configDriveID, metadataID)
     err := gcfg.ReadInto(&cfg, config)

View File

@@ -100,6 +100,7 @@ func TestReadConfig(t *testing.T) {
 [BlockStorage]
 bs-version = auto
 trust-device-path = yes
+ignore-volume-az = yes
 [Metadata]
 search-order = configDrive, metadataService
 `))
@@ -128,6 +129,9 @@ func TestReadConfig(t *testing.T) {
     if cfg.BlockStorage.BSVersion != "auto" {
         t.Errorf("incorrect bs.bs-version: %v", cfg.BlockStorage.BSVersion)
     }
+    if cfg.BlockStorage.IgnoreVolumeAZ != true {
+        t.Errorf("incorrect bs.IgnoreVolumeAZ: %v", cfg.BlockStorage.IgnoreVolumeAZ)
+    }
     if cfg.Metadata.SearchOrder != "configDrive, metadataService" {
         t.Errorf("incorrect md.search-order: %v", cfg.Metadata.SearchOrder)
     }
@@ -531,7 +535,7 @@ func TestVolumes(t *testing.T) {
     tags := map[string]string{
         "test": "value",
     }
-    vol, _, err := os.CreateVolume("kubernetes-test-volume-"+rand.String(10), 1, "", "", &tags)
+    vol, _, _, err := os.CreateVolume("kubernetes-test-volume-"+rand.String(10), 1, "", "", &tags)
     if err != nil {
         t.Fatalf("Cannot create a new Cinder volume: %v", err)
     }

View File

@@ -299,11 +299,11 @@ func (os *OpenStack) getVolume(volumeID string) (Volume, error) {
 }
 // CreateVolume creates a volume of given size (in GiB)
-func (os *OpenStack) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, error) {
+func (os *OpenStack) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error) {
     volumes, err := os.volumeService("")
     if err != nil || volumes == nil {
         glog.Errorf("Unable to initialize cinder client for region: %s", os.region)
-        return "", "", err
+        return "", "", os.bsOpts.IgnoreVolumeAZ, err
     }
     opts := VolumeCreateOpts{
@@ -320,11 +320,11 @@ func (os *OpenStack) CreateVolume(name string, size int, vtype, availability str
     if err != nil {
         glog.Errorf("Failed to create a %d GB volume: %v", size, err)
-        return "", "", err
+        return "", "", os.bsOpts.IgnoreVolumeAZ, err
     }
-    glog.Infof("Created volume %v in Availability Zone: %v", volumeID, volumeAZ)
-    return volumeID, volumeAZ, nil
+    glog.Infof("Created volume %v in Availability Zone: %v Ignore volume AZ: %v", volumeID, volumeAZ, os.bsOpts.IgnoreVolumeAZ)
+    return volumeID, volumeAZ, os.bsOpts.IgnoreVolumeAZ, nil
 }
 // GetDevicePath returns the path of an attached block storage volume, specified by its id.
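
To make the signature change concrete, here is a minimal, self-contained caller-side sketch. The type names (`cinderProvider`, `fakeCinder`, `provision`) are hypothetical and exist only for this example; only the `CreateVolume` signature mirrors the PR:

```go
package main

import "fmt"

// cinderProvider mirrors only the updated CreateVolume signature; the
// interface name is hypothetical.
type cinderProvider interface {
	CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error)
}

// fakeCinder is a stand-in implementation used purely for illustration.
type fakeCinder struct {
	ignoreVolumeAZ bool
}

func (f fakeCinder) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error) {
	// A real implementation would call the Cinder API; here we echo fixed
	// values plus the configured ignore-volume-az flag.
	return "vol-123", "nova", f.ignoreVolumeAZ, nil
}

func provision(cloud cinderProvider) {
	volumeID, volumeAZ, ignoreVolumeAZ, err := cloud.CreateVolume("kubernetes-dynamic-pv", 1, "", "", nil)
	if err != nil {
		fmt.Println("create failed:", err)
		return
	}
	fmt.Printf("created %s in AZ %s (ignore AZ label: %v)\n", volumeID, volumeAZ, ignoreVolumeAZ)
}

func main() {
	provision(fakeCinder{ignoreVolumeAZ: true})
}
```

The third return value simply forwards the provider-level `ignore-volume-az` setting, so callers such as the Cinder volume plugin can decide whether to attach the zone label without reading the cloud configuration themselves.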

View File

@@ -571,8 +571,8 @@ func (testcase *testcase) ShouldTrustDevicePath() bool {
     return true
 }
-func (testcase *testcase) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, error) {
-    return "", "", errors.New("Not implemented")
+func (testcase *testcase) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error) {
+    return "", "", false, errors.New("Not implemented")
 }
 func (testcase *testcase) GetDevicePath(volumeID string) string {

View File

@@ -46,7 +46,7 @@ type CinderProvider interface {
     AttachDisk(instanceID, volumeID string) (string, error)
     DetachDisk(instanceID, volumeID string) error
     DeleteVolume(volumeID string) error
-    CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, error)
+    CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (string, string, bool, error)
     GetDevicePath(volumeID string) string
     InstanceID() (string, error)
     GetAttachmentDiskPath(instanceID, volumeID string) (string, error)

View File

@@ -204,7 +204,7 @@ func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID s
         }
     }
-    volumeID, volumeAZ, errr := cloud.CreateVolume(name, volSizeGB, vtype, availability, c.options.CloudTags)
+    volumeID, volumeAZ, IgnoreVolumeAZ, errr := cloud.CreateVolume(name, volSizeGB, vtype, availability, c.options.CloudTags)
     if errr != nil {
         glog.V(2).Infof("Error creating cinder volume: %v", errr)
         return "", 0, nil, "", errr
@@ -213,8 +213,9 @@ func (util *CinderDiskUtil) CreateVolume(c *cinderVolumeProvisioner) (volumeID s
     // these are needed that pod is spawning to same AZ
     volumeLabels = make(map[string]string)
-    volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ
+    if IgnoreVolumeAZ == false {
+        volumeLabels[kubeletapis.LabelZoneFailureDomain] = volumeAZ
+    }
     return volumeID, volSizeGB, volumeLabels, fstype, nil
 }
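
The end-to-end effect is easiest to see in isolation. The sketch below uses a hypothetical helper (`volumeLabelsFor`) that mirrors the labeling decision added above; the `zoneLabel` constant is the string behind `kubeletapis.LabelZoneFailureDomain` at the time of this PR:

```go
package main

import "fmt"

// zoneLabel is the failure-domain label key referenced above as
// kubeletapis.LabelZoneFailureDomain.
const zoneLabel = "failure-domain.beta.kubernetes.io/zone"

// volumeLabelsFor is a hypothetical helper mirroring the new behaviour of
// CinderDiskUtil.CreateVolume: the zone label is attached only when the
// availability zone is not ignored.
func volumeLabelsFor(volumeAZ string, ignoreVolumeAZ bool) map[string]string {
	labels := make(map[string]string)
	if !ignoreVolumeAZ {
		labels[zoneLabel] = volumeAZ
	}
	return labels
}

func main() {
	fmt.Println(volumeLabelsFor("cinder-az-1", false)) // map[failure-domain.beta.kubernetes.io/zone:cinder-az-1]
	fmt.Println(volumeLabelsFor("cinder-az-1", true))  // map[] (no zone constraint on the PV)
}
```

Without the zone label, the PersistentVolume is not tied to a failure domain, so pods using it can be scheduled into any compute zone even when Cinder exposes only a single zone.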