remove Azure Disk in-tree driver code

fix
This commit is contained in:
andyzhangx 2023-03-06 13:51:12 +00:00
parent 3489796d5c
commit 5d0a54dcb5
32 changed files with 7 additions and 3835 deletions

View File

@ -25,7 +25,6 @@ package main
import (
// NOTE: Importing all in-tree cloud-providers is not required when
// implementing an out-of-tree cloud-provider.
_ "k8s.io/legacy-cloud-providers/azure"
_ "k8s.io/legacy-cloud-providers/gce"
_ "k8s.io/legacy-cloud-providers/vsphere"
)

View File

@ -26,7 +26,6 @@ import (
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/azure_file"
"k8s.io/kubernetes/pkg/volume/azuredd"
"k8s.io/kubernetes/pkg/volume/csimigration"
"k8s.io/kubernetes/pkg/volume/gcepd"
"k8s.io/kubernetes/pkg/volume/portworx"
@ -63,7 +62,6 @@ type pluginInfo struct {
func appendAttachableLegacyProviderVolumes(allPlugins []volume.VolumePlugin, featureGate featuregate.FeatureGate) ([]volume.VolumePlugin, error) {
pluginMigrationStatus := make(map[string]pluginInfo)
pluginMigrationStatus[plugins.GCEPDInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationGCE, pluginUnregisterFeature: features.InTreePluginGCEUnregister, pluginProbeFunction: gcepd.ProbeVolumePlugins}
pluginMigrationStatus[plugins.AzureDiskInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationAzureDisk, pluginUnregisterFeature: features.InTreePluginAzureDiskUnregister, pluginProbeFunction: azuredd.ProbeVolumePlugins}
pluginMigrationStatus[plugins.VSphereInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationvSphere, pluginUnregisterFeature: features.InTreePluginvSphereUnregister, pluginProbeFunction: vsphere_volume.ProbeVolumePlugins}
pluginMigrationStatus[plugins.PortworxVolumePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationPortworx, pluginUnregisterFeature: features.InTreePluginPortworxUnregister, pluginProbeFunction: portworx.ProbeVolumePlugins}
pluginMigrationStatus[plugins.RBDVolumePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationRBD, pluginUnregisterFeature: features.InTreePluginRBDUnregister, pluginProbeFunction: rbd.ProbeVolumePlugins}

View File

@ -31,7 +31,6 @@ import (
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/azure_file"
"k8s.io/kubernetes/pkg/volume/azuredd"
"k8s.io/kubernetes/pkg/volume/csimigration"
"k8s.io/kubernetes/pkg/volume/gcepd"
"k8s.io/kubernetes/pkg/volume/portworx"
@ -69,7 +68,6 @@ type pluginInfo struct {
func appendLegacyProviderVolumes(allPlugins []volume.VolumePlugin, featureGate featuregate.FeatureGate) ([]volume.VolumePlugin, error) {
pluginMigrationStatus := make(map[string]pluginInfo)
pluginMigrationStatus[plugins.GCEPDInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationGCE, pluginUnregisterFeature: features.InTreePluginGCEUnregister, pluginProbeFunction: gcepd.ProbeVolumePlugins}
pluginMigrationStatus[plugins.AzureDiskInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationAzureDisk, pluginUnregisterFeature: features.InTreePluginAzureDiskUnregister, pluginProbeFunction: azuredd.ProbeVolumePlugins}
pluginMigrationStatus[plugins.AzureFileInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationAzureFile, pluginUnregisterFeature: features.InTreePluginAzureFileUnregister, pluginProbeFunction: azure_file.ProbeVolumePlugins}
pluginMigrationStatus[plugins.VSphereInTreePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationvSphere, pluginUnregisterFeature: features.InTreePluginvSphereUnregister, pluginProbeFunction: vsphere_volume.ProbeVolumePlugins}
pluginMigrationStatus[plugins.PortworxVolumePluginName] = pluginInfo{pluginMigrationFeature: features.CSIMigrationPortworx, pluginUnregisterFeature: features.InTreePluginPortworxUnregister, pluginProbeFunction: portworx.ProbeVolumePlugins}

View File

@ -108,14 +108,6 @@ const (
// Allow the usage of options to fine-tune the cpumanager policies.
CPUManagerPolicyOptions featuregate.Feature = "CPUManagerPolicyOptions"
// owner: @andyzhangx
// alpha: v1.15
// beta: v1.19
// GA: v1.24
//
// Enables the Azure Disk in-tree driver to Azure Disk Driver migration feature.
CSIMigrationAzureDisk featuregate.Feature = "CSIMigrationAzureDisk"
// owner: @andyzhangx
// alpha: v1.15
// beta: v1.21
@ -878,8 +870,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
CPUManagerPolicyOptions: {Default: true, PreRelease: featuregate.Beta},
CSIMigrationAzureDisk: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26
CSIMigrationAzureFile: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28
CSIMigrationGCE: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27

View File

@ -22,6 +22,5 @@ package cadvisor
import (
// Register cloud info providers.
// TODO(#68522): Remove this in 1.20+ once the cAdvisor endpoints are removed.
_ "github.com/google/cadvisor/utils/cloudinfo/azure"
_ "github.com/google/cadvisor/utils/cloudinfo/gce"
)

View File

@ -92,7 +92,6 @@ import (
schedulerframework "k8s.io/kubernetes/pkg/scheduler/framework"
"k8s.io/kubernetes/pkg/util/oom"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/azuredd"
"k8s.io/kubernetes/pkg/volume/gcepd"
_ "k8s.io/kubernetes/pkg/volume/hostpath"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
@ -368,7 +367,6 @@ func newTestKubeletWithImageList(
allPlugins = append(allPlugins, plug)
} else {
allPlugins = append(allPlugins, gcepd.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, azuredd.ProbeVolumePlugins()...)
}
var prober volume.DynamicPluginProber // TODO (#51147) inject mock

View File

@ -50,9 +50,7 @@ func isCSIMigrationOn(csiNode *storagev1.CSINode, pluginName string) bool {
return false
}
case csilibplugins.AzureDiskInTreePluginName:
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureDisk) {
return false
}
return true
case csilibplugins.CinderInTreePluginName:
return true
case csilibplugins.RBDVolumePluginName:

View File

@ -1090,7 +1090,7 @@ func isCSIMigrationOnForPlugin(pluginName string) bool {
case csiplugins.GCEPDInTreePluginName:
return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationGCE)
case csiplugins.AzureDiskInTreePluginName:
return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureDisk)
return true
case csiplugins.CinderInTreePluginName:
return true
case csiplugins.PortworxVolumePluginName:

View File

@ -1,20 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- andyzhangx
- feiskyer
- khenidak
reviewers:
- andyzhangx
- aramase
- feiskyer
- jingxu97
- jsafrane
- msau42
- khenidak
- ritazh
- saad-ali
emeritus_approvers:
- karataliu
- rootfs
- brendandburns

View File

@ -1,305 +0,0 @@
//go:build !providerless
// +build !providerless
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azuredd
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strconv"
"strings"
"time"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/wait"
cloudprovider "k8s.io/cloud-provider"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/legacy-cloud-providers/azure"
)
// azureDiskDetacher detaches Azure data disks from VMs and unmounts their
// global mount points, using the legacy in-tree Azure cloud provider.
type azureDiskDetacher struct {
	plugin *azureDataDiskPlugin
	cloud  *azure.Cloud
}

// azureDiskAttacher attaches Azure data disks to VMs and mounts their device
// paths, using the legacy in-tree Azure cloud provider.
type azureDiskAttacher struct {
	plugin *azureDataDiskPlugin
	cloud  *azure.Cloud
}

// Compile-time checks that the types satisfy the volume plugin interfaces.
var _ volume.Attacher = &azureDiskAttacher{}

var _ volume.Detacher = &azureDiskDetacher{}

var _ volume.DeviceMounter = &azureDiskAttacher{}

var _ volume.DeviceUnmounter = &azureDiskDetacher{}
// Attach attaches a volume.Spec to an Azure VM referenced by NodeName, returning the disk's LUN
func (a *azureDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		klog.Warningf("failed to get azure disk spec (%v)", err)
		return "", err
	}

	diskController, err := getDiskController(a.plugin.host)
	if err != nil {
		return "", err
	}

	// GetDiskLun answers "is this disk already attached, and at which LUN?".
	lun, err := diskController.GetDiskLun(volumeSource.DiskName, volumeSource.DataDiskURI, nodeName)
	if err == cloudprovider.InstanceNotFound {
		// Log error and continue with attach
		klog.Warningf(
			"Error checking if volume is already attached to current node (%q). Will continue and try attach anyway. err=%v",
			nodeName, err)
	}

	if err == nil {
		// Volume is already attached to node.
		klog.V(2).Infof("Attach operation is successful. volume %q is already attached to node %q at lun %d.", volumeSource.DiskName, nodeName, lun)
	} else {
		// Any error (including InstanceNotFound) falls through to an attach
		// attempt; AttachDisk's result replaces lun/err.
		klog.V(2).Infof("GetDiskLun returned: %v. Initiating attaching volume %q to node %q.", err, volumeSource.DataDiskURI, nodeName)
		// NOTE(review): Kind and CachingMode are dereferenced without nil
		// checks — presumably defaulted earlier in the volume lifecycle;
		// confirm before reusing this path.
		isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
		lun, err = diskController.AttachDisk(isManagedDisk, volumeSource.DiskName, volumeSource.DataDiskURI, nodeName, compute.CachingTypes(*volumeSource.CachingMode))
		if err == nil {
			klog.V(2).Infof("Attach operation successful: volume %q attached to node %q.", volumeSource.DataDiskURI, nodeName)
		} else {
			klog.V(2).Infof("Attach volume %q to instance %q failed with %v", volumeSource.DataDiskURI, nodeName, err)
			return "", err
		}
	}

	// The LUN is returned as a string; WaitForAttach later parses it to
	// locate the device node on the host.
	return strconv.Itoa(int(lun)), err
}
// VolumesAreAttached checks, in a single cloud call, which of the given specs
// are still attached to nodeName. Every recognized azure-disk spec starts out
// marked attached=true; only disks the cloud explicitly reports as detached
// are flipped to false.
func (a *azureDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
	volumesAttachedCheck := make(map[*volume.Spec]bool)
	volumeSpecMap := make(map[string]*volume.Spec)
	volumeIDList := []string{}

	for _, spec := range specs {
		volumeSource, _, err := getVolumeSource(spec)
		if err != nil {
			// Non-azure-disk specs are skipped entirely (left out of the
			// result map) rather than reported as detached.
			klog.Errorf("azureDisk - Error getting volume (%q) source : %v", spec.Name(), err)
			continue
		}

		volumeIDList = append(volumeIDList, volumeSource.DiskName)
		volumesAttachedCheck[spec] = true
		volumeSpecMap[volumeSource.DiskName] = spec
	}

	diskController, err := getDiskController(a.plugin.host)
	if err != nil {
		return nil, err
	}

	attachedResult, err := diskController.DisksAreAttached(volumeIDList, nodeName)
	if err != nil {
		// Log error and continue with attach
		klog.Errorf(
			"azureDisk - Error checking if volumes (%v) are attached to current node (%q). err=%v",
			volumeIDList, nodeName, err)
		// Return the optimistic "all attached" map together with the error so
		// the caller decides how to treat the partial answer.
		return volumesAttachedCheck, err
	}

	// NOTE(review): assumes attachedResult only contains disk names from
	// volumeIDList; an unknown key would yield a nil spec and panic on
	// spec.Name() below — confirm against DisksAreAttached's contract.
	for volumeID, attached := range attachedResult {
		if !attached {
			spec := volumeSpecMap[volumeID]
			volumesAttachedCheck[spec] = false
			klog.V(2).Infof("azureDisk - VolumesAreAttached: check volume %q (specName: %q) is no longer attached", volumeID, spec.Name())
		}
	}
	return volumesAttachedCheck, nil
}
// WaitForAttach blocks until the attached disk's device node appears on this
// node (or the timeout expires) and returns its path. devicePath is either an
// already-resolved /dev path or the LUN string returned by Attach.
func (a *azureDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
	// devicePath could be a LUN number or
	// "/dev/disk/azure/scsi1/lunx", "/dev/sdx" on Linux node
	// "/dev/diskx" on Windows node
	if strings.HasPrefix(devicePath, "/dev/") {
		// Already a concrete device path; nothing to resolve.
		return devicePath, nil
	}

	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	nodeName := types.NodeName(a.plugin.host.GetHostName())
	diskName := volumeSource.DiskName

	// Not a /dev path, so it must be the LUN number produced by Attach.
	lun, err := strconv.Atoi(devicePath)
	if err != nil {
		return "", fmt.Errorf("parse %s failed with error: %v, diskName: %s, nodeName: %s", devicePath, err, diskName, nodeName)
	}

	exec := a.plugin.host.GetExec(a.plugin.GetPluginName())

	io := &osIOHandler{}
	// Kick the SCSI bus so the newly attached LUN shows up as a device.
	scsiHostRescan(io, exec)

	newDevicePath := ""

	// Poll once per second until the device for this LUN appears or the
	// caller's timeout elapses.
	err = wait.PollImmediate(1*time.Second, timeout, func() (bool, error) {
		if newDevicePath, err = findDiskByLun(int(lun), io, exec); err != nil {
			return false, fmt.Errorf("azureDisk - WaitForAttach ticker failed node (%s) disk (%s) lun(%v) err(%s)", nodeName, diskName, lun, err)
		}

		// did we find it?
		if newDevicePath != "" {
			return true, nil
		}
		// wait until timeout
		return false, nil
	})
	// Distinguish "polling finished but device never appeared" from success.
	if err == nil && newDevicePath == "" {
		err = fmt.Errorf("azureDisk - WaitForAttach failed within timeout node (%s) diskId:(%s) lun:(%v)", nodeName, diskName, lun)
	}

	return newDevicePath, err
}
// to avoid name conflicts (similar *.vhd name)
// we use hash diskUri and we use it as device mount target.
// this is generalized for both managed and blob disks
// we also prefix the hash with m/b based on disk kind
func (a *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}

	// A nil Kind means this spec was reconstructed from node info only; fall
	// back to the raw disk URI as the mount-path component.
	if volumeSource.Kind == nil {
		return filepath.Join(a.plugin.host.GetPluginDir(azureDataDiskPluginName), util.MountsInGlobalPDPath, volumeSource.DataDiskURI), nil
	}

	managed := *volumeSource.Kind == v1.AzureManagedDisk
	return makeGlobalPDPath(a.plugin.host, volumeSource.DataDiskURI, managed)
}
// MountDevice formats (if necessary) and mounts the attached disk's device at
// deviceMountPath — the per-disk global mount point shared by all pods that
// use this volume on the node.
func (a *azureDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string, _ volume.DeviceMounterArgs) error {
	mounter := a.plugin.host.GetMounter(azureDataDiskPluginName)
	notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
	if err != nil {
		if os.IsNotExist(err) {
			// Mount point does not exist yet; create it.
			dir := deviceMountPath
			if runtime.GOOS == "windows" {
				// in windows, as we use mklink, only need to MkdirAll for parent directory
				dir = filepath.Dir(deviceMountPath)
			}
			if err := os.MkdirAll(dir, 0750); err != nil {
				return fmt.Errorf("azureDisk - mountDevice:CreateDirectory failed with %s", err)
			}
			notMnt = true
		} else {
			return fmt.Errorf("azureDisk - mountDevice:IsLikelyNotMountPoint failed with %s", err)
		}
	}

	if !notMnt {
		// testing original mount point, make sure the mount link is valid
		if _, err := (&osIOHandler{}).ReadDir(deviceMountPath); err != nil {
			// mount link is invalid, now unmount and remount later
			klog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", deviceMountPath, err)
			if err := mounter.Unmount(deviceMountPath); err != nil {
				klog.Errorf("azureDisk - Unmount deviceMountPath %s failed with %v", deviceMountPath, err)
				return err
			}
			notMnt = true
		}
	}

	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return err
	}

	options := []string{}
	// Only mount when the target is not already a valid mount point
	// (idempotent on repeated calls).
	if notMnt {
		diskMounter := util.NewSafeFormatAndMountFromHost(azureDataDiskPluginName, a.plugin.host)
		mountOptions := util.MountOptionFromSpec(spec, options...)
		if runtime.GOOS == "windows" {
			// only parse devicePath on Windows node
			diskNum, err := getDiskNum(devicePath)
			if err != nil {
				return err
			}
			devicePath = diskNum
		}
		// NOTE(review): FSType is dereferenced without a nil check — presumably
		// defaulted earlier in the volume lifecycle; confirm before reuse.
		err = diskMounter.FormatAndMount(devicePath, deviceMountPath, *volumeSource.FSType, mountOptions)
		if err != nil {
			// Best-effort removal of the directory we may have created above.
			if cleanErr := os.Remove(deviceMountPath); cleanErr != nil {
				return fmt.Errorf("azureDisk - mountDevice:FormatAndMount failed with %s and clean up failed with :%v", err, cleanErr)
			}
			return fmt.Errorf("azureDisk - mountDevice:FormatAndMount failed with %s", err)
		}
	}
	return nil
}
// Detach detaches disk from Azure VM.
// It returns an error if diskURI is empty, the cloud provider is unavailable,
// or the cloud-side detach fails.
func (d *azureDiskDetacher) Detach(diskURI string, nodeName types.NodeName) error {
	if diskURI == "" {
		return fmt.Errorf("invalid disk to detach: %q", diskURI)
	}

	diskController, err := getDiskController(d.plugin.host)
	if err != nil {
		return err
	}

	if err = diskController.DetachDisk("", diskURI, nodeName); err != nil {
		klog.Errorf("failed to detach azure disk %q, err %v", diskURI, err)
		return err
	}

	// FIX: the original logged "was detached" unconditionally, even when
	// DetachDisk had just failed; log success only on the success path.
	klog.V(2).Infof("azureDisk - disk:%s was detached from node:%v", diskURI, nodeName)
	return nil
}
// UnmountDevice unmounts the volume on the node
func (d *azureDiskDetacher) UnmountDevice(deviceMountPath string) error {
	if runtime.GOOS == "windows" {
		// Flush data cache for windows because it does not do so automatically during unmount device
		exec := d.plugin.host.GetExec(d.plugin.GetPluginName())
		if err := util.WriteVolumeCache(deviceMountPath, exec); err != nil {
			return err
		}
	}

	err := mount.CleanupMountPoint(deviceMountPath, d.plugin.host.GetMounter(d.plugin.GetPluginName()), false)
	if err != nil {
		klog.Warningf("azureDisk - Device %s failed to unmount with error: %s", deviceMountPath, err.Error())
		return err
	}

	klog.V(2).Infof("azureDisk - Device %s was unmounted", deviceMountPath)
	return nil
}

View File

@ -1,76 +0,0 @@
//go:build !providerless
// +build !providerless
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azuredd
import (
"fmt"
"testing"
"time"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/volume"
)
// createVolSpec builds a direct-volume (non-PV) spec referencing an Azure
// disk with the given name and read-only flag.
func createVolSpec(name string, readOnly bool) *volume.Spec {
	azureDisk := &v1.AzureDiskVolumeSource{
		DiskName: name,
		ReadOnly: &readOnly,
	}
	return &volume.Spec{
		Volume: &v1.Volume{
			VolumeSource: v1.VolumeSource{AzureDisk: azureDisk},
		},
	}
}
// TestWaitForAttach verifies that already-resolved /dev paths are returned
// unchanged without consulting the cloud provider.
func TestWaitForAttach(t *testing.T) {
	cases := []struct {
		devicePath  string
		expected    string
		expectError bool
	}{
		{devicePath: "/dev/disk/azure/scsi1/lun0", expected: "/dev/disk/azure/scsi1/lun0"},
		{devicePath: "/dev/sdc", expected: "/dev/sdc"},
		{devicePath: "/dev/disk0", expected: "/dev/disk0"},
	}

	attacher := azureDiskAttacher{}
	spec := createVolSpec("fakedisk", false)

	for _, tc := range cases {
		result, err := attacher.WaitForAttach(spec, tc.devicePath, nil, 3000*time.Millisecond)
		assert.Equal(t, result, tc.expected)
		assert.Equal(t, err != nil, tc.expectError, fmt.Sprintf("error msg: %v", err))
	}
}

View File

@ -1,220 +0,0 @@
//go:build !providerless
// +build !providerless
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azuredd
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
libstrings "strings"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/legacy-cloud-providers/azure"
utilstrings "k8s.io/utils/strings"
)
const (
defaultStorageAccountType = compute.StandardSSDLRS
defaultAzureDiskKind = v1.AzureManagedDisk
defaultAzureDataDiskCachingMode = v1.AzureDataDiskCachingReadOnly
)
type dataDisk struct {
volume.MetricsProvider
volumeName string
diskName string
podUID types.UID
plugin *azureDataDiskPlugin
}
var (
supportedCachingModes = sets.NewString(
string(api.AzureDataDiskCachingNone),
string(api.AzureDataDiskCachingReadOnly),
string(api.AzureDataDiskCachingReadWrite))
supportedDiskKinds = sets.NewString(
string(api.AzureSharedBlobDisk), // deprecated
string(api.AzureDedicatedBlobDisk), // deprecated
string(api.AzureManagedDisk))
// only for Windows node
winDiskNumRE = regexp.MustCompile(`/dev/disk(.+)`)
)
// getPath returns the pod-local directory for this volume under the azure
// data-disk plugin's escaped name.
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
	pluginDir := utilstrings.EscapeQualifiedName(azureDataDiskPluginName)
	return host.GetPodVolumeDir(uid, pluginDir, volName)
}
// creates a unique path for disks (even if they share the same *.vhd name)
func makeGlobalPDPath(host volume.VolumeHost, diskURI string, isManaged bool) (string, error) {
	// always lower the uri because users may enter it in caps
	hashed := azure.MakeCRC32(libstrings.ToLower(diskURI))

	// "{m for managed b for blob}{hashed diskURI or DiskId depending on disk kind }"
	kindPrefix := "b"
	if isManaged {
		kindPrefix = "m"
	}

	return filepath.Join(host.GetPluginDir(azureDataDiskPluginName), util.MountsInGlobalPDPath, kindPrefix+hashed), nil
}
// makeDataDisk assembles a dataDisk value; filesystem metrics are only wired
// up when the disk is bound to a pod (non-empty podUID).
func makeDataDisk(volumeName string, podUID types.UID, diskName string, host volume.VolumeHost, plugin *azureDataDiskPlugin) *dataDisk {
	dd := &dataDisk{
		volumeName: volumeName,
		diskName:   diskName,
		podUID:     podUID,
		plugin:     plugin,
	}
	if podUID != "" {
		dd.MetricsProvider = volume.NewMetricsStatFS(getPath(podUID, volumeName, host))
	}
	return dd
}
// getVolumeSource extracts the AzureDisk source from either a direct volume
// or a persistent volume spec, along with its effective read-only flag.
func getVolumeSource(spec *volume.Spec) (volumeSource *v1.AzureDiskVolumeSource, readOnly bool, err error) {
	switch {
	case spec.Volume != nil && spec.Volume.AzureDisk != nil:
		src := spec.Volume.AzureDisk
		return src, src.ReadOnly != nil && *src.ReadOnly, nil
	case spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil:
		return spec.PersistentVolume.Spec.AzureDisk, spec.ReadOnly, nil
	}
	return nil, false, fmt.Errorf("azureDisk - Spec does not reference an Azure disk volume type")
}
// normalizeKind validates a disk kind string, defaulting an empty value to
// the managed-disk kind.
func normalizeKind(kind string) (v1.AzureDataDiskKind, error) {
	switch {
	case kind == "":
		return defaultAzureDiskKind, nil
	case supportedDiskKinds.Has(kind):
		return v1.AzureDataDiskKind(kind), nil
	}
	return "", fmt.Errorf("azureDisk - %s is not supported disk kind. Supported values are %s", kind, supportedDiskKinds.List())
}
// normalizeStorageAccountType validates a storage account type (SKU) string,
// defaulting an empty value and rejecting anything the SDK does not list.
func normalizeStorageAccountType(storageAccountType string) (compute.DiskStorageAccountTypes, error) {
	if storageAccountType == "" {
		return defaultStorageAccountType, nil
	}

	candidate := compute.DiskStorageAccountTypes(storageAccountType)
	supportedSkuNames := compute.PossibleDiskStorageAccountTypesValues()
	for _, sku := range supportedSkuNames {
		if candidate == sku {
			return candidate, nil
		}
	}

	return "", fmt.Errorf("azureDisk - %s is not supported sku/storageaccounttype. Supported values are %s", storageAccountType, supportedSkuNames)
}
// normalizeCachingMode validates a caching mode, defaulting an empty value
// to the package default (ReadOnly).
func normalizeCachingMode(cachingMode v1.AzureDataDiskCachingMode) (v1.AzureDataDiskCachingMode, error) {
	switch {
	case cachingMode == "":
		return defaultAzureDataDiskCachingMode, nil
	case supportedCachingModes.Has(string(cachingMode)):
		return cachingMode, nil
	}
	return "", fmt.Errorf("azureDisk - %s is not supported cachingmode. Supported values are %s", cachingMode, supportedCachingModes.List())
}
// ioHandler abstracts the filesystem operations used by the device-discovery
// helpers so they can be faked out in unit tests.
type ioHandler interface {
	ReadDir(dirname string) ([]os.FileInfo, error)
	WriteFile(filename string, data []byte, perm os.FileMode) error
	Readlink(name string) (string, error)
	ReadFile(filename string) ([]byte, error)
}

//TODO: check if priming the iscsi interface is actually needed

// osIOHandler is the production ioHandler backed by the real OS filesystem.
type osIOHandler struct{}

func (handler *osIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
	return ioutil.ReadDir(dirname)
}
func (handler *osIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
	return ioutil.WriteFile(filename, data, perm)
}
func (handler *osIOHandler) Readlink(name string) (string, error) {
	return os.Readlink(name)
}
func (handler *osIOHandler) ReadFile(filename string) ([]byte, error) {
	return ioutil.ReadFile(filename)
}
// getDiskController returns the host's cloud provider as a DiskController,
// failing when the configured provider is not the Azure cloud.
func getDiskController(host volume.VolumeHost) (DiskController, error) {
	provider := host.GetCloudProvider()
	if az, ok := provider.(*azure.Cloud); ok && az != nil {
		return az, nil
	}
	return nil, fmt.Errorf("AzureDisk - failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", provider)
}
// getCloud returns the host's cloud provider as the concrete Azure cloud,
// failing when a different provider is configured.
func getCloud(host volume.VolumeHost) (*azure.Cloud, error) {
	provider := host.GetCloudProvider()
	if az, ok := provider.(*azure.Cloud); ok && az != nil {
		return az, nil
	}
	return nil, fmt.Errorf("AzureDisk - failed to get Azure Cloud Provider. GetCloudProvider returned %v instead", provider)
}
// strFirstLetterToUpper upper-cases the first byte of str, leaving the rest
// untouched. An empty string is returned unchanged.
//
// FIX: the original guard was `len(str) < 2`, which also returned
// single-character strings unchanged ("a" stayed "a") — an off-by-one.
// NOTE(review): this operates on bytes, so only an ASCII first letter is
// upper-cased correctly — confirm callers never pass multi-byte first runes.
func strFirstLetterToUpper(str string) string {
	if len(str) == 0 {
		return str
	}
	return libstrings.ToUpper(str[:1]) + str[1:]
}
// getDiskNum : extract the disk num from a device path,
// deviceInfo format could be like this: e.g. /dev/disk2
func getDiskNum(deviceInfo string) (string, error) {
	m := winDiskNumRE.FindStringSubmatch(deviceInfo)
	if len(m) != 2 {
		return "", fmt.Errorf("cannot parse deviceInfo: %s, correct format: /dev/disk?", deviceInfo)
	}
	return m[1], nil
}

View File

@ -1,184 +0,0 @@
//go:build !providerless && linux
// +build !providerless,linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azuredd
import (
"fmt"
"path/filepath"
"strconv"
libstrings "strings"
"k8s.io/klog/v2"
utilexec "k8s.io/utils/exec"
)
// exclude those used by azure as resource and OS root in /dev/disk/azure, /dev/disk/azure/scsi0
// "/dev/disk/azure/scsi0" dir is populated in Standard_DC4s/DC2s on Ubuntu 18.04
//
// listAzureDiskPath returns the kernel device names (e.g. "sda") that the
// /dev/disk/azure symlink trees point at — the disks to exclude from
// data-disk discovery. Unreadable directories are silently skipped.
func listAzureDiskPath(io ioHandler) []string {
	var azureDiskList []string
	azureResourcePaths := []string{"/dev/disk/azure/", "/dev/disk/azure/scsi0/"}
	for _, azureDiskPath := range azureResourcePaths {
		if dirs, err := io.ReadDir(azureDiskPath); err == nil {
			for _, f := range dirs {
				name := f.Name()
				diskPath := filepath.Join(azureDiskPath, name)
				if link, linkErr := io.Readlink(diskPath); linkErr == nil {
					// keep only the final path element of the link target
					sd := link[(libstrings.LastIndex(link, "/") + 1):]
					azureDiskList = append(azureDiskList, sd)
				}
			}
		}
	}
	klog.V(12).Infof("Azure sys disks paths: %v", azureDiskList)
	return azureDiskList
}
// getDiskLinkByDevName get disk link by device name from devLinkPath, e.g. /dev/disk/azure/, /dev/disk/by-id/
func getDiskLinkByDevName(io ioHandler, devLinkPath, devName string) (string, error) {
	dirs, err := io.ReadDir(devLinkPath)
	klog.V(12).Infof("azureDisk - begin to find %s from %s", devName, devLinkPath)
	if err != nil {
		return "", fmt.Errorf("read %s error: %v", devLinkPath, err)
	}
	for _, f := range dirs {
		diskPath := devLinkPath + f.Name()
		klog.V(12).Infof("azureDisk - begin to Readlink: %s", diskPath)
		link, linkErr := io.Readlink(diskPath)
		if linkErr != nil {
			klog.Warningf("azureDisk - read link (%s) error: %v", diskPath, linkErr)
			continue
		}
		if libstrings.HasSuffix(link, devName) {
			return diskPath, nil
		}
	}
	return "", fmt.Errorf("device name(%s) is not found under %s", devName, devLinkPath)
}
// scsiHostRescan asks every SCSI host adapter to rescan its bus by writing
// "- - -" (wildcard channel/target/lun) to its sysfs scan file, so a newly
// attached LUN appears as a device node. All failures are logged and ignored.
// NOTE(review): the exec parameter is unused on this path — presumably kept
// for signature parity with another platform's implementation; confirm.
func scsiHostRescan(io ioHandler, exec utilexec.Interface) {
	scsiPath := "/sys/class/scsi_host/"
	if dirs, err := io.ReadDir(scsiPath); err == nil {
		for _, f := range dirs {
			name := scsiPath + f.Name() + "/scan"
			data := []byte("- - -")
			if err = io.WriteFile(name, data, 0666); err != nil {
				klog.Warningf("failed to rescan scsi host %s", name)
			}
		}
	} else {
		klog.Warningf("failed to read %s, err %v", scsiPath, err)
	}
}
// findDiskByLun returns the device path (e.g. /dev/sdc) of the data disk at
// the given SCSI LUN, excluding devices that the /dev/disk/azure symlink
// trees identify as resource/OS disks.
// NOTE(review): the exec parameter is unused here; confirm before removal.
func findDiskByLun(lun int, io ioHandler, exec utilexec.Interface) (string, error) {
	azureDisks := listAzureDiskPath(io)
	return findDiskByLunWithConstraint(lun, io, azureDisks)
}
// finds a device mounted to "current" node
//
// findDiskByLunWithConstraint walks /sys/bus/scsi/devices looking for a
// Microsoft "Virtual Disk" at the given LUN, skipping any device whose name
// appears in azureDisks (resource/OS disks). It returns the resolved
// /dev/disk/... link when one exists, otherwise /dev/<name>, or ("", nil)
// when no matching data disk is found.
func findDiskByLunWithConstraint(lun int, io ioHandler, azureDisks []string) (string, error) {
	sysPath := "/sys/bus/scsi/devices"
	dirs, err := io.ReadDir(sysPath)
	if err != nil {
		// FIX: the original shadowed err inside an if-initializer, so a
		// ReadDir failure fell through to `return "", err` with the OUTER
		// (always nil) err — silently reporting "not found". Surface it.
		return "", err
	}

	for _, f := range dirs {
		name := f.Name()
		// look for path like /sys/bus/scsi/devices/3:0:0:1
		arr := libstrings.Split(name, ":")
		if len(arr) < 4 {
			continue
		}
		if len(azureDisks) == 0 {
			klog.V(4).Infof("/dev/disk/azure is not populated, now try to parse %v directly", name)
			target, err := strconv.Atoi(arr[0])
			if err != nil {
				klog.Errorf("failed to parse target from %v (%v), err %v", arr[0], name, err)
				continue
			}
			// as observed, targets 0-3 are used by OS disks. Skip them
			if target <= 3 {
				continue
			}
		}

		// extract LUN from the path.
		// LUN is the last index of the array, i.e. 1 in /sys/bus/scsi/devices/3:0:0:1
		l, err := strconv.Atoi(arr[3])
		if err != nil {
			// unknown path format, continue to read the next one
			klog.V(4).Infof("azure disk - failed to parse lun from %v (%v), err %v", arr[3], name, err)
			continue
		}
		if lun != l {
			continue
		}

		// found the matching LUN
		// read vendor and model to ensure it is a VHD disk
		vendorPath := filepath.Join(sysPath, name, "vendor")
		vendorBytes, err := io.ReadFile(vendorPath)
		if err != nil {
			klog.Errorf("failed to read device vendor, err: %v", err)
			continue
		}
		vendor := libstrings.TrimSpace(string(vendorBytes))
		if libstrings.ToUpper(vendor) != "MSFT" {
			klog.V(4).Infof("vendor doesn't match VHD, got %s", vendor)
			continue
		}

		modelPath := filepath.Join(sysPath, name, "model")
		modelBytes, err := io.ReadFile(modelPath)
		if err != nil {
			klog.Errorf("failed to read device model, err: %v", err)
			continue
		}
		model := libstrings.TrimSpace(string(modelBytes))
		if libstrings.ToUpper(model) != "VIRTUAL DISK" {
			klog.V(4).Infof("model doesn't match VIRTUAL DISK, got %s", model)
			continue
		}

		// find a disk, validate name
		dir := filepath.Join(sysPath, name, "block")
		dev, err := io.ReadDir(dir)
		if err != nil || len(dev) == 0 {
			// FIX: the original indexed dev[0] without checking the slice was
			// non-empty and could panic on an empty block directory.
			continue
		}
		devName := dev[0].Name()

		// a device named in azureDisks is a resource/OS disk — skip it
		found := false
		for _, diskName := range azureDisks {
			klog.V(12).Infof("azureDisk - validating disk %q with sys disk %q", devName, diskName)
			if devName == diskName {
				found = true
				break
			}
		}
		if found {
			continue
		}

		// prefer a stable symlink path over the raw /dev name
		devLinkPaths := []string{"/dev/disk/azure/scsi1/", "/dev/disk/by-id/"}
		for _, devLinkPath := range devLinkPaths {
			diskPath, err := getDiskLinkByDevName(io, devLinkPath, devName)
			if err == nil {
				klog.V(4).Infof("azureDisk - found %s by %s under %s", diskPath, devName, devLinkPath)
				return diskPath, nil
			}
			klog.Warningf("azureDisk - getDiskLinkByDevName by %s under %s failed, error: %v", devName, devLinkPath, err)
		}
		return "/dev/" + devName, nil
	}
	// not found; preserve the original "no error" not-found contract
	return "", nil
}

View File

@ -1,225 +0,0 @@
//go:build !providerless
// +build !providerless
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azuredd
import (
"fmt"
"os"
"runtime"
"strings"
"testing"
"time"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
"github.com/stretchr/testify/assert"
"k8s.io/utils/exec"
)
// fakeFileInfo is a minimal os.FileInfo stub whose only meaningful field is
// the entry name; all other accessors return fixed placeholder values.
type fakeFileInfo struct {
	name string
}

func (fi *fakeFileInfo) Name() string {
	return fi.name
}

func (fi *fakeFileInfo) Size() int64 {
	return 0
}

func (fi *fakeFileInfo) Mode() os.FileMode {
	// NOTE(review): 777 here is decimal, not octal 0777 — harmless because
	// nothing under test reads the mode, but worth confirming the intent.
	return 777
}

func (fi *fakeFileInfo) ModTime() time.Time {
	return time.Now()
}

func (fi *fakeFileInfo) IsDir() bool {
	return false
}

func (fi *fakeFileInfo) Sys() interface{} {
	return nil
}
var (
lun = 1
lunStr = "1"
diskPath = "4:0:0:" + lunStr
devName = "sdd"
lunStr1 = "2"
diskPath1 = "3:0:0:" + lunStr1
devName1 = "sde"
)
// fakeIOHandler fakes the sysfs layout for a node with attached data disks so
// the LUN-discovery helpers can be exercised without real hardware.
type fakeIOHandler struct{}

// ReadDir serves canned directory listings for the sysfs paths the discovery
// code walks; any other directory returns an error (which the production code
// treats as "directory absent").
func (handler *fakeIOHandler) ReadDir(dirname string) ([]os.FileInfo, error) {
	switch dirname {
	case "/sys/bus/scsi/devices":
		// Mix of valid "host:channel:target:lun" addresses and entries the
		// parser must skip ("host1", "target2:0:0").
		f1 := &fakeFileInfo{
			name: "3:0:0:1",
		}
		f2 := &fakeFileInfo{
			name: "4:0:0:0",
		}
		f3 := &fakeFileInfo{
			name: diskPath,
		}
		f4 := &fakeFileInfo{
			name: "host1",
		}
		f5 := &fakeFileInfo{
			name: "target2:0:0",
		}
		return []os.FileInfo{f1, f2, f3, f4, f5}, nil
	case "/sys/bus/scsi/devices/" + diskPath + "/block":
		n := &fakeFileInfo{
			name: devName,
		}
		return []os.FileInfo{n}, nil
	case "/sys/bus/scsi/devices/" + diskPath1 + "/block":
		n := &fakeFileInfo{
			name: devName1,
		}
		return []os.FileInfo{n}, nil
	}
	return nil, fmt.Errorf("bad dir")
}

// WriteFile pretends every write (e.g. the scsi-host rescan trigger) succeeds.
func (handler *fakeIOHandler) WriteFile(filename string, data []byte, perm os.FileMode) error {
	return nil
}

// Readlink resolves every symlink to one fixed azure disk target.
func (handler *fakeIOHandler) Readlink(name string) (string, error) {
	return "/dev/azure/disk/sda", nil
}

// ReadFile returns vendor/model contents identifying the device as a
// Microsoft virtual disk; the mixed case and trailing whitespace are
// intentional — callers must normalize before comparing.
func (handler *fakeIOHandler) ReadFile(filename string) ([]byte, error) {
	if strings.HasSuffix(filename, "vendor") {
		return []byte("Msft \n"), nil
	}
	if strings.HasSuffix(filename, "model") {
		return []byte("Virtual Disk \n"), nil
	}
	return nil, fmt.Errorf("unknown file")
}
// TestIoHandler exercises findDiskByLun against the fake IO handler. On
// Linux the fake sysfs tree must resolve the test LUN to /dev/<devName>;
// on Windows only absence of an error is checked.
func TestIoHandler(t *testing.T) {
	if runtime.GOOS != "windows" && runtime.GOOS != "linux" {
		t.Skipf("TestIoHandler not supported on GOOS=%s", runtime.GOOS)
	}
	disk, err := findDiskByLun(lun, &fakeIOHandler{}, exec.New())
	if runtime.GOOS == "windows" {
		if err != nil {
			t.Errorf("no data disk found: disk %v err %v", disk, err)
		}
		return
	}
	// if no disk matches lun, exit
	if disk != "/dev/"+devName || err != nil {
		t.Errorf("no data disk found: disk %v err %v", disk, err)
	}
}
// TestNormalizeStorageAccountType verifies the mapping of user-supplied SKU
// strings to compute.DiskStorageAccountTypes: the empty string defaults to
// StandardSSD_LRS, known SKUs map to their typed constants, and unknown
// values are rejected.
func TestNormalizeStorageAccountType(t *testing.T) {
	tests := []struct {
		storageAccountType string
		expectedAccountType compute.DiskStorageAccountTypes
		expectError bool
	}{
		{
			storageAccountType: "",
			expectedAccountType: compute.StandardSSDLRS,
			expectError: false,
		},
		{
			storageAccountType: "NOT_EXISTING",
			expectedAccountType: "",
			expectError: true,
		},
		{
			storageAccountType: "Standard_LRS",
			expectedAccountType: compute.StandardLRS,
			expectError: false,
		},
		{
			storageAccountType: "Premium_LRS",
			expectedAccountType: compute.PremiumLRS,
			expectError: false,
		},
		{
			storageAccountType: "StandardSSD_LRS",
			expectedAccountType: compute.StandardSSDLRS,
			expectError: false,
		},
		{
			storageAccountType: "UltraSSD_LRS",
			expectedAccountType: compute.UltraSSDLRS,
			expectError: false,
		},
	}
	for _, test := range tests {
		result, err := normalizeStorageAccountType(test.storageAccountType)
		assert.Equal(t, test.expectedAccountType, result)
		assert.Equal(t, test.expectError, err != nil, fmt.Sprintf("error msg: %v", err))
	}
}

// TestGetDiskNum verifies extraction of the numeric suffix from a Windows
// "/dev/diskN" device string; inputs without the expected prefix or number
// must produce an error.
func TestGetDiskNum(t *testing.T) {
	tests := []struct {
		deviceInfo string
		expectedNum string
		expectError bool
	}{
		{
			deviceInfo: "/dev/disk0",
			expectedNum: "0",
			expectError: false,
		},
		{
			deviceInfo: "/dev/disk99",
			expectedNum: "99",
			expectError: false,
		},
		{
			deviceInfo: "",
			expectedNum: "",
			expectError: true,
		},
		{
			deviceInfo: "/dev/disk",
			expectedNum: "",
			expectError: true,
		},
		{
			deviceInfo: "999",
			expectedNum: "",
			expectError: true,
		},
	}
	for _, test := range tests {
		result, err := getDiskNum(test.deviceInfo)
		assert.Equal(t, test.expectedNum, result)
		assert.Equal(t, test.expectError, err != nil, fmt.Sprintf("error msg: %v", err))
	}
}

View File

@ -1,29 +0,0 @@
//go:build !providerless && !linux && !windows
// +build !providerless,!linux,!windows
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azuredd
import "k8s.io/utils/exec"
// scsiHostRescan is a no-op on platforms other than Linux and Windows.
func scsiHostRescan(io ioHandler, exec exec.Interface) {
}

// findDiskByLun is unsupported on this platform: it never locates a disk
// and never errors.
func findDiskByLun(lun int, io ioHandler, exec exec.Interface) (string, error) {
	return "", nil
}

View File

@ -1,122 +0,0 @@
//go:build !providerless && windows
// +build !providerless,windows
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azuredd
import (
"encoding/json"
"fmt"
"strconv"
"strings"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
utilexec "k8s.io/utils/exec"
)
// winDiskNumFormat renders a Windows disk number as a device-path string.
var winDiskNumFormat = "/dev/disk%d"

// scsiHostRescan refreshes Windows' cached view of attached storage via
// PowerShell; failures are logged but not surfaced to the caller.
func scsiHostRescan(io ioHandler, exec utilexec.Interface) {
	cmd := "Update-HostStorageCache"
	output, err := exec.Command("powershell", "/c", cmd).CombinedOutput()
	if err != nil {
		klog.Errorf("Update-HostStorageCache failed in scsiHostRescan, error: %v, output: %q", err, string(output))
	}
}

// search Windows disk number by LUN
// findDiskByLun lists all disks via PowerShell as JSON and scans each
// disk's "location" field for the requested LUN, returning the matching
// disk as a winDiskNumFormat path. An empty string with nil error means
// no disk carries that LUN.
func findDiskByLun(lun int, iohandler ioHandler, exec utilexec.Interface) (string, error) {
	cmd := `Get-Disk | select number, location | ConvertTo-Json`
	output, err := exec.Command("powershell", "/c", cmd).CombinedOutput()
	if err != nil {
		klog.Errorf("Get-Disk failed in findDiskByLun, error: %v, output: %q", err, string(output))
		return "", err
	}
	// A meaningful disk listing is never this short; reject it outright.
	if len(output) < 10 {
		return "", fmt.Errorf("Get-Disk output is too short, output: %q", string(output))
	}
	var data []map[string]interface{}
	if err = json.Unmarshal(output, &data); err != nil {
		klog.Errorf("Get-Disk output is not a json array, output: %q", string(output))
		return "", err
	}
	for _, v := range data {
		if jsonLocation, ok := v["location"]; ok {
			if location, ok := jsonLocation.(string); ok {
				if !strings.Contains(location, " LUN ") {
					continue
				}
				arr := strings.Split(location, " ")
				arrLen := len(arr)
				if arrLen < 3 {
					klog.Warningf("unexpected json structure from Get-Disk, location: %q", jsonLocation)
					continue
				}
				klog.V(4).Infof("found a disk, location: %q, lun: %q", location, arr[arrLen-1])
				//last element of location field is LUN number, e.g.
				// "location": "Integrated : Adapter 3 : Port 0 : Target 0 : LUN 1"
				l, err := strconv.Atoi(arr[arrLen-1])
				if err != nil {
					klog.Warningf("cannot parse element from data structure, location: %q, element: %q", location, arr[arrLen-1])
					continue
				}
				if l == lun {
					klog.V(4).Infof("found a disk and lun, location: %q, lun: %d", location, lun)
					if d, ok := v["number"]; ok {
						// ConvertTo-Json decodes numbers as float64.
						if diskNum, ok := d.(float64); ok {
							klog.V(2).Infof("azureDisk Mount: got disk number(%d) by LUN(%d)", int(diskNum), lun)
							return fmt.Sprintf(winDiskNumFormat, int(diskNum)), nil
						}
						klog.Warningf("LUN(%d) found, but could not get disk number(%q), location: %q", lun, d, location)
					}
					return "", fmt.Errorf("LUN(%d) found, but could not get disk number, location: %q", lun, location)
				}
			}
		}
	}
	// No disk with the requested LUN; this is not an error.
	return "", nil
}

// formatIfNotFormatted initializes and formats a raw Windows disk with the
// requested filesystem (NTFS when fstype is empty). Already-partitioned
// disks are skipped by the "Where partitionstyle -eq 'raw'" filter.
func formatIfNotFormatted(disk string, fstype string, exec utilexec.Interface) error {
	if err := mount.ValidateDiskNumber(disk); err != nil {
		klog.Errorf("azureDisk Mount: formatIfNotFormatted failed, err: %v\n", err)
		return err
	}
	if len(fstype) == 0 {
		// Use 'NTFS' as the default
		fstype = "NTFS"
	}
	cmd := fmt.Sprintf("Get-Disk -Number %s | Where partitionstyle -eq 'raw' | Initialize-Disk -PartitionStyle MBR -PassThru", disk)
	cmd += fmt.Sprintf(" | New-Partition -AssignDriveLetter -UseMaximumSize | Format-Volume -FileSystem %s -Confirm:$false", fstype)
	output, err := exec.Command("powershell", "/c", cmd).CombinedOutput()
	if err != nil {
		klog.Errorf("azureDisk Mount: Get-Disk failed, error: %v, output: %q", err, string(output))
		return err
	}
	klog.Infof("azureDisk Mount: Disk successfully formatted, disk: %q, fstype: %q\n", disk, fstype)
	return nil
}

View File

@ -1,154 +0,0 @@
//go:build !providerless && windows
// +build !providerless,windows
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azuredd
import (
"encoding/json"
"errors"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/utils/exec"
exectest "k8s.io/utils/exec/testing"
)
// newFakeExec builds a FakeExec whose single command invocation returns
// the supplied stdout and error (stderr is always empty).
func newFakeExec(stdout []byte, err error) *exectest.FakeExec {
	action := func() ([]byte, []byte, error) {
		return stdout, []byte(""), err
	}
	fakeCmd := &exectest.FakeCmd{
		CombinedOutputScript: []exectest.FakeAction{action},
	}
	makeCmd := func(cmd string, args ...string) exec.Cmd {
		return fakeCmd
	}
	return &exectest.FakeExec{
		CommandScript: []exectest.FakeCommandAction{makeCmd},
	}
}
// TestScsiHostRescan only checks that a failing PowerShell invocation does
// not panic; scsiHostRescan swallows errors by design.
func TestScsiHostRescan(t *testing.T) {
	// NOTE: We don't have any assertions we can make for this test.
	fakeExec := newFakeExec([]byte("expected output."), errors.New("expected error."))
	scsiHostRescan(nil, fakeExec)
}
// TestGetDevicePath drives findDiskByLun through its failure and success
// paths: command errors, short output, malformed JSON, disks without a
// usable LUN, a LUN whose disk number cannot be decoded, and a valid match.
func TestGetDevicePath(t *testing.T) {
	diskNoLun := make(map[string]interface{}, 0)
	diskNoLun["location"] = "incorrect location"
	// The expectation is that the string will contain at least 2 spaces
	diskIncorrectLun := make(map[string]interface{}, 0)
	diskIncorrectLun["location"] = " LUN 1"
	diskNoIntegerLun := make(map[string]interface{}, 0)
	diskNoIntegerLun["location"] = "Integrated : Adapter 1 : Port 0 : Target 0 : LUN A"
	lun := 42
	invalidDiskNumberLun := make(map[string]interface{}, 0)
	invalidDiskNumberLun["location"] = "Integrated : Adapter 1 : Port 0 : Target 0 : LUN 42"
	invalidDiskNumberLun["number"] = "not a float"
	validLun := make(map[string]interface{}, 0)
	validLun["location"] = "Integrated : Adapter 1 : Port 0 : Target 0 : LUN 42"
	validLun["number"] = 1.5
	noDiskFoundJson, _ := json.Marshal([]map[string]interface{}{diskNoLun, diskIncorrectLun, diskNoIntegerLun})
	// Fixed local-variable typo: invaliDiskJson -> invalidDiskJson.
	invalidDiskJson, _ := json.Marshal([]map[string]interface{}{invalidDiskNumberLun})
	validJson, _ := json.Marshal([]map[string]interface{}{validLun})
	testCases := []struct {
		commandOutput []byte
		commandError error
		expectedOutput string
		expectedError bool
		expectedErrMsg string
	}{
		{
			commandOutput: []byte("foolish output."),
			commandError: errors.New("expected error."),
			expectedError: true,
			expectedErrMsg: "expected error.",
		},
		{
			commandOutput: []byte("too short"),
			expectedError: true,
			expectedErrMsg: `Get-Disk output is too short, output: "too short"`,
		},
		{
			commandOutput: []byte("not a json"),
			expectedError: true,
			expectedErrMsg: `invalid character 'o' in literal null (expecting 'u')`,
		},
		{
			commandOutput: noDiskFoundJson,
			expectedOutput: "",
		},
		{
			commandOutput: invalidDiskJson,
			expectedError: true,
			expectedErrMsg: fmt.Sprintf("LUN(%d) found, but could not get disk number, location: %q", lun, invalidDiskNumberLun["location"]),
		},
		{
			commandOutput: validJson,
			expectedOutput: "/dev/disk1",
		},
	}
	for _, tc := range testCases {
		fakeExec := newFakeExec(tc.commandOutput, tc.commandError)
		disk, err := findDiskByLun(lun, nil, fakeExec)
		if tc.expectedError {
			if err == nil || err.Error() != tc.expectedErrMsg {
				t.Errorf("expected error message `%s` but got `%v`", tc.expectedErrMsg, err)
			}
			continue
		}
		require.NoError(t, err)
		assert.Equal(t, tc.expectedOutput, disk)
	}
}
// TestFormatIfNotFormatted covers the three formatIfNotFormatted outcomes:
// an unparseable disk number, a PowerShell failure, and success.
func TestFormatIfNotFormatted(t *testing.T) {
	// Invalid disk number: rejected before any command runs.
	fakeExec := newFakeExec([]byte{}, errors.New("expected error."))
	err := formatIfNotFormatted("fake disk number", "", fakeExec)
	expectedErrMsg := `wrong disk number format: "fake disk number", err: strconv.Atoi: parsing "fake disk number": invalid syntax`
	if err == nil || err.Error() != expectedErrMsg {
		t.Errorf("expected error message `%s` but got `%v`", expectedErrMsg, err)
	}
	// Valid disk number but the format command fails.
	err = formatIfNotFormatted("1", "", fakeExec)
	expectedErrMsg = "expected error."
	if err == nil || err.Error() != expectedErrMsg {
		t.Errorf("expected error message `%s` but got `%v`", expectedErrMsg, err)
	}
	// Valid disk number and the command succeeds.
	fakeExec = newFakeExec([]byte{}, nil)
	err = formatIfNotFormatted("1", "", fakeExec)
	require.NoError(t, err)
}

View File

@ -1,357 +0,0 @@
//go:build !providerless
// +build !providerless
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azuredd
import (
"context"
"fmt"
"strings"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
"k8s.io/klog/v2"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/legacy-cloud-providers/azure"
)
// DiskController interface exposed by the cloud provider implementing Disk functionality
type DiskController interface {
	// CreateBlobDisk creates an unmanaged (blob-backed) VHD data disk.
	CreateBlobDisk(dataDiskName string, storageAccountType storage.SkuName, sizeGB int) (string, error)
	// DeleteBlobDisk deletes an unmanaged VHD data disk by URI.
	DeleteBlobDisk(diskURI string) error
	// CreateManagedDisk creates an Azure managed disk.
	CreateManagedDisk(options *azure.ManagedDiskOptions) (string, error)
	// DeleteManagedDisk deletes an Azure managed disk by URI.
	DeleteManagedDisk(diskURI string) error
	// Attaches the disk to the host machine.
	AttachDisk(isManagedDisk bool, diskName, diskURI string, nodeName types.NodeName, cachingMode compute.CachingTypes) (int32, error)
	// Detaches the disk, identified by disk name or uri, from the host machine.
	DetachDisk(diskName, diskURI string, nodeName types.NodeName) error
	// Check if a list of volumes are attached to the node with the specified NodeName
	DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error)
	// Get the LUN number of the disk that is attached to the host
	GetDiskLun(diskName, diskURI string, nodeName types.NodeName) (int32, error)
	// Get the next available LUN number to attach a new VHD
	GetNextDiskLun(nodeName types.NodeName) (int32, error)
	// Create a VHD blob
	CreateVolume(name, storageAccount, storageAccountType, location string, requestGB int) (string, string, int, error)
	// Delete a VHD blob
	DeleteVolume(diskURI string) error
	// Expand the disk to new size
	ResizeDisk(diskURI string, oldSize resource.Quantity, newSize resource.Quantity) (resource.Quantity, error)
	// GetAzureDiskLabels gets availability zone labels for Azuredisk.
	GetAzureDiskLabels(diskURI string) (map[string]string, error)
	// GetActiveZones returns all the zones in which k8s nodes are currently running.
	GetActiveZones() (sets.String, error)
	// GetLocation returns the location in which k8s cluster is currently running.
	GetLocation() string
}

// azureDataDiskPlugin is the in-tree Azure Disk volume plugin; host is set
// by Init and gives access to the cloud provider and kubelet paths.
type azureDataDiskPlugin struct {
	host volume.VolumeHost
}

// Compile-time interface conformance checks.
var _ volume.VolumePlugin = &azureDataDiskPlugin{}
var _ volume.PersistentVolumePlugin = &azureDataDiskPlugin{}
var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.AttachableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.VolumePluginWithAttachLimits = &azureDataDiskPlugin{}
var _ volume.ExpandableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.DeviceMountableVolumePlugin = &azureDataDiskPlugin{}

const (
	azureDataDiskPluginName = "kubernetes.io/azure-disk"
	// defaultAzureVolumeLimit is the per-node disk limit used when the VM
	// size cannot be determined (see GetVolumeLimits).
	defaultAzureVolumeLimit = 16
)

// ProbeVolumePlugins is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
	return []volume.VolumePlugin{&azureDataDiskPlugin{}}
}
// Init stores the volume host for later cloud-provider lookups.
func (plugin *azureDataDiskPlugin) Init(host volume.VolumeHost) error {
	plugin.host = host
	return nil
}

// GetPluginName returns the in-tree plugin name "kubernetes.io/azure-disk".
func (plugin *azureDataDiskPlugin) GetPluginName() string {
	return azureDataDiskPluginName
}

// GetVolumeName identifies a spec by its data-disk URI.
func (plugin *azureDataDiskPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}
	return volumeSource.DataDiskURI, nil
}

// CanSupport reports whether the spec carries an AzureDisk source, either
// inline in a pod or via a PersistentVolume.
func (plugin *azureDataDiskPlugin) CanSupport(spec *volume.Spec) bool {
	return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.AzureDisk != nil) ||
		(spec.Volume != nil && spec.Volume.AzureDisk != nil)
}

func (plugin *azureDataDiskPlugin) RequiresRemount(spec *volume.Spec) bool {
	return false
}

func (plugin *azureDataDiskPlugin) SupportsMountOption() bool {
	return true
}

func (plugin *azureDataDiskPlugin) SupportsBulkVolumeVerification() bool {
	return false
}

func (plugin *azureDataDiskPlugin) SupportsSELinuxContextMount(spec *volume.Spec) (bool, error) {
	return false, nil
}

// GetVolumeLimits returns the node's attachable-disk limit, derived from
// the VM instance type when available and defaulting otherwise.
func (plugin *azureDataDiskPlugin) GetVolumeLimits() (map[string]int64, error) {
	volumeLimits := map[string]int64{
		util.AzureVolumeLimitKey: defaultAzureVolumeLimit,
	}
	az, err := getCloud(plugin.host)
	if err != nil {
		// if we can't fetch cloudprovider we return an error
		// hoping external CCM or admin can set it. Returning
		// default values from here will mean, no one can
		// override them.
		return nil, fmt.Errorf("failed to get azure cloud in GetVolumeLimits, plugin.host: %s", plugin.host.GetHostName())
	}
	instances, ok := az.Instances()
	if !ok {
		klog.Warningf("Failed to get instances from cloud provider")
		return volumeLimits, nil
	}
	instanceType, err := instances.InstanceType(context.TODO(), plugin.host.GetNodeName())
	if err != nil {
		klog.Errorf("Failed to get instance type from Azure cloud provider, nodeName: %s", plugin.host.GetNodeName())
		return volumeLimits, nil
	}
	volumeLimits = map[string]int64{
		util.AzureVolumeLimitKey: getMaxDataDiskCount(instanceType),
	}
	return volumeLimits, nil
}

// getMaxDataDiskCount looks up the data-disk limit for a VM size (matched
// case-insensitively), defaulting when the size is unknown.
func getMaxDataDiskCount(instanceType string) int64 {
	vmsize := strings.ToUpper(instanceType)
	maxDataDiskCount, exists := maxDataDiskCountMap[vmsize]
	if exists {
		klog.V(12).Infof("got a matching size in getMaxDataDiskCount, VM Size: %s, MaxDataDiskCount: %d", vmsize, maxDataDiskCount)
		return maxDataDiskCount
	}
	klog.V(12).Infof("not found a matching size in getMaxDataDiskCount, VM Size: %s, use default volume limit: %d", vmsize, defaultAzureVolumeLimit)
	return defaultAzureVolumeLimit
}

func (plugin *azureDataDiskPlugin) VolumeLimitKey(spec *volume.Spec) string {
	return util.AzureVolumeLimitKey
}

// GetAccessModes: an Azure data disk attaches to a single node read-write.
func (plugin *azureDataDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
	return []v1.PersistentVolumeAccessMode{
		v1.ReadWriteOnce,
	}
}

// NewAttacher initializes an Attacher
func (plugin *azureDataDiskPlugin) NewAttacher() (volume.Attacher, error) {
	azure, err := getCloud(plugin.host)
	if err != nil {
		klog.Errorf("failed to get azure cloud in NewAttacher, plugin.host : %s, err:%v", plugin.host.GetHostName(), err)
		return nil, err
	}
	return &azureDiskAttacher{
		plugin: plugin,
		cloud: azure,
	}, nil
}
// NewDetacher initializes a Detacher backed by the Azure cloud provider.
func (plugin *azureDataDiskPlugin) NewDetacher() (volume.Detacher, error) {
	azure, err := getCloud(plugin.host)
	if err != nil {
		// Include the error itself so the failure is diagnosable,
		// matching the log line in NewAttacher.
		klog.V(4).Infof("failed to get azure cloud in NewDetacher, plugin.host : %s, err:%v", plugin.host.GetHostName(), err)
		return nil, err
	}
	return &azureDiskDetacher{
		plugin: plugin,
		cloud: azure,
	}, nil
}
func (plugin *azureDataDiskPlugin) CanAttach(spec *volume.Spec) (bool, error) {
	return true, nil
}

func (plugin *azureDataDiskPlugin) CanDeviceMount(spec *volume.Spec) (bool, error) {
	return true, nil
}

// NewDeleter builds a Deleter for the disk named in the spec.
func (plugin *azureDataDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}
	disk := makeDataDisk(spec.Name(), "", volumeSource.DiskName, plugin.host, plugin)
	return &azureDiskDeleter{
		spec: spec,
		plugin: plugin,
		dataDisk: disk,
	}, nil
}

// NewProvisioner builds a Provisioner; a PVC without explicit access modes
// inherits the plugin's supported modes (ReadWriteOnce).
func (plugin *azureDataDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
	if len(options.PVC.Spec.AccessModes) == 0 {
		options.PVC.Spec.AccessModes = plugin.GetAccessModes()
	}
	return &azureDiskProvisioner{
		plugin: plugin,
		options: options,
	}, nil
}

// NewMounter builds a Mounter for the given pod and spec.
func (plugin *azureDataDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, options volume.VolumeOptions) (volume.Mounter, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}
	disk := makeDataDisk(spec.Name(), pod.UID, volumeSource.DiskName, plugin.host, plugin)
	return &azureDiskMounter{
		plugin: plugin,
		spec: spec,
		options: options,
		dataDisk: disk,
	}, nil
}

// NewUnmounter builds an Unmounter; only the volume name and pod UID are
// needed to locate the mount, so the disk name is left empty.
func (plugin *azureDataDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	disk := makeDataDisk(volName, podUID, "", plugin.host, plugin)
	return &azureDiskUnmounter{
		plugin: plugin,
		dataDisk: disk,
	}, nil
}

func (plugin *azureDataDiskPlugin) RequiresFSResize() bool {
	return true
}

// ExpandVolumeDevice resizes the underlying Azure disk via the cloud
// provider's DiskController.
func (plugin *azureDataDiskPlugin) ExpandVolumeDevice(
	spec *volume.Spec,
	newSize resource.Quantity,
	oldSize resource.Quantity) (resource.Quantity, error) {
	if spec.PersistentVolume == nil || spec.PersistentVolume.Spec.AzureDisk == nil {
		return oldSize, fmt.Errorf("invalid PV spec")
	}
	diskController, err := getDiskController(plugin.host)
	if err != nil {
		return oldSize, err
	}
	return diskController.ResizeDisk(spec.PersistentVolume.Spec.AzureDisk.DataDiskURI, oldSize, newSize)
}

// NodeExpand grows the filesystem on the node after the device itself was
// expanded; block-mode volumes need no node-side work.
func (plugin *azureDataDiskPlugin) NodeExpand(resizeOptions volume.NodeResizeOptions) (bool, error) {
	fsVolume, err := util.CheckVolumeModeFilesystem(resizeOptions.VolumeSpec)
	if err != nil {
		return false, fmt.Errorf("error checking VolumeMode: %v", err)
	}
	// if volume is not a fs file system, there is nothing for us to do here.
	if !fsVolume {
		return true, nil
	}
	_, err = util.GenericResizeFS(plugin.host, plugin.GetPluginName(), resizeOptions.DevicePath, resizeOptions.DeviceMountPath)
	if err != nil {
		return false, err
	}
	return true, nil
}

var _ volume.NodeExpandableVolumePlugin = &azureDataDiskPlugin{}
// ConstructVolumeSpec rebuilds a volume spec from a mount path by reading
// the device name back from the kubelet's mount table.
func (plugin *azureDataDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (volume.ReconstructedVolume, error) {
	mounter := plugin.host.GetMounter(plugin.GetPluginName())
	kvh, ok := plugin.host.(volume.KubeletVolumeHost)
	if !ok {
		return volume.ReconstructedVolume{}, fmt.Errorf("plugin volume host does not implement KubeletVolumeHost interface")
	}
	hu := kvh.GetHostUtil()
	pluginMntDir := util.GetPluginMountDir(plugin.host, plugin.GetPluginName())
	sourceName, err := hu.GetDeviceNameFromMount(mounter, mountPath, pluginMntDir)
	if err != nil {
		return volume.ReconstructedVolume{}, err
	}
	azureVolume := &v1.Volume{
		Name: volumeName,
		VolumeSource: v1.VolumeSource{
			AzureDisk: &v1.AzureDiskVolumeSource{
				DataDiskURI: sourceName,
			},
		},
	}
	return volume.ReconstructedVolume{
		Spec: volume.NewSpecFromVolume(azureVolume),
	}, nil
}

// GetDeviceMountRefs lists other mount references to the device mount path.
func (plugin *azureDataDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
	m := plugin.host.GetMounter(plugin.GetPluginName())
	return m.GetMountRefs(deviceMountPath)
}

// NewDeviceMounter reuses the attacher, which also implements device mounting.
func (plugin *azureDataDiskPlugin) NewDeviceMounter() (volume.DeviceMounter, error) {
	return plugin.NewAttacher()
}

// NewDeviceUnmounter reuses the detacher, which also implements device unmounting.
func (plugin *azureDataDiskPlugin) NewDeviceUnmounter() (volume.DeviceUnmounter, error) {
	return plugin.NewDetacher()
}

View File

@ -1,168 +0,0 @@
//go:build !providerless
// +build !providerless
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azuredd
import (
"fmt"
"path/filepath"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
utilstrings "k8s.io/utils/strings"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/volumepathhandler"
)
// Compile-time checks that the plugin also satisfies the block-volume
// plugin interfaces.
var _ volume.VolumePlugin = &azureDataDiskPlugin{}
var _ volume.PersistentVolumePlugin = &azureDataDiskPlugin{}
var _ volume.BlockVolumePlugin = &azureDataDiskPlugin{}
var _ volume.DeletableVolumePlugin = &azureDataDiskPlugin{}
var _ volume.ProvisionableVolumePlugin = &azureDataDiskPlugin{}

// ConstructBlockVolumeSpec rebuilds a block-mode volume spec from a pod's
// device map path by resolving it to the plugin's global map path.
func (plugin *azureDataDiskPlugin) ConstructBlockVolumeSpec(podUID types.UID, volumeName, mapPath string) (*volume.Spec, error) {
	pluginDir := plugin.host.GetVolumeDevicePluginDir(azureDataDiskPluginName)
	blkutil := volumepathhandler.NewBlockVolumePathHandler()
	globalMapPathUUID, err := blkutil.FindGlobalMapPathUUIDFromPod(pluginDir, mapPath, podUID)
	if err != nil {
		return nil, err
	}
	klog.V(5).Infof("constructing block volume spec from globalMapPathUUID: %s", globalMapPathUUID)
	globalMapPath := filepath.Dir(globalMapPathUUID)
	if len(globalMapPath) <= 1 {
		return nil, fmt.Errorf("failed to get volume plugin information from globalMapPathUUID: %v", globalMapPathUUID)
	}
	return getVolumeSpecFromGlobalMapPath(globalMapPath, volumeName)
}
// getVolumeSpecFromGlobalMapPath rebuilds a block-mode PV spec from a
// global map path whose final element is the disk name.
func getVolumeSpecFromGlobalMapPath(globalMapPath, volumeName string) (*volume.Spec, error) {
	// Get volume spec information from globalMapPath
	// globalMapPath example:
	// plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumeID}
	// plugins/kubernetes.io/azure-disk/volumeDevices/vol-XXXXXX
	diskName := filepath.Base(globalMapPath)
	if len(diskName) <= 1 {
		return nil, fmt.Errorf("failed to get diskName from global path=%s", globalMapPath)
	}
	// Fix: the two arguments were previously swapped relative to the
	// format string (diskName was logged as the globalMapPath and
	// vice versa).
	klog.V(5).Infof("got diskName(%s) from globalMapPath: %s", diskName, globalMapPath)
	block := v1.PersistentVolumeBlock
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: volumeName,
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				AzureDisk: &v1.AzureDiskVolumeSource{
					DiskName: diskName,
				},
			},
			VolumeMode: &block,
		},
	}
	return volume.NewSpecFromPersistentVolume(pv, true), nil
}
// NewBlockVolumeMapper creates a new volume.BlockVolumeMapper from an API specification.
func (plugin *azureDataDiskPlugin) NewBlockVolumeMapper(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.BlockVolumeMapper, error) {
	// If this is called via GenerateUnmapDeviceFunc(), pod is nil.
	// Pass empty string as dummy uid since uid isn't used in the case.
	var uid types.UID
	if pod != nil {
		uid = pod.UID
	}
	return plugin.newBlockVolumeMapperInternal(spec, uid, plugin.host.GetMounter(plugin.GetPluginName()))
}

// newBlockVolumeMapperInternal builds the mapper and wires up a metrics
// provider rooted at the volume's per-pod block path.
func (plugin *azureDataDiskPlugin) newBlockVolumeMapperInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface) (volume.BlockVolumeMapper, error) {
	volumeSource, readOnly, err := getVolumeSource(spec)
	if err != nil {
		return nil, err
	}
	disk := makeDataDisk(spec.Name(), podUID, volumeSource.DiskName, plugin.host, plugin)
	mapper := &azureDataDiskMapper{
		dataDisk: disk,
		readOnly: readOnly,
	}
	blockPath, err := mapper.GetGlobalMapPath(spec)
	if err != nil {
		return nil, fmt.Errorf("failed to get device path: %v", err)
	}
	mapper.MetricsProvider = volume.NewMetricsBlock(filepath.Join(blockPath, string(podUID)))
	return mapper, nil
}

func (plugin *azureDataDiskPlugin) NewBlockVolumeUnmapper(volName string, podUID types.UID) (volume.BlockVolumeUnmapper, error) {
	return plugin.newUnmapperInternal(volName, podUID, plugin.host.GetMounter(plugin.GetPluginName()))
}

// newUnmapperInternal builds the unmapper; the disk name is left empty
// because unmapping only needs the volume name and pod UID.
func (plugin *azureDataDiskPlugin) newUnmapperInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.BlockVolumeUnmapper, error) {
	disk := makeDataDisk(volName, podUID, "", plugin.host, plugin)
	return &azureDataDiskUnmapper{dataDisk: disk}, nil
}

// azureDataDiskUnmapper unmaps a block device; it reports no metrics.
type azureDataDiskUnmapper struct {
	*dataDisk
	volume.MetricsNil
}

var _ volume.BlockVolumeUnmapper = &azureDataDiskUnmapper{}

// azureDataDiskMapper maps a raw block device into a pod.
type azureDataDiskMapper struct {
	*dataDisk
	readOnly bool
}

var _ volume.BlockVolumeMapper = &azureDataDiskMapper{}
// GetGlobalMapPath returns global map path and error
// path: plugins/kubernetes.io/{PluginName}/volumeDevices/volumeID
//
// plugins/kubernetes.io/azure-disk/volumeDevices/vol-XXXXXX
func (disk *dataDisk) GetGlobalMapPath(spec *volume.Spec) (string, error) {
	volumeSource, _, err := getVolumeSource(spec)
	if err != nil {
		return "", err
	}
	// DiskName is already a string; the former string(...) wrapper was a
	// redundant conversion.
	return filepath.Join(disk.plugin.host.GetVolumeDevicePluginDir(azureDataDiskPluginName), volumeSource.DiskName), nil
}

// GetPodDeviceMapPath returns pod device map path and volume name
// path: pods/{podUid}/volumeDevices/kubernetes.io~azure
func (disk *dataDisk) GetPodDeviceMapPath() (string, string) {
	name := azureDataDiskPluginName
	return disk.plugin.host.GetPodVolumeDeviceDir(disk.podUID, utilstrings.EscapeQualifiedName(name)), disk.volumeName
}

// SupportsMetrics returns true for azureDataDiskMapper as it initializes the
// MetricsProvider.
func (addm *azureDataDiskMapper) SupportsMetrics() bool {
	return true
}

View File

@ -1,148 +0,0 @@
//go:build !providerless
// +build !providerless
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azuredd
import (
"os"
"path/filepath"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
// Fixture names and the global/pod map paths they produce under the
// azure-disk block-volume layout.
const (
	testDiskName = "disk1"
	testPVName = "pv1"
	testGlobalPath = "plugins/kubernetes.io/azure-disk/volumeDevices/disk1"
	testPodPath = "pods/poduid/volumeDevices/kubernetes.io~azure-disk"
)

// TestGetVolumeSpecFromGlobalMapPath checks that a block PV spec can be
// reconstructed from a global map path: an empty path must fail, a valid
// path must yield the disk name and Block volume mode.
func TestGetVolumeSpecFromGlobalMapPath(t *testing.T) {
	// make our test path for fake GlobalMapPath
	// /tmp symbolized our pluginDir
	// /tmp/testGlobalPathXXXXX/plugins/kubernetes.io/azure-disk/volumeDevices/disk1
	tmpVDir, err := utiltesting.MkTmpdir("azureDiskBlockTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	//deferred clean up
	defer os.RemoveAll(tmpVDir)
	expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath)
	//Bad Path
	badspec, err := getVolumeSpecFromGlobalMapPath("", "")
	if badspec != nil || err == nil {
		t.Errorf("Expected not to get spec from GlobalMapPath but did")
	}
	// Good Path
	spec, err := getVolumeSpecFromGlobalMapPath(expectedGlobalPath, "")
	if spec == nil || err != nil {
		t.Fatalf("Failed to get spec from GlobalMapPath: %v", err)
	}
	if spec.PersistentVolume.Spec.AzureDisk.DiskName != testDiskName {
		t.Errorf("Invalid pdName from GlobalMapPath spec: %s", spec.PersistentVolume.Spec.AzureDisk.DiskName)
	}
	block := v1.PersistentVolumeBlock
	specMode := spec.PersistentVolume.Spec.VolumeMode
	if specMode == nil {
		t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v expected: %v", specMode, block)
	}
	if *specMode != block {
		t.Errorf("Invalid volumeMode from GlobalMapPath spec: %v expected: %v", *specMode, block)
	}
}
// getTestVolume builds a PV-backed volume.Spec for the fixture disk,
// optionally marking it as a raw block volume.
func getTestVolume(readOnly bool, path string, isBlock bool) *volume.Spec {
	source := v1.PersistentVolumeSource{
		AzureDisk: &v1.AzureDiskVolumeSource{
			DiskName: testDiskName,
		},
	}
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: testPVName,
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: source,
		},
	}
	if isBlock {
		blockMode := v1.PersistentVolumeBlock
		pv.Spec.VolumeMode = &blockMode
	}
	return volume.NewSpecFromPersistentVolume(pv, readOnly)
}
// TestGetPodAndPluginMapPaths verifies that a block-volume mapper created by
// the azure-disk plugin reports the expected global map path and pod device
// map path for a fake volume host rooted at a temp dir.
func TestGetPodAndPluginMapPaths(t *testing.T) {
	tmpVDir, err := utiltesting.MkTmpdir("azureDiskBlockTest")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	// deferred clean up
	defer os.RemoveAll(tmpVDir)

	expectedGlobalPath := filepath.Join(tmpVDir, testGlobalPath)
	expectedPodPath := filepath.Join(tmpVDir, testPodPath)

	spec := getTestVolume(false, tmpVDir, true /*isBlock*/)
	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(t, tmpVDir, nil, nil))
	plug, err := plugMgr.FindMapperPluginByName(azureDataDiskPluginName)
	if err != nil {
		// The deferred RemoveAll above already cleans up tmpVDir; the explicit
		// RemoveAll the original performed here was redundant. Also surface err.
		t.Fatalf("Can't find the plugin by name: %q, error: %v", azureDataDiskPluginName, err)
	}
	if plug.GetPluginName() != azureDataDiskPluginName {
		t.Fatalf("Wrong name: %s", plug.GetPluginName())
	}
	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
	mapper, err := plug.NewBlockVolumeMapper(spec, pod, volume.VolumeOptions{})
	if err != nil {
		t.Fatalf("Failed to make a new Mounter: %v", err)
	}
	if mapper == nil {
		t.Fatalf("Got a nil Mounter")
	}

	// GetGlobalMapPath
	gMapPath, err := mapper.GetGlobalMapPath(spec)
	if err != nil || len(gMapPath) == 0 {
		t.Fatalf("Invalid GlobalMapPath from spec: %s, error: %v", spec.PersistentVolume.Spec.AzureDisk.DiskName, err)
	}
	if gMapPath != expectedGlobalPath {
		t.Errorf("Failed to get GlobalMapPath: %s, expected %s", gMapPath, expectedGlobalPath)
	}

	// GetPodDeviceMapPath
	gDevicePath, gVolName := mapper.GetPodDeviceMapPath()
	if gDevicePath != expectedPodPath {
		t.Errorf("Got unexpected pod path: %s, expected %s", gDevicePath, expectedPodPath)
	}
	if gVolName != testPVName {
		t.Errorf("Got unexpected volNamne: %s, expected %s", gVolName, testPVName)
	}
}

View File

@ -1,737 +0,0 @@
//go:build !providerless
// +build !providerless
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azuredd
// about how to get all VM size list,
// refer to https://github.com/kubernetes/kubernetes/issues/77461#issuecomment-492488756
//
// maxDataDiskCountMap maps an upper-cased Azure VM size name to the maximum
// number of data disks that can be attached to a VM of that size. Sizes absent
// from this table fall back to defaultAzureVolumeLimit (see getMaxDataDiskCount
// usage in the tests).
var maxDataDiskCountMap = map[string]int64{
	"BASIC_A0":                   1,
	"BASIC_A1":                   2,
	"BASIC_A2":                   4,
	"BASIC_A3":                   8,
	"BASIC_A4":                   16,
	"STANDARD_A0":                1,
	"STANDARD_A1":                2,
	"STANDARD_A1_V2":             2,
	"STANDARD_A2":                4,
	"STANDARD_A2M_V2":            4,
	"STANDARD_A2_V2":             4,
	"STANDARD_A3":                8,
	"STANDARD_A4":                16,
	"STANDARD_A4M_V2":            8,
	"STANDARD_A4_V2":             8,
	"STANDARD_A5":                4,
	"STANDARD_A6":                8,
	"STANDARD_A7":                16,
	"STANDARD_A8M_V2":            16,
	"STANDARD_A8_V2":             16,
	"STANDARD_B12MS":             16,
	"STANDARD_B16MS":             32,
	"STANDARD_B1LS":              2,
	"STANDARD_B1MS":              2,
	"STANDARD_B1S":               2,
	"STANDARD_B20MS":             32,
	"STANDARD_B2MS":              4,
	"STANDARD_B2S":               4,
	"STANDARD_B4MS":              8,
	"STANDARD_B8MS":              16,
	"STANDARD_D11":               8,
	"STANDARD_D11_V2":            8,
	"STANDARD_D11_V2_PROMO":      8,
	"STANDARD_D12":               16,
	"STANDARD_D12_V2":            16,
	"STANDARD_D12_V2_PROMO":      16,
	"STANDARD_D13":               32,
	"STANDARD_D13_V2":            32,
	"STANDARD_D13_V2_PROMO":      32,
	"STANDARD_D1":                4,
	"STANDARD_D14":               64,
	"STANDARD_D14_V2":            64,
	"STANDARD_D14_V2_PROMO":      64,
	"STANDARD_D15_V2":            64,
	"STANDARD_D16ADS_V5":         32,
	"STANDARD_D16AS_V4":          32,
	"STANDARD_D16AS_V5":          32,
	"STANDARD_D16A_V4":           32,
	"STANDARD_D16DS_V4":          32,
	"STANDARD_D16DS_V5":          32,
	"STANDARD_D16D_V4":           32,
	"STANDARD_D16D_V5":           32,
	"STANDARD_D16PDS_V5":         32,
	"STANDARD_D16PLDS_V5":        32,
	"STANDARD_D16PLS_V5":         32,
	"STANDARD_D16PS_V5":          32,
	"STANDARD_D16S_V3":           32,
	"STANDARD_D16S_V4":           32,
	"STANDARD_D16S_V5":           32,
	"STANDARD_D16_V3":            32,
	"STANDARD_D16_V4":            32,
	"STANDARD_D16_V5":            32,
	"STANDARD_D1_V2":             4,
	"STANDARD_D2":                8,
	"STANDARD_D2ADS_V5":          4,
	"STANDARD_D2AS_V4":           4,
	"STANDARD_D2AS_V5":           4,
	"STANDARD_D2A_V4":            4,
	"STANDARD_D2DS_V4":           4,
	"STANDARD_D2DS_V5":           4,
	"STANDARD_D2D_V4":            4,
	"STANDARD_D2D_V5":            4,
	"STANDARD_D2PDS_V5":          4,
	"STANDARD_D2PLDS_V5":         4,
	"STANDARD_D2PLS_V5":          4,
	"STANDARD_D2PS_V5":           4,
	"STANDARD_D2S_V3":            4,
	"STANDARD_D2S_V4":            4,
	"STANDARD_D2S_V5":            4,
	"STANDARD_D2_V2":             8,
	"STANDARD_D2_V2_PROMO":       8,
	"STANDARD_D2_V3":             4,
	"STANDARD_D2_V4":             4,
	"STANDARD_D2_V5":             4,
	"STANDARD_D3":                16,
	"STANDARD_D32ADS_V5":         32,
	"STANDARD_D32AS_V4":          32,
	"STANDARD_D32AS_V5":          32,
	"STANDARD_D32A_V4":           32,
	"STANDARD_D32DS_V4":          32,
	"STANDARD_D32DS_V5":          32,
	"STANDARD_D32D_V4":           32,
	"STANDARD_D32D_V5":           32,
	"STANDARD_D32PDS_V5":         32,
	"STANDARD_D32PLDS_V5":        32,
	"STANDARD_D32PLS_V5":         32,
	"STANDARD_D32PS_V5":          32,
	"STANDARD_D32S_V3":           32,
	"STANDARD_D32S_V4":           32,
	"STANDARD_D32S_V5":           32,
	"STANDARD_D32_V3":            32,
	"STANDARD_D32_V4":            32,
	"STANDARD_D32_V5":            32,
	"STANDARD_D3_V2":             16,
	"STANDARD_D3_V2_PROMO":       16,
	"STANDARD_D4":                32,
	"STANDARD_D48ADS_V5":         32,
	"STANDARD_D48AS_V4":          32,
	"STANDARD_D48AS_V5":          32,
	"STANDARD_D48A_V4":           32,
	"STANDARD_D48DS_V4":          32,
	"STANDARD_D48DS_V5":          32,
	"STANDARD_D48D_V4":           32,
	"STANDARD_D48D_V5":           32,
	"STANDARD_D48PDS_V5":         32,
	"STANDARD_D48PLDS_V5":        32,
	"STANDARD_D48PLS_V5":         32,
	"STANDARD_D48PS_V5":          32,
	"STANDARD_D48S_V3":           32,
	"STANDARD_D48S_V4":           32,
	"STANDARD_D48S_V5":           32,
	"STANDARD_D48_V3":            32,
	"STANDARD_D48_V4":            32,
	"STANDARD_D48_V5":            32,
	"STANDARD_D4ADS_V5":          8,
	"STANDARD_D4AS_V4":           8,
	"STANDARD_D4AS_V5":           8,
	"STANDARD_D4A_V4":            8,
	"STANDARD_D4DS_V4":           8,
	"STANDARD_D4DS_V5":           8,
	"STANDARD_D4D_V4":            8,
	"STANDARD_D4D_V5":            8,
	"STANDARD_D4PDS_V5":          8,
	"STANDARD_D4PLDS_V5":         8,
	"STANDARD_D4PLS_V5":          8,
	"STANDARD_D4PS_V5":           8,
	"STANDARD_D4S_V3":            8,
	"STANDARD_D4S_V4":            8,
	"STANDARD_D4S_V5":            8,
	"STANDARD_D4_V2":             32,
	"STANDARD_D4_V2_PROMO":       32,
	"STANDARD_D4_V3":             8,
	"STANDARD_D4_V4":             8,
	"STANDARD_D4_V5":             8,
	"STANDARD_D5_V2":             64,
	"STANDARD_D5_V2_PROMO":       64,
	"STANDARD_D64ADS_V5":         32,
	"STANDARD_D64AS_V4":          32,
	"STANDARD_D64AS_V5":          32,
	"STANDARD_D64A_V4":           32,
	"STANDARD_D64DS_V4":          32,
	"STANDARD_D64DS_V5":          32,
	"STANDARD_D64D_V4":           32,
	"STANDARD_D64D_V5":           32,
	"STANDARD_D64PDS_V5":         32,
	"STANDARD_D64PLDS_V5":        32,
	"STANDARD_D64PLS_V5":         32,
	"STANDARD_D64PS_V5":          32,
	"STANDARD_D64S_V3":           32,
	"STANDARD_D64S_V4":           32,
	"STANDARD_D64S_V5":           32,
	"STANDARD_D64_V3":            32,
	"STANDARD_D64_V4":            32,
	"STANDARD_D64_V5":            32,
	"STANDARD_D8ADS_V5":          16,
	"STANDARD_D8AS_V4":           16,
	"STANDARD_D8AS_V5":           16,
	"STANDARD_D8A_V4":            16,
	"STANDARD_D8DS_V4":           16,
	"STANDARD_D8DS_V5":           16,
	"STANDARD_D8D_V4":            16,
	"STANDARD_D8D_V5":            16,
	"STANDARD_D8PDS_V5":          16,
	"STANDARD_D8PLDS_V5":         16,
	"STANDARD_D8PLS_V5":          16,
	"STANDARD_D8PS_V5":           16,
	"STANDARD_D8S_V3":            16,
	"STANDARD_D8S_V4":            16,
	"STANDARD_D8S_V5":            16,
	"STANDARD_D8_V3":             16,
	"STANDARD_D8_V4":             16,
	"STANDARD_D8_V5":             16,
	"STANDARD_D96ADS_V5":         32,
	"STANDARD_D96AS_V4":          32,
	"STANDARD_D96AS_V5":          32,
	"STANDARD_D96A_V4":           32,
	"STANDARD_D96DS_V5":          32,
	"STANDARD_D96D_V5":           32,
	"STANDARD_D96S_V5":           32,
	"STANDARD_D96_V5":            32,
	"STANDARD_DC16ADS_V5":        32,
	"STANDARD_DC16AS_V5":         32,
	"STANDARD_DC16DS_V3":         32,
	"STANDARD_DC16S_V3":          32,
	"STANDARD_DC1DS_V3":          4,
	"STANDARD_DC1S_V2":           1,
	"STANDARD_DC1S_V3":           4,
	"STANDARD_DC24DS_V3":         32,
	"STANDARD_DC24S_V3":          32,
	"STANDARD_DC2ADS_V5":         4,
	"STANDARD_DC2AS_V5":          4,
	"STANDARD_DC2DS_V3":          8,
	"STANDARD_DC2S":              2,
	"STANDARD_DC2S_V2":           2,
	"STANDARD_DC2S_V3":           8,
	"STANDARD_DC32ADS_V5":        32,
	"STANDARD_DC32AS_V5":         32,
	"STANDARD_DC32DS_V3":         32,
	"STANDARD_DC32S_V3":          32,
	"STANDARD_DC48ADS_V5":        32,
	"STANDARD_DC48AS_V5":         32,
	"STANDARD_DC48DS_V3":         32,
	"STANDARD_DC48S_V3":          32,
	"STANDARD_DC4ADS_V5":         8,
	"STANDARD_DC4AS_V5":          8,
	"STANDARD_DC4DS_V3":          16,
	"STANDARD_DC4S":              4,
	"STANDARD_DC4S_V2":           4,
	"STANDARD_DC4S_V3":           16,
	"STANDARD_DC64ADS_V5":        32,
	"STANDARD_DC64AS_V5":         32,
	"STANDARD_DC8ADS_V5":         16,
	"STANDARD_DC8AS_V5":          16,
	"STANDARD_DC8DS_V3":          32,
	"STANDARD_DC8S_V3":           32,
	"STANDARD_DC8_V2":            8,
	"STANDARD_DC96ADS_V5":        32,
	"STANDARD_DC96AS_V5":         32,
	"STANDARD_DS11-1_V2":         8,
	"STANDARD_DS11":              8,
	"STANDARD_DS11_V2":           8,
	"STANDARD_DS11_V2_PROMO":     8,
	"STANDARD_DS12":              16,
	"STANDARD_DS12-1_V2":         16,
	"STANDARD_DS12-2_V2":         16,
	"STANDARD_DS12_V2":           16,
	"STANDARD_DS12_V2_PROMO":     16,
	"STANDARD_DS13-2_V2":         32,
	"STANDARD_DS13":              32,
	"STANDARD_DS13-4_V2":         32,
	"STANDARD_DS13_V2":           32,
	"STANDARD_DS13_V2_PROMO":     32,
	"STANDARD_DS1":               4,
	"STANDARD_DS14-4_V2":         64,
	"STANDARD_DS14":              64,
	"STANDARD_DS14-8_V2":         64,
	"STANDARD_DS14_V2":           64,
	"STANDARD_DS14_V2_PROMO":     64,
	"STANDARD_DS15_V2":           64,
	"STANDARD_DS1_V2":            4,
	"STANDARD_DS2":               8,
	"STANDARD_DS2_V2":            8,
	"STANDARD_DS2_V2_PROMO":      8,
	"STANDARD_DS3":               16,
	"STANDARD_DS3_V2":            16,
	"STANDARD_DS3_V2_PROMO":      16,
	"STANDARD_DS4":               32,
	"STANDARD_DS4_V2":            32,
	"STANDARD_DS4_V2_PROMO":      32,
	"STANDARD_DS5_V2":            64,
	"STANDARD_DS5_V2_PROMO":      64,
	"STANDARD_E104IDS_V5":        64,
	"STANDARD_E104ID_V5":         64,
	"STANDARD_E104IS_V5":         64,
	"STANDARD_E104I_V5":          64,
	"STANDARD_E112IADS_V5":       64,
	"STANDARD_E112IAS_V5":        64,
	"STANDARD_E16-4ADS_V5":       32,
	"STANDARD_E16-4AS_V4":        32,
	"STANDARD_E16-4AS_V5":        32,
	"STANDARD_E16-4DS_V4":        32,
	"STANDARD_E16-4DS_V5":        32,
	"STANDARD_E16-4S_V3":         32,
	"STANDARD_E16-4S_V4":         32,
	"STANDARD_E16-4S_V5":         32,
	"STANDARD_E16-8ADS_V5":       32,
	"STANDARD_E16-8AS_V4":        32,
	"STANDARD_E16-8AS_V5":        32,
	"STANDARD_E16-8DS_V4":        32,
	"STANDARD_E16-8DS_V5":        32,
	"STANDARD_E16-8S_V3":         32,
	"STANDARD_E16-8S_V4":         32,
	"STANDARD_E16-8S_V5":         32,
	"STANDARD_E16ADS_V5":         32,
	"STANDARD_E16AS_V4":          32,
	"STANDARD_E16AS_V5":          32,
	"STANDARD_E16A_V4":           32,
	"STANDARD_E16BDS_V5":         32,
	"STANDARD_E16BS_V5":          32,
	"STANDARD_E16DS_V4":          32,
	"STANDARD_E16DS_V5":          32,
	"STANDARD_E16D_V4":           32,
	"STANDARD_E16D_V5":           32,
	"STANDARD_E16PDS_V5":         32,
	"STANDARD_E16PS_V5":          32,
	"STANDARD_E16S_V3":           32,
	"STANDARD_E16S_V4":           32,
	"STANDARD_E16S_V5":           32,
	"STANDARD_E16_V3":            32,
	"STANDARD_E16_V4":            32,
	"STANDARD_E16_V5":            32,
	"STANDARD_E20ADS_V5":         32,
	"STANDARD_E20AS_V4":          32,
	"STANDARD_E20AS_V5":          32,
	"STANDARD_E20A_V4":           32,
	"STANDARD_E20DS_V4":          32,
	"STANDARD_E20DS_V5":          32,
	"STANDARD_E20D_V4":           32,
	"STANDARD_E20D_V5":           32,
	"STANDARD_E20PDS_V5":         32,
	"STANDARD_E20PS_V5":          32,
	"STANDARD_E20S_V3":           32,
	"STANDARD_E20S_V4":           32,
	"STANDARD_E20S_V5":           32,
	"STANDARD_E20_V3":            32,
	"STANDARD_E20_V4":            32,
	"STANDARD_E20_V5":            32,
	"STANDARD_E2ADS_V5":          4,
	"STANDARD_E2AS_V4":           4,
	"STANDARD_E2AS_V5":           4,
	"STANDARD_E2A_V4":            4,
	"STANDARD_E2BDS_V5":          4,
	"STANDARD_E2BS_V5":           4,
	"STANDARD_E2DS_V4":           4,
	"STANDARD_E2DS_V5":           4,
	"STANDARD_E2D_V4":            4,
	"STANDARD_E2D_V5":            4,
	"STANDARD_E2PDS_V5":          4,
	"STANDARD_E2PS_V5":           4,
	"STANDARD_E2S_V3":            4,
	"STANDARD_E2S_V4":            4,
	"STANDARD_E2S_V5":            4,
	"STANDARD_E2_V3":             4,
	"STANDARD_E2_V4":             4,
	"STANDARD_E2_V5":             4,
	"STANDARD_E32-16ADS_V5":      32,
	"STANDARD_E32-16AS_V4":       32,
	"STANDARD_E32-16AS_V5":       32,
	"STANDARD_E32-16DS_V4":       32,
	"STANDARD_E32-16DS_V5":       32,
	"STANDARD_E32-16S_V3":        32,
	"STANDARD_E32-16S_V4":        32,
	"STANDARD_E32-16S_V5":        32,
	"STANDARD_E32-8ADS_V5":       32,
	"STANDARD_E32-8AS_V4":        32,
	"STANDARD_E32-8AS_V5":        32,
	"STANDARD_E32-8DS_V4":        32,
	"STANDARD_E32-8DS_V5":        32,
	"STANDARD_E32-8S_V3":         32,
	"STANDARD_E32-8S_V4":         32,
	"STANDARD_E32-8S_V5":         32,
	"STANDARD_E32ADS_V5":         32,
	"STANDARD_E32AS_V4":          32,
	"STANDARD_E32AS_V5":          32,
	"STANDARD_E32A_V4":           32,
	"STANDARD_E32BDS_V5":         32,
	"STANDARD_E32BS_V5":          32,
	"STANDARD_E32DS_V4":          32,
	"STANDARD_E32DS_V5":          32,
	"STANDARD_E32D_V4":           32,
	"STANDARD_E32D_V5":           32,
	"STANDARD_E32PDS_V5":         32,
	"STANDARD_E32PS_V5":          32,
	"STANDARD_E32S_V3":           32,
	"STANDARD_E32S_V4":           32,
	"STANDARD_E32S_V5":           32,
	"STANDARD_E32_V3":            32,
	"STANDARD_E32_V4":            32,
	"STANDARD_E32_V5":            32,
	"STANDARD_E4-2ADS_V5":        8,
	"STANDARD_E4-2AS_V4":         8,
	"STANDARD_E4-2AS_V5":         8,
	"STANDARD_E4-2DS_V4":         8,
	"STANDARD_E4-2DS_V5":         8,
	"STANDARD_E4-2S_V3":          8,
	"STANDARD_E4-2S_V4":          8,
	"STANDARD_E4-2S_V5":          8,
	"STANDARD_E48ADS_V5":         32,
	"STANDARD_E48AS_V4":          32,
	"STANDARD_E48AS_V5":          32,
	"STANDARD_E48A_V4":           32,
	"STANDARD_E48BDS_V5":         32,
	"STANDARD_E48BS_V5":          32,
	"STANDARD_E48DS_V4":          32,
	"STANDARD_E48DS_V5":          32,
	"STANDARD_E48D_V4":           32,
	"STANDARD_E48D_V5":           32,
	"STANDARD_E48S_V3":           32,
	"STANDARD_E48S_V4":           32,
	"STANDARD_E48S_V5":           32,
	"STANDARD_E48_V3":            32,
	"STANDARD_E48_V4":            32,
	"STANDARD_E48_V5":            32,
	"STANDARD_E4ADS_V5":          8,
	"STANDARD_E4AS_V4":           8,
	"STANDARD_E4AS_V5":           8,
	"STANDARD_E4A_V4":            8,
	"STANDARD_E4BDS_V5":          8,
	"STANDARD_E4BS_V5":           8,
	"STANDARD_E4DS_V4":           8,
	"STANDARD_E4DS_V5":           8,
	"STANDARD_E4D_V4":            8,
	"STANDARD_E4D_V5":            8,
	"STANDARD_E4PDS_V5":          8,
	"STANDARD_E4PS_V5":           8,
	"STANDARD_E4S_V3":            8,
	"STANDARD_E4S_V4":            8,
	"STANDARD_E4S_V5":            8,
	"STANDARD_E4_V3":             8,
	"STANDARD_E4_V4":             8,
	"STANDARD_E4_V5":             8,
	"STANDARD_E64-16ADS_V5":      32,
	"STANDARD_E64-16AS_V4":       32,
	"STANDARD_E64-16AS_V5":       32,
	"STANDARD_E64-16DS_V4":       32,
	"STANDARD_E64-16DS_V5":       32,
	"STANDARD_E64-16S_V3":        32,
	"STANDARD_E64-16S_V4":        32,
	"STANDARD_E64-16S_V5":        32,
	"STANDARD_E64-32ADS_V5":      32,
	"STANDARD_E64-32AS_V4":       32,
	"STANDARD_E64-32AS_V5":       32,
	"STANDARD_E64-32DS_V4":       32,
	"STANDARD_E64-32DS_V5":       32,
	"STANDARD_E64-32S_V3":        32,
	"STANDARD_E64-32S_V4":        32,
	"STANDARD_E64-32S_V5":        32,
	"STANDARD_E64ADS_V5":         32,
	"STANDARD_E64AS_V4":          32,
	"STANDARD_E64AS_V5":          32,
	"STANDARD_E64A_V4":           32,
	"STANDARD_E64BDS_V5":         32,
	"STANDARD_E64BS_V5":          32,
	"STANDARD_E64DS_V4":          32,
	"STANDARD_E64DS_V5":          32,
	"STANDARD_E64D_V4":           32,
	"STANDARD_E64D_V5":           32,
	"STANDARD_E64IS_V3":          32,
	"STANDARD_E64I_V3":           32,
	"STANDARD_E64S_V3":           32,
	"STANDARD_E64S_V4":           32,
	"STANDARD_E64S_V5":           32,
	"STANDARD_E64_V3":            32,
	"STANDARD_E64_V4":            32,
	"STANDARD_E64_V5":            32,
	"STANDARD_E80IDS_V4":         64,
	"STANDARD_E80IS_V4":          64,
	"STANDARD_E8-2ADS_V5":        16,
	"STANDARD_E8-2AS_V4":         16,
	"STANDARD_E8-2AS_V5":         16,
	"STANDARD_E8-2DS_V4":         16,
	"STANDARD_E8-2DS_V5":         16,
	"STANDARD_E8-2S_V3":          16,
	"STANDARD_E8-2S_V4":          16,
	"STANDARD_E8-2S_V5":          16,
	"STANDARD_E8-4ADS_V5":        16,
	"STANDARD_E8-4AS_V4":         16,
	"STANDARD_E8-4AS_V5":         16,
	"STANDARD_E8-4DS_V4":         16,
	"STANDARD_E8-4DS_V5":         32,
	"STANDARD_E8-4S_V3":          16,
	"STANDARD_E8-4S_V4":          16,
	"STANDARD_E8-4S_V5":          32,
	"STANDARD_E8ADS_V5":          16,
	"STANDARD_E8AS_V4":           16,
	"STANDARD_E8AS_V5":           16,
	"STANDARD_E8A_V4":            16,
	"STANDARD_E8BDS_V5":          16,
	"STANDARD_E8BS_V5":           16,
	"STANDARD_E8DS_V4":           16,
	"STANDARD_E8DS_V5":           16,
	"STANDARD_E8D_V4":            16,
	"STANDARD_E8D_V5":            16,
	"STANDARD_E8PDS_V5":          16,
	"STANDARD_E8PS_V5":           16,
	"STANDARD_E8S_V3":            16,
	"STANDARD_E8S_V4":            16,
	"STANDARD_E8S_V5":            16,
	"STANDARD_E8_V3":             16,
	"STANDARD_E8_V4":             16,
	"STANDARD_E8_V5":             16,
	"STANDARD_E96-24ADS_V5":      32,
	"STANDARD_E96-24AS_V4":       32,
	"STANDARD_E96-24AS_V5":       32,
	"STANDARD_E96-24DS_V5":       32,
	"STANDARD_E96-24S_V5":        32,
	"STANDARD_E96-48ADS_V5":      32,
	"STANDARD_E96-48AS_V4":       32,
	"STANDARD_E96-48AS_V5":       32,
	"STANDARD_E96-48DS_V5":       32,
	"STANDARD_E96-48S_V5":        32,
	"STANDARD_E96ADS_V5":         32,
	"STANDARD_E96AS_V4":          32,
	"STANDARD_E96AS_V5":          32,
	"STANDARD_E96A_V4":           32,
	"STANDARD_E96DS_V5":          32,
	"STANDARD_E96D_V5":           32,
	"STANDARD_E96IAS_V4":         32,
	"STANDARD_E96S_V5":           32,
	"STANDARD_E96_V5":            32,
	"STANDARD_EC16ADS_V5":        32,
	"STANDARD_EC16AS_V5":         32,
	"STANDARD_EC20ADS_V5":        32,
	"STANDARD_EC20AS_V5":         32,
	"STANDARD_EC2ADS_V5":         4,
	"STANDARD_EC2AS_V5":          4,
	"STANDARD_EC32ADS_V5":        32,
	"STANDARD_EC32AS_V5":         32,
	"STANDARD_EC48ADS_V5":        32,
	"STANDARD_EC48AS_V5":         32,
	"STANDARD_EC4ADS_V5":         8,
	"STANDARD_EC4AS_V5":          8,
	"STANDARD_EC64ADS_V5":        32,
	"STANDARD_EC64AS_V5":         32,
	"STANDARD_EC8ADS_V5":         16,
	"STANDARD_EC8AS_V5":          16,
	"STANDARD_EC96ADS_V5":        32,
	"STANDARD_EC96AS_V5":         32,
	"STANDARD_EC96IADS_V5":       32,
	"STANDARD_EC96IAS_V5":        32,
	"STANDARD_F1":                4,
	"STANDARD_F16":               64,
	"STANDARD_F16S":              64,
	"STANDARD_F16S_V2":           32,
	"STANDARD_F1S":               4,
	"STANDARD_F2":                8,
	"STANDARD_F2S":               8,
	"STANDARD_F2S_V2":            4,
	"STANDARD_F32S_V2":           32,
	"STANDARD_F4":                16,
	"STANDARD_F48S_V2":           32,
	"STANDARD_F4S":               16,
	"STANDARD_F4S_V2":            8,
	"STANDARD_F64S_V2":           32,
	"STANDARD_F72S_V2":           32,
	"STANDARD_F8":                32,
	"STANDARD_F8S":               32,
	"STANDARD_F8S_V2":            16,
	"STANDARD_FX12MDS":           24,
	"STANDARD_FX24MDS":           32,
	"STANDARD_FX36MDS":           32,
	"STANDARD_FX48MDS":           32,
	"STANDARD_FX4MDS":            8,
	"STANDARD_G1":                8,
	"STANDARD_G2":                16,
	"STANDARD_G3":                32,
	"STANDARD_G4":                64,
	"STANDARD_G5":                64,
	"STANDARD_GS1":               8,
	"STANDARD_GS2":               16,
	"STANDARD_GS3":               32,
	"STANDARD_GS4-4":             64,
	"STANDARD_GS4":               64,
	"STANDARD_GS4-8":             64,
	"STANDARD_GS5-16":            64,
	"STANDARD_GS5":               64,
	"STANDARD_GS5-8":             64,
	"STANDARD_H16":               64,
	"STANDARD_H16M":              64,
	"STANDARD_H16M_PROMO":        64,
	"STANDARD_H16MR":             64,
	"STANDARD_H16MR_PROMO":       64,
	"STANDARD_H16_PROMO":         64,
	"STANDARD_H16R":              64,
	"STANDARD_H16R_PROMO":        64,
	"STANDARD_H8":                32,
	"STANDARD_H8M":               32,
	"STANDARD_H8M_PROMO":         32,
	"STANDARD_H8_PROMO":          32,
	"STANDARD_HB120-16RS_V2":     32,
	"STANDARD_HB120-16RS_V3":     32,
	"STANDARD_HB120-32RS_V2":     32,
	"STANDARD_HB120-32RS_V3":     32,
	"STANDARD_HB120-64RS_V2":     32,
	"STANDARD_HB120-64RS_V3":     32,
	"STANDARD_HB120-96RS_V2":     32,
	"STANDARD_HB120-96RS_V3":     32,
	"STANDARD_HB120RS_V2":        32,
	"STANDARD_HB120RS_V3":        32,
	"STANDARD_HB60-15RS":         4,
	"STANDARD_HB60-30RS":         4,
	"STANDARD_HB60-45RS":         4,
	"STANDARD_HB60RS":            4,
	"STANDARD_HC44-16RS":         4,
	"STANDARD_HC44-32RS":         4,
	"STANDARD_HC44RS":            4,
	"STANDARD_L16AS_V3":          32,
	"STANDARD_L16S":              64,
	"STANDARD_L16S_V2":           32,
	"STANDARD_L16S_V3":           32,
	"STANDARD_L32AS_V3":          32,
	"STANDARD_L32S":              64,
	"STANDARD_L32S_V2":           32,
	"STANDARD_L32S_V3":           32,
	"STANDARD_L48AS_V3":          32,
	"STANDARD_L48S_V2":           32,
	"STANDARD_L48S_V3":           32,
	"STANDARD_L4S":               16,
	"STANDARD_L64AS_V3":          32,
	"STANDARD_L64S_V2":           32,
	"STANDARD_L64S_V3":           32,
	"STANDARD_L80AS_V3":          32,
	"STANDARD_L80S_V2":           32,
	"STANDARD_L80S_V3":           32,
	"STANDARD_L8AS_V3":           16,
	"STANDARD_L8S":               32,
	"STANDARD_L8S_V2":            16,
	"STANDARD_L8S_V3":            16,
	"STANDARD_M128-32MS":         64,
	"STANDARD_M128":              64,
	"STANDARD_M128-64MS":         64,
	"STANDARD_M128DMS_V2":        64,
	"STANDARD_M128DS_V2":         64,
	"STANDARD_M128M":             64,
	"STANDARD_M128MS":            64,
	"STANDARD_M128MS_V2":         64,
	"STANDARD_M128S":             64,
	"STANDARD_M128S_V2":          64,
	"STANDARD_M16-4MS":           16,
	"STANDARD_M16-8MS":           16,
	"STANDARD_M16MS":             16,
	"STANDARD_M192IDMS_V2":       64,
	"STANDARD_M192IDS_V2":        64,
	"STANDARD_M192IMS_V2":        64,
	"STANDARD_M192IS_V2":         64,
	"STANDARD_M208MS_V2":         64,
	"STANDARD_M208S_V2":          64,
	"STANDARD_M32-16MS":          32,
	"STANDARD_M32-8MS":           32,
	"STANDARD_M32DMS_V2":         32,
	"STANDARD_M32LS":             32,
	"STANDARD_M32MS":             32,
	"STANDARD_M32MS_V2":          32,
	"STANDARD_M32TS":             32,
	"STANDARD_M416-208MS_V2":     64,
	"STANDARD_M416-208S_V2":      64,
	"STANDARD_M416MS_V2":         64,
	"STANDARD_M416S_V2":          64,
	"STANDARD_M64-16MS":          64,
	"STANDARD_M64-32MS":          64,
	"STANDARD_M64":               64,
	"STANDARD_M64DMS_V2":         64,
	"STANDARD_M64DS_V2":          64,
	"STANDARD_M64LS":             64,
	"STANDARD_M64M":              64,
	"STANDARD_M64MS":             64,
	"STANDARD_M64MS_V2":          64,
	"STANDARD_M64S":              64,
	"STANDARD_M64S_V2":           64,
	"STANDARD_M8-2MS":            8,
	"STANDARD_M8-4MS":            8,
	"STANDARD_M8MS":              8,
	"STANDARD_NC12":              48,
	"STANDARD_NC12_PROMO":        48,
	"STANDARD_NC12S_V2":          24,
	"STANDARD_NC12S_V3":          24,
	"STANDARD_NC16ADS_A10_V4":    16,
	"STANDARD_NC16AS_T4_V3":      32,
	"STANDARD_NC24":              64,
	"STANDARD_NC24ADS_A100_V4":   8,
	"STANDARD_NC24_PROMO":        64,
	"STANDARD_NC24R":             64,
	"STANDARD_NC24R_PROMO":       64,
	"STANDARD_NC24RS_V2":         32,
	"STANDARD_NC24RS_V3":         32,
	"STANDARD_NC24S_V2":          32,
	"STANDARD_NC24S_V3":          32,
	"STANDARD_NC32ADS_A10_V4":    32,
	"STANDARD_NC48ADS_A100_V4":   16,
	"STANDARD_NC4AS_T4_V3":       8,
	"STANDARD_NC6":               24,
	"STANDARD_NC64AS_T4_V3":      32,
	"STANDARD_NC6_PROMO":         24,
	"STANDARD_NC6S_V2":           12,
	"STANDARD_NC6S_V3":           12,
	"STANDARD_NC8ADS_A10_V4":     8,
	"STANDARD_NC8AS_T4_V3":       16,
	"STANDARD_NC96ADS_A100_V4":   32,
	"STANDARD_ND12S":             24,
	"STANDARD_ND24RS":            32,
	"STANDARD_ND24S":             32,
	"STANDARD_ND40RS_V2":         8,
	"STANDARD_ND40S_V3":          32,
	"STANDARD_ND6S":              12,
	"STANDARD_ND96AMSR_A100_V4":  16,
	"STANDARD_ND96ASR_V4":        16,
	"STANDARD_NP10S":             8,
	"STANDARD_NP20S":             16,
	"STANDARD_NP40S":             32,
	"STANDARD_NV12":              48,
	"STANDARD_NV12ADS_A10_V5":    8,
	"STANDARD_NV12_PROMO":        48,
	"STANDARD_NV12S_V2":          24,
	"STANDARD_NV12S_V3":          12,
	"STANDARD_NV16AS_V4":         32,
	"STANDARD_NV18ADS_A10_V5":    16,
	"STANDARD_NV24":              64,
	"STANDARD_NV24_PROMO":        64,
	"STANDARD_NV24S_V2":          32,
	"STANDARD_NV24S_V3":          24,
	"STANDARD_NV32AS_V4":         32,
	"STANDARD_NV36ADMS_A10_V5":   32,
	"STANDARD_NV36ADS_A10_V5":    32,
	"STANDARD_NV48S_V3":          32,
	"STANDARD_NV4AS_V4":          8,
	"STANDARD_NV6":               24,
	"STANDARD_NV6ADS_A10_V5":     4,
	"STANDARD_NV6_PROMO":         24,
	"STANDARD_NV6S_V2":           12,
	"STANDARD_NV72ADS_A10_V5":    32,
	"STANDARD_NV8AS_V4":          16,
	"STANDARD_PB6S":              12,
}

View File

@ -1,109 +0,0 @@
//go:build !providerless
// +build !providerless
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azuredd
import (
"os"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
// TestCanSupport checks that the azure-disk plugin registers under its
// canonical name and claims support for both an inline AzureDisk volume and a
// PersistentVolume carrying an AzureDisk source.
func TestCanSupport(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("azure_dd")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	plugMgr := volume.VolumePluginMgr{}
	plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))

	plug, err := plugMgr.FindPluginByName(azureDataDiskPluginName)
	if err != nil {
		t.Fatal("Can't find the plugin by name")
	}
	if plug.GetPluginName() != azureDataDiskPluginName {
		t.Errorf("Wrong name: %s", plug.GetPluginName())
	}

	// Both the inline-volume and the PersistentVolume spec shapes must be supported.
	inlineSpec := &volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{AzureDisk: &v1.AzureDiskVolumeSource{}}}}
	pvSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{AzureDisk: &v1.AzureDiskVolumeSource{}}}}}
	for _, spec := range []*volume.Spec{inlineSpec, pvSpec} {
		if !plug.CanSupport(spec) {
			t.Errorf("Expected true")
		}
	}
}
// The fakeAzureProvider type was removed because none of its functions were used.
// Testing mounting would require path calculation, which depends on the cloud provider; the provider is faked in the test above.
// TestGetMaxDataDiskCount exercises the VM-size to max-data-disk lookup,
// including case-insensitive matching and the fallback to
// defaultAzureVolumeLimit for unknown or empty instance types.
func TestGetMaxDataDiskCount(t *testing.T) {
	cases := []struct {
		vmSize string
		want   int64
	}{
		{vmSize: "standard_d2_v2", want: 8},
		{vmSize: "Standard_DS14_V2", want: 64},
		{vmSize: "NOT_EXISTING", want: defaultAzureVolumeLimit},
		{vmSize: "", want: defaultAzureVolumeLimit},
	}
	for _, c := range cases {
		got := getMaxDataDiskCount(c.vmSize)
		assert.Equal(t, c.want, got)
	}
}
// TestUnsupportedVolumeHost checks that ConstructVolumeSpec returns an error
// when the plugin is initialized with the fake kubelet volume host used here.
func TestUnsupportedVolumeHost(t *testing.T) {
	tmpDir, err := utiltesting.MkTmpdir("azure_dd")
	if err != nil {
		t.Fatalf("can't make a temp dir: %v", err)
	}
	defer os.RemoveAll(tmpDir)

	mgr := volume.VolumePluginMgr{}
	mgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeKubeletVolumeHost(t, tmpDir, nil, nil))

	plug, err := mgr.FindPluginByName(azureDataDiskPluginName)
	if err != nil {
		t.Fatal("Can't find the plugin by name")
	}
	if _, err = plug.ConstructVolumeSpec("", ""); err == nil {
		t.Errorf("Expected failure constructing volume spec with unsupported VolumeHost")
	}
}

View File

@ -1,210 +0,0 @@
//go:build !providerless
// +build !providerless
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azuredd
import (
"fmt"
"os"
"runtime"
"k8s.io/klog/v2"
"k8s.io/mount-utils"
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
)
// azureDiskMounter implements volume.Mounter for azure data disks. It embeds
// the common dataDisk state and carries the volume spec, the owning plugin,
// and the per-volume options needed to perform the bind mount.
type azureDiskMounter struct {
	*dataDisk
	spec    *volume.Spec
	plugin  *azureDataDiskPlugin
	options volume.VolumeOptions
}
// azureDiskUnmounter implements volume.Unmounter for azure data disks.
type azureDiskUnmounter struct {
	*dataDisk
	plugin *azureDataDiskPlugin
}

// Compile-time interface conformance checks.
var _ volume.Unmounter = &azureDiskUnmounter{}
var _ volume.Mounter = &azureDiskMounter{}
// GetAttributes reports this volume's mount attributes. The volume is treated
// as read-write unless the spec's ReadOnly field is set to true; read-only
// volumes are not "managed". A failure to read the volume source is logged
// and the read-write default is used.
func (m *azureDiskMounter) GetAttributes() volume.Attributes {
	ro := false
	src, _, err := getVolumeSource(m.spec)
	switch {
	case err != nil:
		klog.Infof("azureDisk - mounter failed to get volume source for spec %s %v", m.spec.Name(), err)
	case src.ReadOnly != nil:
		ro = *src.ReadOnly
	}
	return volume.Attributes{
		ReadOnly:       ro,
		Managed:        !ro,
		SELinuxRelabel: true,
	}
}
// SetUp mounts the volume at its default pod-level path (see GetPath),
// delegating to SetUpAt.
func (m *azureDiskMounter) SetUp(mounterArgs volume.MounterArgs) error {
	return m.SetUpAt(m.GetPath(), mounterArgs)
}
// GetPath returns the pod-local mount path for this volume, derived from the
// pod UID and volume name via the plugin host's path layout.
func (m *azureDiskMounter) GetPath() string {
	return getPath(m.dataDisk.podUID, m.dataDisk.volumeName, m.plugin.host)
}
// SetUpAt bind-mounts the disk's global mount point onto the pod-specific
// directory dir. It validates any pre-existing mount point at dir,
// (re)creates the target directory on non-Windows hosts, performs the bind
// mount (adding "ro" for read-only volumes and merging user mount options),
// and on mount failure attempts to unwind the partial setup before returning
// the original mount error.
func (m *azureDiskMounter) SetUpAt(dir string, mounterArgs volume.MounterArgs) error {
	mounter := m.plugin.host.GetMounter(m.plugin.GetPluginName())
	volumeSource, _, err := getVolumeSource(m.spec)
	if err != nil {
		klog.Infof("azureDisk - mounter failed to get volume source for spec %s", m.spec.Name())
		return err
	}

	diskName := volumeSource.DiskName
	mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
	// A missing dir is fine (it is created below); any other error is fatal.
	if err != nil && !os.IsNotExist(err) {
		klog.Infof("azureDisk - cannot validate mount point for disk %s on %s %v", diskName, dir, err)
		return err
	}
	if !mountPoint {
		// testing original mount point, make sure the mount link is valid
		_, err := (&osIOHandler{}).ReadDir(dir)
		if err == nil {
			klog.V(4).Infof("azureDisk - already mounted to target %s", dir)
			return nil
		}
		// mount link is invalid, now unmount and remount later
		klog.Warningf("azureDisk - ReadDir %s failed with %v, unmount this directory", dir, err)
		if err := mounter.Unmount(dir); err != nil {
			klog.Errorf("azureDisk - Unmount directory %s failed with %v", dir, err)
			return err
		}
	}

	if runtime.GOOS != "windows" {
		// in windows, we will use mklink to mount, will MkdirAll in Mount func
		if err := os.MkdirAll(dir, 0750); err != nil {
			klog.Errorf("azureDisk - mkdir failed on disk %s on dir: %s (%v)", diskName, dir, err)
			return err
		}
	}

	options := []string{"bind"}
	if volumeSource.ReadOnly != nil && *volumeSource.ReadOnly {
		options = append(options, "ro")
	}
	if m.options.MountOptions != nil {
		options = util.JoinMountOptions(m.options.MountOptions, options)
	}

	klog.V(4).Infof("azureDisk - Attempting to mount %s on %s", diskName, dir)
	isManagedDisk := (*volumeSource.Kind == v1.AzureManagedDisk)
	globalPDPath, err := makeGlobalPDPath(m.plugin.host, volumeSource.DataDiskURI, isManagedDisk)
	if err != nil {
		return err
	}

	mountErr := mounter.MountSensitiveWithoutSystemd(globalPDPath, dir, *volumeSource.FSType, options, nil)
	// Everything in the following control flow is meant as an
	// attempt cleanup a failed setupAt (bind mount)
	if mountErr != nil {
		klog.Infof("azureDisk - SetupAt:Mount disk:%s at dir:%s failed during mounting with error:%v, will attempt to clean up", diskName, dir, mountErr)
		mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
		if err != nil {
			return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint check failed for disk:%s on dir:%s with error %v original-mountErr:%v", diskName, dir, err, mountErr)
		}
		if !mountPoint {
			// dir still looks mounted: unmount, then confirm it really detached.
			if err = mounter.Unmount(dir); err != nil {
				return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup failed to unmount disk:%s on dir:%s with error:%v original-mountErr:%v", diskName, dir, err, mountErr)
			}
			mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
			if err != nil {
				return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup IsLikelyNotMountPoint for disk:%s on dir:%s check failed with error:%v original-mountErr:%v", diskName, dir, err, mountErr)
			}
			if !mountPoint {
				// not cool. leave for next sync loop.
				return fmt.Errorf("azureDisk - SetupAt:Mount:Failure:cleanup disk %s is still mounted on %s during cleanup original-mountErr:%v, despite call to unmount() - will try again next sync loop", diskName, dir, mountErr)
			}
		}
		if err = os.Remove(dir); err != nil {
			return fmt.Errorf("azureDisk - SetupAt:Mount:Failure error cleaning up (removing dir:%s) with error:%v original-mountErr:%v", dir, err, mountErr)
		}
		klog.V(2).Infof("azureDisk - Mount of disk:%s on dir:%s failed with mount error:%v post failure clean up was completed", diskName, dir, mountErr)
		return mountErr
	}

	// Apply fsGroup ownership only for read-write volumes.
	if volumeSource.ReadOnly == nil || !*volumeSource.ReadOnly {
		volume.SetVolumeOwnership(m, mounterArgs.FsGroup, mounterArgs.FSGroupChangePolicy, util.FSGroupCompleteHook(m.plugin, m.spec))
	}

	klog.V(2).Infof("azureDisk - successfully mounted disk %s on %s", diskName, dir)
	return nil
}
// TearDown unmounts the volume from its default pod-level path, delegating to
// TearDownAt.
func (u *azureDiskUnmounter) TearDown() error {
	return u.TearDownAt(u.GetPath())
}
// TearDownAt unmounts the bind mount at dir and removes the directory. It is
// tolerant of partial state: a missing path is treated as already torn down,
// and a dir that is not a mount point is removed before the unmount attempt.
// After unmounting, it re-checks the path and only removes the directory once
// it is no longer a mount point; otherwise it reports failure.
func (u *azureDiskUnmounter) TearDownAt(dir string) error {
	if pathExists, pathErr := mount.PathExists(dir); pathErr != nil {
		return fmt.Errorf("error checking if path exists: %w", pathErr)
	} else if !pathExists {
		klog.Warningf("Warning: Unmount skipped because path does not exist: %v", dir)
		return nil
	}

	klog.V(4).Infof("azureDisk - TearDownAt: %s", dir)

	mounter := u.plugin.host.GetMounter(u.plugin.GetPluginName())
	mountPoint, err := mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		return fmt.Errorf("azureDisk - TearDownAt: %s failed to do IsLikelyNotMountPoint %s", dir, err)
	}
	// mountPoint==true means dir is likely NOT a mount point; just remove it.
	if mountPoint {
		if err := os.Remove(dir); err != nil {
			return fmt.Errorf("azureDisk - TearDownAt: %s failed to do os.Remove %s", dir, err)
		}
	}
	if err := mounter.Unmount(dir); err != nil {
		return fmt.Errorf("azureDisk - TearDownAt: %s failed to do mounter.Unmount %s", dir, err)
	}
	mountPoint, err = mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		// Error message previously read "TearTownAt" — fixed typo.
		return fmt.Errorf("azureDisk - TearDownAt:IsLikelyNotMountPoint check failed: %v", err)
	}
	if mountPoint {
		return os.Remove(dir)
	}
	return fmt.Errorf("azureDisk - failed to un-bind-mount volume dir")
}
// GetPath returns the pod-local mount path for this volume, derived from the
// pod UID and volume name via the plugin host's path layout.
func (u *azureDiskUnmounter) GetPath() string {
	return getPath(u.dataDisk.podUID, u.dataDisk.volumeName, u.plugin.host)
}

View File

@ -1,397 +0,0 @@
//go:build !providerless
// +build !providerless
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azuredd
import (
"errors"
"fmt"
"strconv"
"strings"
"github.com/Azure/azure-sdk-for-go/services/storage/mgmt/2019-06-01/storage"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
volumehelpers "k8s.io/cloud-provider/volume/helpers"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/legacy-cloud-providers/azure"
)
// azureDiskProvisioner implements volume.Provisioner, dynamically creating
// azure disks for PVCs via the owning plugin's disk controller.
type azureDiskProvisioner struct {
	plugin  *azureDataDiskPlugin
	options volume.VolumeOptions
}
// azureDiskDeleter implements volume.Deleter, removing the backing azure disk
// when its PersistentVolume is deleted.
type azureDiskDeleter struct {
	*dataDisk
	spec   *volume.Spec
	plugin *azureDataDiskPlugin
}

// Compile-time interface conformance checks.
var _ volume.Provisioner = &azureDiskProvisioner{}
var _ volume.Deleter = &azureDiskDeleter{}
// GetPath returns the pod-local path for this volume.
// NOTE(review): this passes d.dataDisk.diskName where the mounter/unmounter
// GetPath implementations pass volumeName — confirm the difference is
// intentional.
func (d *azureDiskDeleter) GetPath() string {
	return getPath(d.podUID, d.dataDisk.diskName, d.plugin.host)
}
// Delete removes the backing azure disk through the cloud provider's disk
// controller. Managed disks and blob-backed disks use different deletion APIs,
// selected by the volume source's Kind.
func (d *azureDiskDeleter) Delete() error {
	src, _, err := getVolumeSource(d.spec)
	if err != nil {
		return err
	}
	diskController, err := getDiskController(d.plugin.host)
	if err != nil {
		return err
	}
	if *src.Kind == v1.AzureManagedDisk {
		return diskController.DeleteManagedDisk(src.DataDiskURI)
	}
	return diskController.DeleteBlobDisk(src.DataDiskURI)
}
// parseZoned parses the 'zoned' storage-class parameter. An empty string
// defaults to true for managed disks and false otherwise. A non-empty value
// must be a valid boolean, and zoned=true is only permitted for managed disks.
func parseZoned(zonedString string, kind v1.AzureDataDiskKind) (bool, error) {
	managed := kind == v1.AzureManagedDisk
	if zonedString == "" {
		return managed, nil
	}
	zoned, err := strconv.ParseBool(zonedString)
	if err != nil {
		return false, fmt.Errorf("failed to parse 'zoned': %v", err)
	}
	if zoned && !managed {
		return false, fmt.Errorf("zoned is only supported by managed disks")
	}
	return zoned, nil
}
// Provision dynamically creates an Azure data disk (managed or, deprecated,
// blob-backed) according to the storage class parameters in p.options and
// returns a PersistentVolume object describing it, including topology node
// affinity. selectedNode and allowedTopologies feed zone selection for zoned
// managed disks.
func (p *azureDiskProvisioner) Provision(selectedNode *v1.Node, allowedTopologies []v1.TopologySelectorTerm) (*v1.PersistentVolume, error) {
	// perform static validation first
	if p.options.PVC.Spec.Selector != nil {
		return nil, fmt.Errorf("azureDisk - claim.Spec.Selector is not supported for dynamic provisioning on Azure disk")
	}

	var (
		location, account          string
		storageAccountType, fsType string
		cachingMode                v1.AzureDataDiskCachingMode
		strKind                    string
		err                        error
		resourceGroup              string
		zoned                      bool
		zonePresent                bool
		zonesPresent               bool
		strZoned                   string
		availabilityZone           string
		availabilityZones          sets.String
		selectedAvailabilityZone   string
		writeAcceleratorEnabled    string
		diskIopsReadWrite          string
		diskMbpsReadWrite          string
		diskEncryptionSetID        string
		customTags                 string
		maxShares                  int
	)
	// maxLength = 79 - (4 for ".vhd") = 75
	name := util.GenerateVolumeName(p.options.ClusterName, p.options.PVName, 75)
	capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
	requestGiB, err := volumehelpers.RoundUpToGiBInt(capacity)
	if err != nil {
		return nil, err
	}

	// Parse the storage class parameters; keys are matched case-insensitively.
	for k, v := range p.options.Parameters {
		switch strings.ToLower(k) {
		case "skuname":
			storageAccountType = v
		case "location":
			location = v
		case "storageaccount":
			account = v
		case "storageaccounttype":
			storageAccountType = v
		case "kind":
			strKind = v
		case "cachingmode":
			cachingMode = v1.AzureDataDiskCachingMode(v)
		case volume.VolumeParameterFSType:
			fsType = strings.ToLower(v)
		case "resourcegroup":
			resourceGroup = v
		case "zone":
			zonePresent = true
			availabilityZone = v
		case "zones":
			zonesPresent = true
			availabilityZones, err = volumehelpers.ZonesToSet(v)
			if err != nil {
				return nil, fmt.Errorf("error parsing zones %s, must be strings separated by commas: %v", v, err)
			}
		case "zoned":
			strZoned = v
		case "diskiopsreadwrite":
			diskIopsReadWrite = v
		case "diskmbpsreadwrite":
			diskMbpsReadWrite = v
		case "diskencryptionsetid":
			diskEncryptionSetID = v
		case "tags":
			customTags = v
		case azure.WriteAcceleratorEnabled:
			writeAcceleratorEnabled = v
		case "maxshares":
			maxShares, err = strconv.Atoi(v)
			if err != nil {
				return nil, fmt.Errorf("parse %s failed with error: %v", v, err)
			}
			if maxShares < 1 {
				return nil, fmt.Errorf("parse %s returned with invalid value: %d", v, maxShares)
			}
		default:
			return nil, fmt.Errorf("AzureDisk - invalid option %s in storage class", k)
		}
	}

	supportedModes := p.plugin.GetAccessModes()
	if maxShares < 2 {
		// only do AccessModes validation when maxShares < 2
		if !util.ContainsAllAccessModes(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
			return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported with maxShares(%d) < 2", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes(), maxShares)
		}
		if len(p.options.PVC.Spec.AccessModes) > 1 {
			return nil, fmt.Errorf("AzureDisk - multiple access modes are not supported on AzureDisk plugin with maxShares(%d) < 2", maxShares)
		}
		if len(p.options.PVC.Spec.AccessModes) == 1 {
			if p.options.PVC.Spec.AccessModes[0] != supportedModes[0] {
				return nil, fmt.Errorf("AzureDisk - mode %s is not supported by AzureDisk plugin (supported mode is %s) with maxShares(%d) < 2", p.options.PVC.Spec.AccessModes[0], supportedModes, maxShares)
			}
		}
	} else {
		// Shared disks (maxShares >= 2) advertise all access modes.
		supportedModes = []v1.PersistentVolumeAccessMode{
			v1.ReadWriteOnce,
			v1.ReadOnlyMany,
			v1.ReadWriteMany,
		}
	}

	// normalize values
	skuName, err := normalizeStorageAccountType(storageAccountType)
	if err != nil {
		return nil, err
	}

	kind, err := normalizeKind(strFirstLetterToUpper(strKind))
	if err != nil {
		return nil, err
	}

	zoned, err = parseZoned(strZoned, kind)
	if err != nil {
		return nil, err
	}

	// Cross-parameter validation: resourceGroup and zoned only apply to
	// managed disks, and zone-related parameters require zoned=true.
	if kind != v1.AzureManagedDisk {
		if resourceGroup != "" {
			return nil, errors.New("StorageClass option 'resourceGroup' can be used only for managed disks")
		}

		if zoned {
			return nil, errors.New("StorageClass option 'zoned' parameter is only supported for managed disks")
		}
	}

	if !zoned && (zonePresent || zonesPresent || len(allowedTopologies) > 0) {
		return nil, fmt.Errorf("zone, zones and allowedTopologies StorageClass parameters must be used together with zoned parameter")
	}

	if cachingMode, err = normalizeCachingMode(cachingMode); err != nil {
		return nil, err
	}

	diskController, err := getDiskController(p.plugin.host)
	if err != nil {
		return nil, err
	}

	// Select zone for managed disks based on zone, zones and allowedTopologies.
	if zoned {
		activeZones, err := diskController.GetActiveZones()
		if err != nil {
			return nil, fmt.Errorf("error querying active zones: %v", err)
		}

		if availabilityZone != "" || availabilityZones.Len() != 0 || activeZones.Len() != 0 || len(allowedTopologies) != 0 {
			selectedAvailabilityZone, err = volumehelpers.SelectZoneForVolume(zonePresent, zonesPresent, availabilityZone, availabilityZones, activeZones, selectedNode, allowedTopologies, p.options.PVC.Name)
			if err != nil {
				return nil, err
			}
		}
	}

	// create disk
	diskURI := ""
	labels := map[string]string{}
	if kind == v1.AzureManagedDisk {
		tags, err := azure.ConvertTagsToMap(customTags)
		if err != nil {
			return nil, err
		}
		// Cloud-wide tags from the provisioner options are merged on top of
		// the storage-class tags.
		if p.options.CloudTags != nil {
			for k, v := range *(p.options.CloudTags) {
				tags[k] = v
			}
		}

		if strings.EqualFold(writeAcceleratorEnabled, "true") {
			tags[azure.WriteAcceleratorEnabled] = "true"
		}

		volumeOptions := &azure.ManagedDiskOptions{
			DiskName:            name,
			StorageAccountType:  skuName,
			ResourceGroup:       resourceGroup,
			PVCName:             p.options.PVC.Name,
			SizeGB:              requestGiB,
			Tags:                tags,
			AvailabilityZone:    selectedAvailabilityZone,
			DiskIOPSReadWrite:   diskIopsReadWrite,
			DiskMBpsReadWrite:   diskMbpsReadWrite,
			DiskEncryptionSetID: diskEncryptionSetID,
			MaxShares:           int32(maxShares),
		}

		diskURI, err = diskController.CreateManagedDisk(volumeOptions)
		if err != nil {
			return nil, err
		}
		// Topology labels (region/zone) for the freshly created disk; they
		// drive the node affinity computed below.
		labels, err = diskController.GetAzureDiskLabels(diskURI)
		if err != nil {
			return nil, err
		}
	} else { // Attention: blob disk feature is deprecated
		if kind == v1.AzureDedicatedBlobDisk {
			_, diskURI, _, err = diskController.CreateVolume(name, account, storageAccountType, location, requestGiB)
			if err != nil {
				return nil, err
			}
		} else {
			diskURI, err = diskController.CreateBlobDisk(name, storage.SkuName(skuName), requestGiB)
			if err != nil {
				return nil, err
			}
		}
	}

	volumeMode := p.options.PVC.Spec.VolumeMode
	if volumeMode != nil && *volumeMode == v1.PersistentVolumeBlock {
		// Block volumes should not have any FSType
		fsType = ""
	}

	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name:   p.options.PVName,
			Labels: labels,
			Annotations: map[string]string{
				"volumehelper.VolumeDynamicallyCreatedByKey": "azure-disk-dynamic-provisioner",
			},
		},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: p.options.PersistentVolumeReclaimPolicy,
			AccessModes:                   supportedModes,
			Capacity: v1.ResourceList{
				v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", requestGiB)),
			},
			VolumeMode: volumeMode,
			PersistentVolumeSource: v1.PersistentVolumeSource{
				AzureDisk: &v1.AzureDiskVolumeSource{
					CachingMode: &cachingMode,
					DiskName:    name,
					DataDiskURI: diskURI,
					Kind:        &kind,
					FSType:      &fsType,
				},
			},
			MountOptions: p.options.MountOptions,
		},
	}

	nodeSelectorTerms := make([]v1.NodeSelectorTerm, 0)

	if zoned {
		// Set node affinity labels based on availability zone labels.
		if len(labels) > 0 {
			requirements := make([]v1.NodeSelectorRequirement, 0)
			for k, v := range labels {
				requirements = append(requirements, v1.NodeSelectorRequirement{Key: k, Operator: v1.NodeSelectorOpIn, Values: []string{v}})
			}

			nodeSelectorTerms = append(nodeSelectorTerms, v1.NodeSelectorTerm{
				MatchExpressions: requirements,
			})
		}
	} else {
		// Set node affinity labels based on topology.
		// This is required because unzoned AzureDisk can't be attached to zoned nodes.
		// There are at most 3 Availability Zones per supported Azure region.
		// Refer https://docs.microsoft.com/en-us/azure/virtual-machines/windows/manage-availability.
		for i := 0; i < 3; i++ {
			requirements := []v1.NodeSelectorRequirement{
				{
					Key:      v1.LabelTopologyRegion,
					Operator: v1.NodeSelectorOpIn,
					Values:   []string{diskController.GetLocation()},
				},
				{
					Key:      v1.LabelTopologyZone,
					Operator: v1.NodeSelectorOpIn,
					Values:   []string{strconv.Itoa(i)},
				},
			}
			nodeSelectorTerms = append(nodeSelectorTerms, v1.NodeSelectorTerm{
				MatchExpressions: requirements,
			})
		}
	}

	if len(nodeSelectorTerms) > 0 {
		pv.Spec.NodeAffinity = &v1.VolumeNodeAffinity{
			Required: &v1.NodeSelector{
				NodeSelectorTerms: nodeSelectorTerms,
			},
		}
	}

	return pv, nil
}

View File

@ -1,99 +0,0 @@
//go:build !providerless
// +build !providerless
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azuredd
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/api/core/v1"
)
// TestParseZoned exercises parseZoned across all disk kinds, covering the
// kind-dependent default (empty 'zoned' string), explicit true/false values,
// and the rule that only managed disks may be zoned.
func TestParseZoned(t *testing.T) {
	// Table of cases: msg describes the case, zoneString is the raw 'zoned'
	// parameter ("" means unspecified), expected is the parse result, and
	// expectError marks cases that must fail.
	tests := []struct {
		msg         string
		zoneString  string
		diskKind    v1.AzureDataDiskKind
		expected    bool
		expectError bool
	}{
		{
			msg:      "managed disk should default to zoned",
			diskKind: v1.AzureManagedDisk,
			expected: true,
		},
		{
			msg:      "shared blob disk should default to un-zoned",
			diskKind: v1.AzureSharedBlobDisk,
			expected: false,
		},
		{
			msg:      "shared dedicated disk should default to un-zoned",
			diskKind: v1.AzureDedicatedBlobDisk,
			expected: false,
		},
		{
			msg:        "managed disk should support zoned=true",
			diskKind:   v1.AzureManagedDisk,
			zoneString: "true",
			expected:   true,
		},
		{
			msg:        "managed disk should support zoned=false",
			diskKind:   v1.AzureManagedDisk,
			zoneString: "false",
			expected:   false,
		},
		{
			msg:        "shared blob disk should support zoned=false",
			diskKind:   v1.AzureSharedBlobDisk,
			zoneString: "false",
			expected:   false,
		},
		{
			msg:         "shared blob disk shouldn't support zoned=true",
			diskKind:    v1.AzureSharedBlobDisk,
			zoneString:  "true",
			expectError: true,
		},
		{
			msg:        "shared dedicated disk should support zoned=false",
			diskKind:   v1.AzureDedicatedBlobDisk,
			zoneString: "false",
			expected:   false,
		},
		{
			msg:         "dedicated blob disk shouldn't support zoned=true",
			diskKind:    v1.AzureDedicatedBlobDisk,
			zoneString:  "true",
			expectError: true,
		},
	}

	for i, test := range tests {
		real, err := parseZoned(test.zoneString, test.diskKind)
		if test.expectError {
			assert.Error(t, err, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
		} else {
			assert.Equal(t, test.expected, real, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
		}
	}
}

View File

@ -1,17 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azuredd

View File

@ -226,7 +226,7 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error {
return true
},
csitranslationplugins.AzureDiskInTreePluginName: func() bool {
return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureDisk)
return true
},
csitranslationplugins.AzureFileInTreePluginName: func() bool {
return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureFile)

View File

@ -67,7 +67,7 @@ func (pm PluginManager) IsMigrationCompleteForPlugin(pluginName string) bool {
case csilibplugins.AzureFileInTreePluginName:
return pm.featureGate.Enabled(features.InTreePluginAzureFileUnregister)
case csilibplugins.AzureDiskInTreePluginName:
return pm.featureGate.Enabled(features.InTreePluginAzureDiskUnregister)
return true
case csilibplugins.CinderInTreePluginName:
return pm.featureGate.Enabled(features.InTreePluginOpenStackUnregister)
case csilibplugins.VSphereInTreePluginName:
@ -95,7 +95,7 @@ func (pm PluginManager) IsMigrationEnabledForPlugin(pluginName string) bool {
case csilibplugins.AzureFileInTreePluginName:
return pm.featureGate.Enabled(features.CSIMigrationAzureFile)
case csilibplugins.AzureDiskInTreePluginName:
return pm.featureGate.Enabled(features.CSIMigrationAzureDisk)
return true
case csilibplugins.CinderInTreePluginName:
return true
case csilibplugins.VSphereInTreePluginName:

View File

@ -58,7 +58,6 @@ type persistentVolumeLabel struct {
mutex sync.Mutex
cloudConfig []byte
gcePVLabeler cloudprovider.PVLabeler
azurePVLabeler cloudprovider.PVLabeler
vspherePVLabeler cloudprovider.PVLabeler
}
@ -71,7 +70,7 @@ var _ kubeapiserveradmission.WantsCloudConfig = &persistentVolumeLabel{}
// As a side effect, the cloud provider may block invalid or non-existent volumes.
func newPersistentVolumeLabel() *persistentVolumeLabel {
// DEPRECATED: in a future release, we will use mutating admission webhooks to apply PV labels.
// Once the mutating admission webhook is used for Azure, and GCE,
// Once the mutating admission webhook is used for GCE,
// this admission controller will be removed.
klog.Warning("PersistentVolumeLabel admission controller is deprecated. " +
"Please remove this controller from your configuration files and scripts.")
@ -205,12 +204,6 @@ func (l *persistentVolumeLabel) findVolumeLabels(volume *api.PersistentVolume) (
return nil, fmt.Errorf("error querying GCE PD volume %s: %v", volume.Spec.GCEPersistentDisk.PDName, err)
}
return labels, nil
case volume.Spec.AzureDisk != nil:
labels, err := l.findAzureDiskLabels(volume)
if err != nil {
return nil, fmt.Errorf("error querying AzureDisk volume %s: %v", volume.Spec.AzureDisk.DiskName, err)
}
return labels, nil
case volume.Spec.VsphereVolume != nil:
labels, err := l.findVsphereVolumeLabels(volume)
if err != nil {
@ -271,54 +264,6 @@ func (l *persistentVolumeLabel) getGCEPVLabeler() (cloudprovider.PVLabeler, erro
return l.gcePVLabeler, nil
}
// getAzurePVLabeler returns the Azure implementation of PVLabeler.
// The labeler is built lazily on first use from the admission controller's
// cloud config and cached on l; l.mutex guards the check-then-create so
// concurrent admissions build it only once.
func (l *persistentVolumeLabel) getAzurePVLabeler() (cloudprovider.PVLabeler, error) {
	l.mutex.Lock()
	defer l.mutex.Unlock()

	if l.azurePVLabeler == nil {
		var cloudConfigReader io.Reader
		if len(l.cloudConfig) > 0 {
			cloudConfigReader = bytes.NewReader(l.cloudConfig)
		}

		cloudProvider, err := cloudprovider.GetCloudProvider("azure", cloudConfigReader)
		// Note: a nil provider with nil error yields a (nil, nil) return;
		// the caller checks for a nil labeler explicitly.
		if err != nil || cloudProvider == nil {
			return nil, err
		}

		azurePVLabeler, ok := cloudProvider.(cloudprovider.PVLabeler)
		if !ok {
			return nil, errors.New("Azure cloud provider does not implement PV labeling")
		}

		l.azurePVLabeler = azurePVLabeler
	}

	return l.azurePVLabeler, nil
}
// findAzureDiskLabels asks the Azure cloud provider for the labels (e.g.
// region/zone topology) of the given AzureDisk-backed PersistentVolume.
// Returns (nil, nil) for volumes still being provisioned.
func (l *persistentVolumeLabel) findAzureDiskLabels(volume *api.PersistentVolume) (map[string]string, error) {
	// Ignore any volumes that are being provisioned
	if volume.Spec.AzureDisk.DiskName == cloudvolume.ProvisionedVolumeName {
		return nil, nil
	}

	pvlabler, err := l.getAzurePVLabeler()
	if err != nil {
		return nil, err
	}
	if pvlabler == nil {
		return nil, fmt.Errorf("unable to build Azure cloud provider for AzureDisk")
	}

	// The PVLabeler interface operates on v1 objects, so convert the internal
	// API object before querying.
	pv := &v1.PersistentVolume{}
	err = k8s_api_v1.Convert_core_PersistentVolume_To_v1_PersistentVolume(volume, pv, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to convert PersistentVolume to core/v1: %q", err)
	}
	return pvlabler.GetLabelsForVolume(context.TODO(), pv)
}
func (l *persistentVolumeLabel) findVsphereVolumeLabels(volume *api.PersistentVolume) (map[string]string, error) {
pvlabler, err := l.getVspherePVLabeler()
if err != nil {

View File

@ -706,10 +706,9 @@ func Test_PVLAdmission(t *testing.T) {
// setPVLabler applies the given mock pvlabeler to implement PV labeling for all cloud providers.
// Given we mock out the values of the labels anyways, assigning the same mock labeler for every
// provider does not reduce test coverage but it does simplify/clean up the tests here because
// the provider is then decided based on the type of PV (EBS, GCEPD, Azure Disk, etc)
// the provider is then decided based on the type of PV (EBS, GCEPD, etc)
func setPVLabeler(handler *persistentVolumeLabel, pvlabeler cloudprovider.PVLabeler) {
	// Point every per-cloud labeler at the same mock implementation.
	handler.gcePVLabeler = pvlabeler
	handler.azurePVLabeler = pvlabeler
	handler.vspherePVLabeler = pvlabeler
}

View File

@ -63,28 +63,6 @@ func (p *Provider) DeleteNode(node *v1.Node) error {
return errors.New("not implemented yet")
}
// CreatePD creates a persistent volume: a 1 GiB Standard_LRS Azure managed
// disk named with the test prefix plus a random UUID. It returns the disk's
// URI. The zone is applied only when non-empty.
func (p *Provider) CreatePD(zone string) (string, error) {
	pdName := fmt.Sprintf("%s-%s", framework.TestContext.Prefix, string(uuid.NewUUID()))

	volumeOptions := &azure.ManagedDiskOptions{
		DiskName:           pdName,
		StorageAccountType: compute.StandardLRS,
		ResourceGroup:      "",
		PVCName:            pdName,
		SizeGB:             1,
		Tags:               nil,
		DiskIOPSReadWrite:  "",
		DiskMBpsReadWrite:  "",
	}

	// do not use blank zone definition
	if len(zone) > 0 {
		volumeOptions.AvailabilityZone = zone
	}
	return p.azureCloud.CreateManagedDisk(volumeOptions)
}
// CreateShare creates a share and return its account name and key.
func (p *Provider) CreateShare() (string, string, string, error) {
accountOptions := &azure.AccountOptions{
@ -118,15 +96,6 @@ func (p *Provider) DeleteShare(accountName, shareName string) error {
return err
}
// DeletePD deletes a persistent volume
// (the Azure managed disk identified by pdName). Any failure is logged
// before being returned to the caller.
func (p *Provider) DeletePD(pdName string) error {
	err := p.azureCloud.DeleteManagedDisk(pdName)
	if err != nil {
		framework.Logf("failed to delete Azure volume %q: %v", pdName, err)
	}
	return err
}
// EnableAndDisableInternalLB returns functions for both enabling and disabling internal Load Balancer
func (p *Provider) EnableAndDisableInternalLB() (enable, disable func(svc *v1.Service)) {
enable = func(svc *v1.Service) {

View File

@ -1314,150 +1314,6 @@ func (v *vSphereVolume) DeleteVolume(ctx context.Context) {
v.nodeInfo.VSphere.DeleteVolume(v.volumePath, v.nodeInfo.DataCenterRef)
}
// Azure Disk
// azureDiskDriver implements the storage framework's test-driver interfaces
// for the in-tree Azure Disk volume plugin.
type azureDiskDriver struct {
	driverInfo storageframework.DriverInfo
}

// azureDiskVolume tracks a disk created for a test by its URI.
type azureDiskVolume struct {
	volumeName string
}

// Compile-time checks that azureDiskDriver satisfies every framework
// interface the e2e suites rely on.
var _ storageframework.TestDriver = &azureDiskDriver{}
var _ storageframework.PreprovisionedVolumeTestDriver = &azureDiskDriver{}
var _ storageframework.InlineVolumeTestDriver = &azureDiskDriver{}
var _ storageframework.PreprovisionedPVTestDriver = &azureDiskDriver{}
var _ storageframework.DynamicPVTestDriver = &azureDiskDriver{}
var _ storageframework.CustomTimeoutsTestDriver = &azureDiskDriver{}
// InitAzureDiskDriver returns azureDiskDriver that implements TestDriver interface
// with the static capability/metadata description of the in-tree plugin.
func InitAzureDiskDriver() storageframework.TestDriver {
	return &azureDiskDriver{
		driverInfo: storageframework.DriverInfo{
			Name:             "azure-disk",
			InTreePluginName: "kubernetes.io/azure-disk",
			MaxFileSize:      storageframework.FileSizeMedium,
			SupportedSizeRange: e2evolume.SizeRange{
				Min: "1Gi",
			},
			SupportedFsType: sets.NewString(
				"", // Default fsType
				"ext4",
				"xfs",
			),
			TopologyKeys: []string{v1.LabelFailureDomainBetaZone},
			Capabilities: map[storageframework.Capability]bool{
				storageframework.CapPersistence: true,
				storageframework.CapFsGroup:     true,
				storageframework.CapBlock:       true,
				storageframework.CapExec:        true,
				storageframework.CapMultiPODs:   true,
				// Azure supports volume limits, but the test creates large
				// number of volumes and times out test suites.
				storageframework.CapVolumeLimits:      false,
				storageframework.CapTopology:          true,
				storageframework.CapMultiplePVsSameID: true,
			},
		},
	}
}
// GetDriverInfo returns the driver's static capability description.
func (a *azureDiskDriver) GetDriverInfo() *storageframework.DriverInfo {
	return &a.driverInfo
}
// SkipUnsupportedTest skips the test unless it is running against the
// "azure" cloud provider.
func (a *azureDiskDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
	e2eskipper.SkipUnlessProviderIs("azure")
}
// GetVolumeSource builds an inline AzureDisk VolumeSource for the given test
// volume. The disk name is the final path segment of the volume's URI; the
// FSType is set only when non-empty.
func (a *azureDiskDriver) GetVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) *v1.VolumeSource {
	vol, ok := e2evolume.(*azureDiskVolume)
	if !ok {
		framework.Failf("Failed to cast test volume of type %T to the Azure test volume", e2evolume)
	}

	name := vol.volumeName[strings.LastIndex(vol.volumeName, "/")+1:]
	kind := v1.AzureManagedDisk
	disk := &v1.AzureDiskVolumeSource{
		DiskName:    name,
		DataDiskURI: vol.volumeName,
		Kind:        &kind,
		ReadOnly:    &readOnly,
	}
	if fsType != "" {
		disk.FSType = &fsType
	}
	return &v1.VolumeSource{AzureDisk: disk}
}
// GetPersistentVolumeSource builds a PersistentVolumeSource for the given
// pre-provisioned Azure disk. The disk name is derived from the last path
// segment of the volume URI; node affinity is always nil for this driver.
func (a *azureDiskDriver) GetPersistentVolumeSource(readOnly bool, fsType string, e2evolume storageframework.TestVolume) (*v1.PersistentVolumeSource, *v1.VolumeNodeAffinity) {
	av, ok := e2evolume.(*azureDiskVolume)
	if !ok {
		framework.Failf("Failed to cast test volume of type %T to the Azure test volume", e2evolume)
	}

	diskName := av.volumeName[(strings.LastIndex(av.volumeName, "/") + 1):]

	kind := v1.AzureManagedDisk
	pvSource := v1.PersistentVolumeSource{
		AzureDisk: &v1.AzureDiskVolumeSource{
			DiskName:    diskName,
			DataDiskURI: av.volumeName,
			Kind:        &kind,
			ReadOnly:    &readOnly,
		},
	}
	// FSType is only set when explicitly requested.
	if fsType != "" {
		pvSource.AzureDisk.FSType = &fsType
	}
	return &pvSource, nil
}
// GetDynamicProvisionStorageClass returns a StorageClass for the in-tree
// Azure Disk provisioner with WaitForFirstConsumer binding, carrying an
// fsType parameter only when one is requested.
func (a *azureDiskDriver) GetDynamicProvisionStorageClass(ctx context.Context, config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
	parameters := map[string]string{}
	if fsType != "" {
		parameters["fsType"] = fsType
	}

	binding := storagev1.VolumeBindingWaitForFirstConsumer
	namespace := config.Framework.Namespace.Name
	return storageframework.GetStorageClass("kubernetes.io/azure-disk", parameters, &binding, namespace)
}
// PrepareTest returns the per-test configuration binding this driver to the
// given framework instance.
func (a *azureDiskDriver) PrepareTest(ctx context.Context, f *framework.Framework) *storageframework.PerTestConfig {
	return &storageframework.PerTestConfig{
		Driver:    a,
		Prefix:    "azure",
		Framework: f,
	}
}
// CreateVolume provisions an Azure disk in the zone appropriate for the test
// and wraps its URI in an azureDiskVolume. For inline volumes it also pins
// pod scheduling to that zone via the config's node selector.
func (a *azureDiskDriver) CreateVolume(ctx context.Context, config *storageframework.PerTestConfig, volType storageframework.TestVolType) storageframework.TestVolume {
	ginkgo.By("creating a test azure disk volume")
	zone := getInlineVolumeZone(ctx, config.Framework)
	if volType == storageframework.InlineVolume {
		// PD will be created in framework.TestContext.CloudConfig.Zone zone,
		// so pods should be also scheduled there.
		config.ClientNodeSelection = e2epod.NodeSelection{
			Selector: map[string]string{
				v1.LabelFailureDomainBetaZone: zone,
			},
		}
	}
	volumeName, err := e2epv.CreatePDWithRetryAndZone(ctx, zone)
	framework.ExpectNoError(err)
	return &azureDiskVolume{
		volumeName: volumeName,
	}
}
// DeleteVolume removes the backing Azure disk. Failures are deliberately
// ignored: this is best-effort cleanup at the end of a test.
func (v *azureDiskVolume) DeleteVolume(ctx context.Context) {
	_ = e2epv.DeletePDWithRetry(ctx, v.volumeName)
}
// AWS
type awsDriver struct {
driverInfo storageframework.DriverInfo