Merge pull request #113826 from jsafrane/add-openstack

Add CSI migration of OpenStack Cinder volumes
Kubernetes Prow Robot 2022-11-11 11:00:07 -08:00 committed by GitHub
commit e4d46148de
33 changed files with 634 additions and 13 deletions

View File

@@ -4097,6 +4097,20 @@ func TestValidateVolumes(t *testing.T) {
field: "rbd.image",
}},
},
// Cinder
{
name: "valid Cinder",
vol: core.Volume{
Name: "cinder",
VolumeSource: core.VolumeSource{
Cinder: &core.CinderVolumeSource{
VolumeID: "29ea5088-4f60-4757-962e-dba678767887",
FSType: "ext4",
ReadOnly: false,
},
},
},
},
// CephFS
{
name: "valid CephFS",

View File

@@ -415,6 +415,12 @@ const (
// Disables the GCE PD in-tree driver.
InTreePluginGCEUnregister featuregate.Feature = "InTreePluginGCEUnregister"
// owner: @adisky
// alpha: v1.21
//
// Disables the OpenStack Cinder in-tree driver.
InTreePluginOpenStackUnregister featuregate.Feature = "InTreePluginOpenStackUnregister"
// owner: @trierra
// alpha: v1.23
//
@@ -1009,6 +1015,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
InTreePluginGCEUnregister: {Default: false, PreRelease: featuregate.Alpha},
InTreePluginOpenStackUnregister: {Default: false, PreRelease: featuregate.Alpha},
InTreePluginPortworxUnregister: {Default: false, PreRelease: featuregate.Alpha},
InTreePluginRBDUnregister: {Default: false, PreRelease: featuregate.Alpha},
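These gates are consulted through the standard feature-gate API. A minimal sketch of a runtime check (the gate name and the Enabled call are real; the surrounding program is illustrative):

package main

import (
	"fmt"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

func main() {
	// InTreePluginOpenStackUnregister defaults to false (Alpha), so the
	// in-tree Cinder plugin stays registered unless an operator enables the gate.
	if utilfeature.DefaultFeatureGate.Enabled(features.InTreePluginOpenStackUnregister) {
		fmt.Println("in-tree OpenStack Cinder plugin will be unregistered")
	}
}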

View File

@@ -30,6 +30,7 @@ const (
NodeUnschedulable = "NodeUnschedulable"
NodeVolumeLimits = "NodeVolumeLimits"
AzureDiskLimits = "AzureDiskLimits"
CinderLimits = "CinderLimits"
EBSLimits = "EBSLimits"
GCEPDLimits = "GCEPDLimits"
PodTopologySpread = "PodTopologySpread"

View File

@@ -58,6 +58,8 @@ func getVolumeLimitKey(filterType string) v1.ResourceName {
return v1.ResourceName(volumeutil.GCEVolumeLimitKey)
case azureDiskVolumeFilterType:
return v1.ResourceName(volumeutil.AzureVolumeLimitKey)
case cinderVolumeFilterType:
return v1.ResourceName(volumeutil.CinderVolumeLimitKey)
default:
return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType))
}

View File

@@ -56,6 +56,8 @@ const (
gcePDVolumeFilterType = "GCE"
// azureDiskVolumeFilterType defines the filter name for azureDiskVolumeFilter.
azureDiskVolumeFilterType = "AzureDisk"
// cinderVolumeFilterType defines the filter name for cinderVolumeFilter.
cinderVolumeFilterType = "Cinder"
// ErrReasonMaxVolumeCountExceeded is used for MaxVolumeCount predicate error.
ErrReasonMaxVolumeCountExceeded = "node(s) exceed max volume count"
@@ -73,6 +75,15 @@ func NewAzureDisk(_ runtime.Object, handle framework.Handle, fts feature.Feature
return newNonCSILimitsWithInformerFactory(azureDiskVolumeFilterType, informerFactory, fts), nil
}
// CinderName is the name of the plugin used in the plugin registry and configurations.
const CinderName = names.CinderLimits
// NewCinder initializes a new plugin and returns it.
func NewCinder(_ runtime.Object, handle framework.Handle, fts feature.Features) (framework.Plugin, error) {
informerFactory := handle.SharedInformerFactory()
return newNonCSILimitsWithInformerFactory(cinderVolumeFilterType, informerFactory, fts), nil
}
// EBSName is the name of the plugin used in the plugin registry and configurations.
const EBSName = names.EBSLimits
@@ -160,6 +171,10 @@ func newNonCSILimits(
name = AzureDiskName
filter = azureDiskVolumeFilter
volumeLimitKey = v1.ResourceName(volumeutil.AzureVolumeLimitKey)
case cinderVolumeFilterType:
name = CinderName
filter = cinderVolumeFilter
volumeLimitKey = v1.ResourceName(volumeutil.CinderVolumeLimitKey)
default:
klog.ErrorS(errors.New("wrong filterName"), "Cannot create nonCSILimits plugin")
return nil
@@ -460,6 +475,32 @@ var azureDiskVolumeFilter = VolumeFilter{
},
}
// cinderVolumeFilter is a VolumeFilter for filtering Cinder volumes.
// It will be deprecated once the OpenStack cloud provider has been removed from in-tree.
var cinderVolumeFilter = VolumeFilter{
FilterVolume: func(vol *v1.Volume) (string, bool) {
if vol.Cinder != nil {
return vol.Cinder.VolumeID, true
}
return "", false
},
FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
if pv.Spec.Cinder != nil {
return pv.Spec.Cinder.VolumeID, true
}
return "", false
},
MatchProvisioner: func(sc *storage.StorageClass) bool {
return sc.Provisioner == csilibplugins.CinderInTreePluginName
},
IsMigrated: func(csiNode *storage.CSINode) bool {
return isCSIMigrationOn(csiNode, csilibplugins.CinderInTreePluginName)
},
}
func getMaxVolumeFunc(filterName string) func(node *v1.Node) int {
return func(node *v1.Node) int {
maxVolumesFromEnv := getMaxVolLimitFromEnv()
@@ -481,6 +522,8 @@ func getMaxVolumeFunc(filterName string) func(node *v1.Node) int {
return defaultMaxGCEPDVolumes
case azureDiskVolumeFilterType:
return defaultMaxAzureDiskVolumes
case cinderVolumeFilterType:
return volumeutil.DefaultMaxCinderVolumes
default:
return -1
}
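To make the new filter concrete: the nonCSILimits plugin feeds each pod volume through the VolumeFilter callbacks and counts distinct volume IDs against the node limit. A simplified, hypothetical helper in the same package (the real plugin also resolves PVC-bound PersistentVolumes through FilterPersistentVolume):

// countUniqueCinderVolumes illustrates how cinderVolumeFilter deduplicates
// inline Cinder volumes by VolumeID. It is not part of this change.
func countUniqueCinderVolumes(pods []*v1.Pod) int {
	seen := map[string]bool{}
	for _, pod := range pods {
		for i := range pod.Spec.Volumes {
			if id, ok := cinderVolumeFilter.FilterVolume(&pod.Spec.Volumes[i]); ok {
				seen[id] = true
			}
		}
	}
	return len(seen)
}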

View File

@@ -55,6 +55,8 @@ func isCSIMigrationOn(csiNode *storagev1.CSINode, pluginName string) bool {
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureDisk) {
return false
}
case csilibplugins.CinderInTreePluginName:
return true
case csilibplugins.RBDVolumePluginName:
if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationRBD) {
return false

View File

@@ -72,6 +72,7 @@ func NewInTreeRegistry() runtime.Registry {
nodevolumelimits.EBSName: runtime.FactoryAdapter(fts, nodevolumelimits.NewEBS),
nodevolumelimits.GCEPDName: runtime.FactoryAdapter(fts, nodevolumelimits.NewGCEPD),
nodevolumelimits.AzureDiskName: runtime.FactoryAdapter(fts, nodevolumelimits.NewAzureDisk),
nodevolumelimits.CinderName: runtime.FactoryAdapter(fts, nodevolumelimits.NewCinder),
interpodaffinity.Name: interpodaffinity.New,
queuesort.Name: queuesort.New,
defaultbinder.Name: defaultbinder.New,

View File

@@ -1010,6 +1010,8 @@ func isCSIMigrationOnForPlugin(pluginName string) bool {
return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationGCE)
case csiplugins.AzureDiskInTreePluginName:
return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureDisk)
case csiplugins.CinderInTreePluginName:
return true
case csiplugins.PortworxVolumePluginName:
return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationPortworx)
case csiplugins.RBDVolumePluginName:

View File

@@ -222,6 +222,9 @@ func (p *csiPlugin) Init(host volume.VolumeHost) error {
csitranslationplugins.AWSEBSInTreePluginName: func() bool {
return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAWS)
},
csitranslationplugins.CinderInTreePluginName: func() bool {
return true
},
csitranslationplugins.AzureDiskInTreePluginName: func() bool {
return utilfeature.DefaultFeatureGate.Enabled(features.CSIMigrationAzureDisk)
},

View File

@@ -68,6 +68,8 @@ func (pm PluginManager) IsMigrationCompleteForPlugin(pluginName string) bool {
return pm.featureGate.Enabled(features.InTreePluginAzureFileUnregister)
case csilibplugins.AzureDiskInTreePluginName:
return pm.featureGate.Enabled(features.InTreePluginAzureDiskUnregister)
case csilibplugins.CinderInTreePluginName:
return pm.featureGate.Enabled(features.InTreePluginOpenStackUnregister)
case csilibplugins.VSphereInTreePluginName:
return pm.featureGate.Enabled(features.InTreePluginvSphereUnregister)
case csilibplugins.PortworxVolumePluginName:
@@ -94,6 +96,8 @@ func (pm PluginManager) IsMigrationEnabledForPlugin(pluginName string) bool {
return pm.featureGate.Enabled(features.CSIMigrationAzureFile)
case csilibplugins.AzureDiskInTreePluginName:
return pm.featureGate.Enabled(features.CSIMigrationAzureDisk)
case csilibplugins.CinderInTreePluginName:
return true
case csilibplugins.VSphereInTreePluginName:
return pm.featureGate.Enabled(features.CSIMigrationvSphere)
case csilibplugins.PortworxVolumePluginName:
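Seen from a caller, the two methods now diverge for Cinder: migration is unconditionally enabled, while unregistration still follows its feature gate. A sketch, assuming a PluginManager constructed the way kubelet constructs one; the program itself is illustrative:

package main

import (
	"fmt"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	csitrans "k8s.io/csi-translation-lib"
	csilibplugins "k8s.io/csi-translation-lib/plugins"
	"k8s.io/kubernetes/pkg/volume/csimigration"
)

func main() {
	pm := csimigration.NewPluginManager(csitrans.New(), utilfeature.DefaultFeatureGate)
	// Migration for Cinder is now unconditionally on:
	fmt.Println(pm.IsMigrationEnabledForPlugin(csilibplugins.CinderInTreePluginName)) // true
	// Unregistration still follows the InTreePluginOpenStackUnregister gate:
	fmt.Println(pm.IsMigrationCompleteForPlugin(csilibplugins.CinderInTreePluginName))
}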

View File

@@ -40,6 +40,13 @@ const (
// GCEVolumeLimitKey stores resource name that will store volume limits for GCE node
GCEVolumeLimitKey = "attachable-volumes-gce-pd"
// CinderVolumeLimitKey contains the volume limit key for Cinder
CinderVolumeLimitKey = "attachable-volumes-cinder"
// DefaultMaxCinderVolumes defines the maximum number of Cinder volumes per node.
// For OpenStack the default is deliberately high; the real limit depends on the
// backend, and cluster admins can configure it.
DefaultMaxCinderVolumes = 256
// CSIAttachLimitPrefix defines prefix used for CSI volumes
CSIAttachLimitPrefix = "attachable-volumes-csi-"
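Nodes advertise these keys under status.allocatable. A hypothetical helper, assumed to live alongside the constants above, that reads the Cinder limit back and falls back to the default:

// cinderLimitFor returns the node's advertised Cinder attach limit,
// falling back to DefaultMaxCinderVolumes when the node publishes none.
// Illustrative only; not part of this change.
func cinderLimitFor(node *v1.Node) int64 {
	if q, ok := node.Status.Allocatable[v1.ResourceName(CinderVolumeLimitKey)]; ok {
		return q.Value()
	}
	return DefaultMaxCinderVolumes
}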

View File

@@ -21,6 +21,7 @@ import (
"os"
"reflect"
"runtime"
"strings"
"testing"
v1 "k8s.io/api/core/v1"
@@ -260,6 +261,30 @@ func TestFsUserFrom(t *testing.T) {
}
}
func TestGenerateVolumeName(t *testing.T) {
// Normal operation, no truncate
v1 := GenerateVolumeName("kubernetes", "pv-cinder-abcde", 255)
if v1 != "kubernetes-dynamic-pv-cinder-abcde" {
t.Errorf("Expected kubernetes-dynamic-pv-cinder-abcde, got %s", v1)
}
// Truncate trailing "6789-dynamic"
prefix := strings.Repeat("0123456789", 9) // 90-character prefix + 8 chars of "-dynamic"
v2 := GenerateVolumeName(prefix, "pv-cinder-abcde", 100)
expect := prefix[:84] + "-pv-cinder-abcde"
if v2 != expect {
t.Errorf("Expected %s, got %s", expect, v2)
}
// Truncate really long cluster name
prefix = strings.Repeat("0123456789", 1000) // 10000 characters prefix
v3 := GenerateVolumeName(prefix, "pv-cinder-abcde", 100)
if v3 != expect {
t.Errorf("Expected %s, got %s", expect, v3)
}
}
func TestHasMountRefs(t *testing.T) {
testCases := map[string]struct {
mountPath string

View File

@@ -72,7 +72,7 @@ var _ kubeapiserveradmission.WantsCloudConfig = &persistentVolumeLabel{}
// As a side effect, the cloud provider may block invalid or non-existent volumes.
func newPersistentVolumeLabel() *persistentVolumeLabel {
// DEPRECATED: in a future release, we will use mutating admission webhooks to apply PV labels.
-// Once the mutating admission webhook is used for AWS, Azure and GCE,
+// Once the mutating admission webhook is used for AWS, Azure, and GCE,
// this admission controller will be removed.
klog.Warning("PersistentVolumeLabel admission controller is deprecated. " +
"Please remove this controller from your configuration files and scripts.")

View File

@@ -912,7 +912,7 @@ func Test_PVLAdmission(t *testing.T) {
// setPVLabler applies the given mock pvlabeler to implement PV labeling for all cloud providers.
// Given we mock out the values of the labels anyways, assigning the same mock labeler for every
// provider does not reduce test coverage but it does simplify/clean up the tests here because
-// the provider is then decided based on the type of PV (EBS, Cinder, GCEPD, Azure Disk, etc)
+// the provider is then decided based on the type of PV (EBS, GCEPD, Azure Disk, etc)
func setPVLabeler(handler *persistentVolumeLabel, pvlabeler cloudprovider.PVLabeler) {
handler.awsPVLabeler = pvlabeler
handler.gcePVLabeler = pvlabeler

View File

@@ -421,6 +421,48 @@ func TestTranslateTopologyFromCSIToInTree(t *testing.T) {
v1.LabelTopologyRegion: "us-east1",
},
},
{
name: "cinder translation",
key: CinderTopologyKey,
expErr: false,
regionParser: nil,
pv: &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "cinder", Namespace: "myns",
},
Spec: v1.PersistentVolumeSpec{
NodeAffinity: &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: CinderTopologyKey,
Operator: v1.NodeSelectorOpIn,
Values: []string{"nova"},
},
},
},
},
},
},
},
},
expectedNodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: v1.LabelTopologyZone,
Operator: v1.NodeSelectorOpIn,
Values: []string{"nova"},
},
},
},
},
expectedLabels: map[string]string{
v1.LabelTopologyZone: "nova",
},
},
}
for _, tc := range testCases {

View File

@@ -0,0 +1,184 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugins
import (
"fmt"
"strings"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
// CinderDriverName is the name of the CSI driver for Cinder
CinderDriverName = "cinder.csi.openstack.org"
// CinderTopologyKey is the zonal topology key for Cinder CSI Driver
CinderTopologyKey = "topology.cinder.csi.openstack.org/zone"
// CinderInTreePluginName is the name of the intree plugin for Cinder
CinderInTreePluginName = "kubernetes.io/cinder"
)
var _ InTreePlugin = (*osCinderCSITranslator)(nil)
// osCinderCSITranslator handles translation of PV spec from In-tree Cinder to CSI Cinder and vice versa
type osCinderCSITranslator struct{}
// NewOpenStackCinderCSITranslator returns a new instance of osCinderCSITranslator
func NewOpenStackCinderCSITranslator() InTreePlugin {
return &osCinderCSITranslator{}
}
// TranslateInTreeStorageClassToCSI translates InTree Cinder storage class parameters to CSI storage class
func (t *osCinderCSITranslator) TranslateInTreeStorageClassToCSI(sc *storage.StorageClass) (*storage.StorageClass, error) {
var (
params = map[string]string{}
)
for k, v := range sc.Parameters {
switch strings.ToLower(k) {
case fsTypeKey:
params[csiFsTypeKey] = v
default:
// All other parameters are supported by the CSI driver.
// This also includes "availability"; therefore, do not translate it to sc.AllowedTopologies.
params[k] = v
}
}
if len(sc.AllowedTopologies) > 0 {
newTopologies, err := translateAllowedTopologies(sc.AllowedTopologies, CinderTopologyKey)
if err != nil {
return nil, fmt.Errorf("failed translating allowed topologies: %v", err)
}
sc.AllowedTopologies = newTopologies
}
sc.Parameters = params
return sc, nil
}
// TranslateInTreeInlineVolumeToCSI takes a Volume with Cinder set from in-tree
// and converts the Cinder source to a CSIPersistentVolumeSource
func (t *osCinderCSITranslator) TranslateInTreeInlineVolumeToCSI(volume *v1.Volume, podNamespace string) (*v1.PersistentVolume, error) {
if volume == nil || volume.Cinder == nil {
return nil, fmt.Errorf("volume is nil or Cinder not defined on volume")
}
cinderSource := volume.Cinder
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
// Must be unique per disk as it is used as the unique part of the
// staging path
Name: fmt.Sprintf("%s-%s", CinderDriverName, cinderSource.VolumeID),
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
CSI: &v1.CSIPersistentVolumeSource{
Driver: CinderDriverName,
VolumeHandle: cinderSource.VolumeID,
ReadOnly: cinderSource.ReadOnly,
FSType: cinderSource.FSType,
VolumeAttributes: map[string]string{},
},
},
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
},
}
return pv, nil
}
// TranslateInTreePVToCSI takes a PV with Cinder set from in-tree
// and converts the Cinder source to a CSIPersistentVolumeSource
func (t *osCinderCSITranslator) TranslateInTreePVToCSI(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
if pv == nil || pv.Spec.Cinder == nil {
return nil, fmt.Errorf("pv is nil or Cinder not defined on pv")
}
cinderSource := pv.Spec.Cinder
csiSource := &v1.CSIPersistentVolumeSource{
Driver: CinderDriverName,
VolumeHandle: cinderSource.VolumeID,
ReadOnly: cinderSource.ReadOnly,
FSType: cinderSource.FSType,
VolumeAttributes: map[string]string{},
}
if err := translateTopologyFromInTreeToCSI(pv, CinderTopologyKey); err != nil {
return nil, fmt.Errorf("failed to translate topology: %v", err)
}
pv.Spec.Cinder = nil
pv.Spec.CSI = csiSource
return pv, nil
}
// TranslateCSIPVToInTree takes a PV with CSIPersistentVolumeSource set and
// translates the Cinder CSI source to a Cinder In-tree source.
func (t *osCinderCSITranslator) TranslateCSIPVToInTree(pv *v1.PersistentVolume) (*v1.PersistentVolume, error) {
if pv == nil || pv.Spec.CSI == nil {
return nil, fmt.Errorf("pv is nil or CSI source not defined on pv")
}
csiSource := pv.Spec.CSI
cinderSource := &v1.CinderPersistentVolumeSource{
VolumeID: csiSource.VolumeHandle,
FSType: csiSource.FSType,
ReadOnly: csiSource.ReadOnly,
}
// translate CSI topology to In-tree topology for rollback compatibility.
// It is not possible to infer the Cinder region from the zone, therefore leave it empty.
if err := translateTopologyFromCSIToInTree(pv, CinderTopologyKey, nil); err != nil {
return nil, fmt.Errorf("failed to translate topology. PV:%+v. Error:%v", *pv, err)
}
pv.Spec.CSI = nil
pv.Spec.Cinder = cinderSource
return pv, nil
}
// CanSupport tests whether the plugin supports a given persistent volume
// specification from the API. The spec pointer should be considered
// const.
func (t *osCinderCSITranslator) CanSupport(pv *v1.PersistentVolume) bool {
return pv != nil && pv.Spec.Cinder != nil
}
// CanSupportInline tests whether the plugin supports a given inline volume
// specification from the API. The spec pointer should be considered
// const.
func (t *osCinderCSITranslator) CanSupportInline(volume *v1.Volume) bool {
return volume != nil && volume.Cinder != nil
}
// GetInTreePluginName returns the name of the intree plugin driver
func (t *osCinderCSITranslator) GetInTreePluginName() string {
return CinderInTreePluginName
}
// GetCSIPluginName returns the name of the CSI plugin
func (t *osCinderCSITranslator) GetCSIPluginName() string {
return CinderDriverName
}
func (t *osCinderCSITranslator) RepairVolumeHandle(volumeHandle, nodeID string) (string, error) {
return volumeHandle, nil
}
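A self-contained usage sketch of the new translator; the PV mirrors the fixtures used in the tests, and only the wrapping main program is invented:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/csi-translation-lib/plugins"
)

func main() {
	translator := plugins.NewOpenStackCinderCSITranslator()
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "pv-cinder-demo"},
		Spec: v1.PersistentVolumeSpec{
			PersistentVolumeSource: v1.PersistentVolumeSource{
				Cinder: &v1.CinderPersistentVolumeSource{
					VolumeID: "29ea5088-4f60-4757-962e-dba678767887",
					FSType:   "ext4",
				},
			},
		},
	}
	csiPV, err := translator.TranslateInTreePVToCSI(pv)
	if err != nil {
		panic(err)
	}
	// Prints: cinder.csi.openstack.org 29ea5088-4f60-4757-962e-dba678767887
	fmt.Println(csiPV.Spec.CSI.Driver, csiPV.Spec.CSI.VolumeHandle)
}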

View File

@@ -0,0 +1,80 @@
/*
Copyright 2021 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package plugins
import (
"reflect"
"testing"
v1 "k8s.io/api/core/v1"
storage "k8s.io/api/storage/v1"
)
func TestTranslateCinderInTreeStorageClassToCSI(t *testing.T) {
translator := NewOpenStackCinderCSITranslator()
cases := []struct {
name string
sc *storage.StorageClass
expSc *storage.StorageClass
expErr bool
}{
{
name: "translate normal",
sc: NewStorageClass(map[string]string{"foo": "bar"}, nil),
expSc: NewStorageClass(map[string]string{"foo": "bar"}, nil),
},
{
name: "translate empty map",
sc: NewStorageClass(map[string]string{}, nil),
expSc: NewStorageClass(map[string]string{}, nil),
},
{
name: "translate with fstype",
sc: NewStorageClass(map[string]string{"fstype": "ext3"}, nil),
expSc: NewStorageClass(map[string]string{"csi.storage.k8s.io/fstype": "ext3"}, nil),
},
{
name: "translate with topology in parameters (no translation expected)",
sc: NewStorageClass(map[string]string{"availability": "nova"}, nil),
expSc: NewStorageClass(map[string]string{"availability": "nova"}, nil),
},
{
name: "translate with topology",
sc: NewStorageClass(map[string]string{}, generateToplogySelectors(v1.LabelFailureDomainBetaZone, []string{"nova"})),
expSc: NewStorageClass(map[string]string{}, generateToplogySelectors(CinderTopologyKey, []string{"nova"})),
},
}
for _, tc := range cases {
t.Logf("Testing %v", tc.name)
got, err := translator.TranslateInTreeStorageClassToCSI(tc.sc)
if err != nil && !tc.expErr {
t.Errorf("Did not expect error but got: %v", err)
}
if err == nil && tc.expErr {
t.Errorf("Expected error, but did not get one.")
}
if !reflect.DeepEqual(got, tc.expSc) {
t.Errorf("Got parameters: %v, expected: %v", got, tc.expSc)
}
}
}
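Outside the test table, the parameter translation amounts to renaming the fstype key and passing everything else through. A fragment in the same package, with values taken from the cases above:

sc := &storage.StorageClass{
	Provisioner: "kubernetes.io/cinder",
	Parameters:  map[string]string{"fstype": "ext3", "availability": "nova"},
}
csiSC, _ := NewOpenStackCinderCSITranslator().TranslateInTreeStorageClassToCSI(sc)
// csiSC.Parameters now holds {"csi.storage.k8s.io/fstype": "ext3", "availability": "nova"}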

View File

@@ -29,6 +29,7 @@ var (
inTreePlugins = map[string]plugins.InTreePlugin{
plugins.GCEPDDriverName: plugins.NewGCEPersistentDiskCSITranslator(),
plugins.AWSEBSDriverName: plugins.NewAWSElasticBlockStoreCSITranslator(),
plugins.CinderDriverName: plugins.NewOpenStackCinderCSITranslator(),
plugins.AzureDiskDriverName: plugins.NewAzureDiskCSITranslator(),
plugins.AzureFileDriverName: plugins.NewAzureFileCSITranslator(),
plugins.VSphereDriverName: plugins.NewvSphereCSITranslator(),

View File

@@ -189,6 +189,17 @@ func TestTopologyTranslation(t *testing.T) {
pv: makeAWSEBSPV(kubernetesGATopologyLabels, makeTopology(v1.LabelTopologyZone, "us-east-2a")),
expectedNodeAffinity: makeNodeAffinity(false /*multiTerms*/, plugins.AWSEBSTopologyKey, "us-east-2a"),
},
// Cinder test cases: mostly test the topology key, i.e., don't repeat testing done with GCE
{
name: "OpenStack Cinder with zone labels",
pv: makeCinderPV(kubernetesBetaTopologyLabels, nil /*topology*/),
expectedNodeAffinity: makeNodeAffinity(false /*multiTerms*/, plugins.CinderTopologyKey, "us-east-1a"),
},
{
name: "OpenStack Cinder with zone labels and topology",
pv: makeCinderPV(kubernetesBetaTopologyLabels, makeTopology(v1.LabelFailureDomainBetaZone, "us-east-2a")),
expectedNodeAffinity: makeNodeAffinity(false /*multiTerms*/, plugins.CinderTopologyKey, "us-east-2a"),
},
}
for _, test := range testCases {
@@ -291,6 +302,18 @@ func makeAWSEBSPV(labels map[string]string, topology *v1.NodeSelectorRequirement
return pv
}
func makeCinderPV(labels map[string]string, topology *v1.NodeSelectorRequirement) *v1.PersistentVolume {
pv := makePV(labels, topology)
pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
Cinder: &v1.CinderPersistentVolumeSource{
VolumeID: "vol1",
FSType: "ext4",
ReadOnly: false,
},
}
return pv
}
func makeNodeAffinity(multiTerms bool, key string, values ...string) *v1.VolumeNodeAffinity {
nodeAffinity := &v1.VolumeNodeAffinity{
Required: &v1.NodeSelector{
@@ -389,6 +412,12 @@ func generateUniqueVolumeSource(driverName string) (v1.VolumeSource, error) {
},
}, nil
case plugins.CinderDriverName:
return v1.VolumeSource{
Cinder: &v1.CinderVolumeSource{
VolumeID: string(uuid.NewUUID()),
},
}, nil
case plugins.AzureDiskDriverName:
return v1.VolumeSource{
AzureDisk: &v1.AzureDiskVolumeSource{

View File

@@ -950,6 +950,8 @@ func describeVolumes(volumes []corev1.Volume, w PrefixWriter, space string) {
printAzureDiskVolumeSource(volume.VolumeSource.AzureDisk, w)
case volume.VolumeSource.VsphereVolume != nil:
printVsphereVolumeSource(volume.VolumeSource.VsphereVolume, w)
case volume.VolumeSource.Cinder != nil:
printCinderVolumeSource(volume.VolumeSource.Cinder, w)
case volume.VolumeSource.PhotonPersistentDisk != nil:
printPhotonPersistentDiskVolumeSource(volume.VolumeSource.PhotonPersistentDisk, w)
case volume.VolumeSource.PortworxVolume != nil:
@@ -1228,6 +1230,24 @@ func printPhotonPersistentDiskVolumeSource(photon *corev1.PhotonPersistentDiskVo
photon.PdID, photon.FSType)
}
func printCinderVolumeSource(cinder *corev1.CinderVolumeSource, w PrefixWriter) {
w.Write(LEVEL_2, "Type:\tCinder (a Persistent Disk resource in OpenStack)\n"+
" VolumeID:\t%v\n"+
" FSType:\t%v\n"+
" ReadOnly:\t%v\n"+
" SecretRef:\t%v\n",
cinder.VolumeID, cinder.FSType, cinder.ReadOnly, cinder.SecretRef)
}
func printCinderPersistentVolumeSource(cinder *corev1.CinderPersistentVolumeSource, w PrefixWriter) {
w.Write(LEVEL_2, "Type:\tCinder (a Persistent Disk resource in OpenStack)\n"+
" VolumeID:\t%v\n"+
" FSType:\t%v\n"+
" ReadOnly:\t%v\n"+
" SecretRef:\t%v\n",
cinder.VolumeID, cinder.FSType, cinder.ReadOnly, cinder.SecretRef)
}
func printScaleIOVolumeSource(sio *corev1.ScaleIOVolumeSource, w PrefixWriter) {
w.Write(LEVEL_2, "Type:\tScaleIO (a persistent volume backed by a block device in ScaleIO)\n"+
" Gateway:\t%v\n"+
@@ -1545,6 +1565,8 @@ func describePersistentVolume(pv *corev1.PersistentVolume, events *corev1.EventL
printQuobyteVolumeSource(pv.Spec.Quobyte, w)
case pv.Spec.VsphereVolume != nil:
printVsphereVolumeSource(pv.Spec.VsphereVolume, w)
case pv.Spec.Cinder != nil:
printCinderPersistentVolumeSource(pv.Spec.Cinder, w)
case pv.Spec.AzureDisk != nil:
printAzureDiskVolumeSource(pv.Spec.AzureDisk, w)
case pv.Spec.PhotonPersistentDisk != nil:
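With these printers wired in, kubectl describe renders a Cinder source roughly as follows (values illustrative; an unset SecretRef prints as <nil>):

Type:        Cinder (a Persistent Disk resource in OpenStack)
VolumeID:    29ea5088-4f60-4757-962e-dba678767887
FSType:      ext4
ReadOnly:    false
SecretRef:   <nil>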

View File

@@ -1483,6 +1483,19 @@ func TestPersistentVolumeDescriber(t *testing.T) {
},
unexpectedElements: []string{"VolumeMode", "Filesystem"},
},
{
name: "test8",
plugin: "cinder",
pv: &corev1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{Name: "bar"},
Spec: corev1.PersistentVolumeSpec{
PersistentVolumeSource: corev1.PersistentVolumeSource{
Cinder: &corev1.CinderPersistentVolumeSource{},
},
},
},
unexpectedElements: []string{"VolumeMode", "Filesystem"},
},
{
name: "test9",
plugin: "fc",

View File

@@ -122,6 +122,8 @@ func restrictedVolumes_1_0(podMetadata *metav1.ObjectMeta, podSpec *corev1.PodSp
badVolumeTypes.Insert("rbd")
case volume.FlexVolume != nil:
badVolumeTypes.Insert("flexVolume")
case volume.Cinder != nil:
badVolumeTypes.Insert("cinder")
case volume.CephFS != nil:
badVolumeTypes.Insert("cephfs")
case volume.Flocker != nil:

View File

@@ -53,6 +53,7 @@ func TestRestrictedVolumes(t *testing.T) {
{Name: "b7", VolumeSource: corev1.VolumeSource{Glusterfs: &corev1.GlusterfsVolumeSource{}}},
{Name: "b8", VolumeSource: corev1.VolumeSource{RBD: &corev1.RBDVolumeSource{}}},
{Name: "b9", VolumeSource: corev1.VolumeSource{FlexVolume: &corev1.FlexVolumeSource{}}},
{Name: "b10", VolumeSource: corev1.VolumeSource{Cinder: &corev1.CinderVolumeSource{}}},
{Name: "b11", VolumeSource: corev1.VolumeSource{CephFS: &corev1.CephFSVolumeSource{}}},
{Name: "b12", VolumeSource: corev1.VolumeSource{Flocker: &corev1.FlockerVolumeSource{}}},
{Name: "b13", VolumeSource: corev1.VolumeSource{FC: &corev1.FCVolumeSource{}}},
@@ -71,9 +72,9 @@ func TestRestrictedVolumes(t *testing.T) {
}},
expectReason: `restricted volume types`,
expectDetail: `volumes ` +
`"b1", "b2", "b3", "b4", "b5", "b6", "b7", "b8", "b9", "b11", "b12", "b13", "b14", "b15", "b16", "b17", "b18", "b19", "b20", "b21", "c1"` +
`"b1", "b2", "b3", "b4", "b5", "b6", "b7", "b8", "b9", "b10", "b11", "b12", "b13", "b14", "b15", "b16", "b17", "b18", "b19", "b20", "b21", "c1"` +
` use restricted volume types ` +
`"awsElasticBlockStore", "azureDisk", "azureFile", "cephfs", "fc", "flexVolume", "flocker", "gcePersistentDisk", "gitRepo", "glusterfs", ` +
`"awsElasticBlockStore", "azureDisk", "azureFile", "cephfs", "cinder", "fc", "flexVolume", "flocker", "gcePersistentDisk", "gitRepo", "glusterfs", ` +
`"hostPath", "iscsi", "nfs", "photonPersistentDisk", "portworxVolume", "quobyte", "rbd", "scaleIO", "storageos", "unknown", "vsphereVolume"`,
},
}

View File

@@ -32,7 +32,7 @@ limitations under the License.
* be used in production.
*
* 2) With server outside of Kubernetes
- * Appropriate server exist somewhere outside
+ * Appropriate server must exist somewhere outside
* the tested Kubernetes cluster. The test itself creates a new volume,
* and checks, that Kubernetes can use it as a volume.
*/

View File

@@ -57,6 +57,7 @@ import (
_ "k8s.io/kubernetes/test/e2e/framework/providers/azure"
_ "k8s.io/kubernetes/test/e2e/framework/providers/gce"
_ "k8s.io/kubernetes/test/e2e/framework/providers/kubemark"
_ "k8s.io/kubernetes/test/e2e/framework/providers/openstack"
_ "k8s.io/kubernetes/test/e2e/framework/providers/vsphere"
// Ensure that logging flags are part of the command line.

View File

@@ -0,0 +1,37 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package openstack
import (
"k8s.io/kubernetes/test/e2e/framework"
)
func init() {
framework.RegisterProvider("openstack", newProvider)
}
func newProvider() (framework.ProviderInterface, error) {
return &Provider{}, nil
}
// Provider is a structure to handle OpenStack clouds for e2e testing.
// It does not do anything useful; it exists only to provide a valid
// --provider=openstack cmdline option, allowing the CSI migration tests
// of the kubernetes.io/cinder volume plugin to run.
type Provider struct {
framework.NullProvider
}
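A sketch of how this registration is exercised at test startup; SetupProviderConfig is the framework's existing provider lookup, and the snippet itself is illustrative rather than part of this change:

// With the init() above linked into the test binary, "openstack" resolves
// to the stub Provider instead of failing as an unknown provider.
provider, err := framework.SetupProviderConfig("openstack")
if err != nil {
	panic(err)
}
_ = provider // a framework.NullProvider-backed no-op implementation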

View File

@@ -27,7 +27,7 @@ limitations under the License.
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
- * 2) With server or cloud provider outside of Kubernetes (GCE, AWS, Azure, ...)
+ * 2) With server or cloud provider outside of Kubernetes (Cinder, GCE, AWS, Azure, ...)
* Appropriate server or cloud provider must exist somewhere outside
* the tested Kubernetes cluster. CreateVolume will create a new volume to be
* used in the TestSuites for inlineVolume or DynamicPV tests.

View File

@@ -27,7 +27,7 @@ limitations under the License.
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
- * 2) With server or cloud provider outside of Kubernetes (GCE, AWS, Azure, ...)
+ * 2) With server or cloud provider outside of Kubernetes (Cinder, GCE, AWS, Azure, ...)
* Appropriate server or cloud provider must exist somewhere outside
* the tested Kubernetes cluster. CreateVolume will create a new volume to be
* used in the TestSuites for inlineVolume or DynamicPV tests.
@@ -928,6 +928,70 @@ func (e *emptydirDriver) PrepareTest(f *framework.Framework) *storageframework.P
}
}
// Cinder
// This driver tests only CSI migration with dynamically provisioned volumes.
type cinderDriver struct {
driverInfo storageframework.DriverInfo
}
var _ storageframework.TestDriver = &cinderDriver{}
var _ storageframework.DynamicPVTestDriver = &cinderDriver{}
// InitCinderDriver returns cinderDriver that implements TestDriver interface
func InitCinderDriver() storageframework.TestDriver {
return &cinderDriver{
driverInfo: storageframework.DriverInfo{
Name: "cinder",
InTreePluginName: "kubernetes.io/cinder",
MaxFileSize: storageframework.FileSizeMedium,
SupportedSizeRange: e2evolume.SizeRange{
Min: "1Gi",
},
SupportedFsType: sets.NewString(
"", // Default fsType
),
TopologyKeys: []string{v1.LabelFailureDomainBetaZone},
Capabilities: map[storageframework.Capability]bool{
storageframework.CapPersistence: true,
storageframework.CapFsGroup: true,
storageframework.CapExec: true,
storageframework.CapBlock: true,
// Cinder supports volume limits, but the test creates a large
// number of volumes and times out test suites.
storageframework.CapVolumeLimits: false,
storageframework.CapTopology: true,
},
},
}
}
func (c *cinderDriver) GetDriverInfo() *storageframework.DriverInfo {
return &c.driverInfo
}
func (c *cinderDriver) SkipUnsupportedTest(pattern storageframework.TestPattern) {
e2eskipper.SkipUnlessProviderIs("openstack")
}
func (c *cinderDriver) GetDynamicProvisionStorageClass(config *storageframework.PerTestConfig, fsType string) *storagev1.StorageClass {
provisioner := "kubernetes.io/cinder"
parameters := map[string]string{}
if fsType != "" {
parameters["fsType"] = fsType
}
ns := config.Framework.Namespace.Name
return storageframework.GetStorageClass(provisioner, parameters, nil, ns)
}
func (c *cinderDriver) PrepareTest(f *framework.Framework) *storageframework.PerTestConfig {
return &storageframework.PerTestConfig{
Driver: c,
Prefix: "cinder",
Framework: f,
}
}
// GCE
type gcePdDriver struct {
driverInfo storageframework.DriverInfo

View File

@@ -36,6 +36,7 @@ var testDrivers = []func() storageframework.TestDriver{
drivers.InitHostPathDriver,
drivers.InitHostPathSymlinkDriver,
drivers.InitEmptydirDriver,
drivers.InitCinderDriver,
drivers.InitVSphereDriver,
drivers.InitAzureDiskDriver,
drivers.InitAzureFileDriver,

View File

@@ -371,6 +371,8 @@ func getInTreeNodeLimits(cs clientset.Interface, nodeName string, driverInfo *st
allocatableKey = volumeutil.EBSVolumeLimitKey
case migrationplugins.GCEPDInTreePluginName:
allocatableKey = volumeutil.GCEVolumeLimitKey
case migrationplugins.CinderInTreePluginName:
allocatableKey = volumeutil.CinderVolumeLimitKey
case migrationplugins.AzureDiskInTreePluginName:
allocatableKey = volumeutil.AzureVolumeLimitKey
default:

View File

@@ -284,6 +284,34 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
framework.ExpectNoError(err, "checkAWSEBS gp2 encrypted")
},
},
// OpenStack generic tests (work on all OpenStack deployments)
{
Name: "generic Cinder volume on OpenStack",
CloudProviders: []string{"openstack"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/cinder",
Parameters: map[string]string{},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{})
},
},
{
Name: "Cinder volume with empty volume type and zone on OpenStack",
CloudProviders: []string{"openstack"},
Timeouts: f.Timeouts,
Provisioner: "kubernetes.io/cinder",
Parameters: map[string]string{
"type": "",
"availability": "",
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(claim *v1.PersistentVolumeClaim) {
testsuites.PVWriteReadSingleNodeCheck(ctx, c, f.Timeouts, claim, e2epod.NodeSelection{})
},
},
// vSphere generic test
{
Name: "generic vSphere volume",
@@ -397,7 +425,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// not being deleted.
// NOTE: Polls until no PVs are detected, times out at 5 minutes.
e2eskipper.SkipUnlessProviderIs("gce", "aws", "gke", "vsphere", "azure")
e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
const raceAttempts int = 100
var residualPVs []*v1.PersistentVolume
@@ -572,7 +600,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
ginkgo.Describe("DynamicProvisioner Default", func() {
ginkgo.It("should create and delete default persistent volumes [Slow]", func(ctx context.Context) {
e2eskipper.SkipUnlessProviderIs("gce", "aws", "gke", "vsphere", "azure")
e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
e2epv.SkipIfNoDefaultStorageClass(c)
ginkgo.By("creating a claim with no annotation")
@@ -596,7 +624,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// Modifying the default storage class can be disruptive to other tests that depend on it
ginkgo.It("should be disabled by changing the default annotation [Serial] [Disruptive]", func() {
e2eskipper.SkipUnlessProviderIs("gce", "aws", "gke", "vsphere", "azure")
e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
e2epv.SkipIfNoDefaultStorageClass(c)
scName, scErr := e2epv.GetDefaultStorageClassName(c)
@@ -635,7 +663,7 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
// Modifying the default storage class can be disruptive to other tests that depend on it
ginkgo.It("should be disabled by removing the default annotation [Serial] [Disruptive]", func() {
e2eskipper.SkipUnlessProviderIs("gce", "aws", "gke", "vsphere", "azure")
e2eskipper.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
e2epv.SkipIfNoDefaultStorageClass(c)
scName, scErr := e2epv.GetDefaultStorageClassName(c)
@@ -776,6 +804,8 @@ func getDefaultPluginName() string {
return "kubernetes.io/gce-pd"
case framework.ProviderIs("aws"):
return "kubernetes.io/aws-ebs"
case framework.ProviderIs("openstack"):
return "kubernetes.io/cinder"
case framework.ProviderIs("vsphere"):
return "kubernetes.io/vsphere-volume"
case framework.ProviderIs("azure"):

View File

@@ -48,7 +48,7 @@ const (
func (t *PersistentVolumeUpgradeTest) Setup(f *framework.Framework) {
var err error
e2eskipper.SkipUnlessProviderIs("gce", "gke", "aws", "vsphere", "azure")
e2eskipper.SkipUnlessProviderIs("gce", "gke", "openstack", "aws", "vsphere", "azure")
ns := f.Namespace.Name

View File

@@ -52,7 +52,7 @@ func (VolumeModeDowngradeTest) Name() string {
// Skip returns true when this test can be skipped.
func (t *VolumeModeDowngradeTest) Skip(upgCtx upgrades.UpgradeContext) bool {
-if !framework.ProviderIs("gce", "aws", "gke", "vsphere", "azure") {
+if !framework.ProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure") {
return true
}