Refactor CSI tests to be more easily extensible as more plugins and more tests are added

David Zhu 2018-04-12 18:21:59 -07:00
parent 01d916167b
commit 4d11dab272
6 changed files with 305 additions and 242 deletions

View File test/e2e/storage/BUILD

@@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"csi_defs.go",
"csi_objects.go",
"csi_volumes.go",
"empty_dir_wrapper.go",
"flexvolume.go",

View File test/e2e/storage/csi_objects.go

@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -20,19 +20,166 @@ limitations under the License.
package storage
import (
"fmt"
"time"
"k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/manifest"
. "github.com/onsi/ginkgo"
)
const (
csiHostPathPluginImage string = "quay.io/k8scsi/hostpathplugin:v0.2.0"
csiExternalAttacherImage string = "quay.io/k8scsi/csi-attacher:v0.2.0"
csiExternalProvisionerImage string = "quay.io/k8scsi/csi-provisioner:v0.2.0"
csiDriverRegistrarImage string = "quay.io/k8scsi/driver-registrar:v0.2.0"
)
// Create the driver registrar cluster role if it doesn't exist. No teardown is
// performed, so that the tests remain parallelizable; the role is shared by
// many of the CSI tests.
func csiDriverRegistrarClusterRole(
config framework.VolumeTestConfig,
) *rbacv1.ClusterRole {
// TODO(Issue: #62237) Remove impersonation workaround and cluster role when issue resolved
By("Creating an impersonating superuser kubernetes clientset to define cluster role")
rc, err := framework.LoadConfig()
framework.ExpectNoError(err)
rc.Impersonate = restclient.ImpersonationConfig{
UserName: "superuser",
Groups: []string{"system:masters"},
}
superuserClientset, err := clientset.NewForConfig(rc)
framework.ExpectNoError(err, "Failed to create superuser clientset: %v", err)
By("Creating the CSI driver registrar cluster role")
clusterRoleClient := superuserClientset.RbacV1().ClusterRoles()
role := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: csiDriverRegistrarClusterRoleName,
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"events"},
Verbs: []string{"get", "list", "watch", "create", "update", "patch"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"get", "update", "patch"},
},
},
}
ret, err := clusterRoleClient.Create(role)
if err != nil {
if apierrs.IsAlreadyExists(err) {
return ret
}
framework.ExpectNoError(err, "Failed to create %s cluster role: %v", role.GetName(), err)
}
return ret
}
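One subtlety in the function above: when the role already exists, Create returns the AlreadyExists error together with an object that was never populated, so the early return hands callers an empty role rather than the live one. A minimal sketch of a create-or-get variant, using only the non-context client-go calls already imported above:

    // Create-or-get sketch: on AlreadyExists, fetch the existing role
    // instead of returning the unpopulated result from Create.
    ret, err := clusterRoleClient.Create(role)
    if apierrs.IsAlreadyExists(err) {
        ret, err = clusterRoleClient.Get(role.GetName(), metav1.GetOptions{})
    }
    framework.ExpectNoError(err, "Failed to create or fetch %s cluster role", role.GetName())
    return ret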
func csiServiceAccount(
client clientset.Interface,
config framework.VolumeTestConfig,
componentName string,
teardown bool,
) *v1.ServiceAccount {
creatingString := "Creating"
if teardown {
creatingString = "Deleting"
}
By(fmt.Sprintf("%v a CSI service account for %v", creatingString, componentName))
serviceAccountName := config.Prefix + "-" + componentName + "-service-account"
serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace)
sa := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
},
}
serviceAccountClient.Delete(sa.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := serviceAccountClient.Get(sa.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return nil
}
ret, err := serviceAccountClient.Create(sa)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s service account: %v", sa.GetName(), err)
}
return ret
}
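The delete-then-poll-until-NotFound sequence in csiServiceAccount is repeated almost verbatim for the role bindings below; one way to factor it out is a small helper (the name waitForAPIObjectDeletion is hypothetical; the wait.Poll and apierrs.IsNotFound calls are the same ones used above):

    // waitForAPIObjectDeletion polls until the supplied read returns
    // NotFound, i.e. until the object is fully deleted. Hypothetical helper.
    func waitForAPIObjectDeletion(get func() error) error {
        return wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
            return apierrs.IsNotFound(get()), nil
        })
    }

The service-account wait above would then read:

    err := waitForAPIObjectDeletion(func() error {
        _, err := serviceAccountClient.Get(sa.GetName(), metav1.GetOptions{})
        return err
    })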
func csiClusterRoleBindings(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
sa *v1.ServiceAccount,
clusterRolesNames []string,
) {
bindingString := "Binding"
if teardown {
bindingString = "Unbinding"
}
By(fmt.Sprintf("%v cluster roles %v to the CSI service account %v", bindingString, clusterRolesNames, sa.GetName()))
clusterRoleBindingClient := client.RbacV1().ClusterRoleBindings()
for _, clusterRoleName := range clusterRolesNames {
binding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-" + clusterRoleName + "-" + config.Namespace + "-role-binding",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: sa.GetName(),
Namespace: sa.GetNamespace(),
},
},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: clusterRoleName,
APIGroup: "rbac.authorization.k8s.io",
},
}
clusterRoleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := clusterRoleBindingClient.Get(binding.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return
}
_, err = clusterRoleBindingClient.Create(binding)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
}
}
}
func csiHostPathPod(
client clientset.Interface,
config framework.VolumeTestConfig,
@@ -200,7 +347,7 @@ func csiHostPathPod(
return ret
}
func csiGCEPDSetup(
func deployGCEPDCSIDriver(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,

View File test/e2e/storage/csi_volumes.go

@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -22,13 +22,8 @@ import (
"time"
"k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/storage/utils"
@@ -38,139 +33,21 @@ import (
)
const (
csiExternalAttacherImage string = "quay.io/k8scsi/csi-attacher:v0.2.0"
csiExternalProvisionerImage string = "quay.io/k8scsi/csi-provisioner:v0.2.0"
csiDriverRegistrarImage string = "quay.io/k8scsi/driver-registrar:v0.2.0"
csiExternalProvisionerClusterRoleName string = "system:csi-external-provisioner"
csiExternalAttacherClusterRoleName string = "system:csi-external-attacher"
csiDriverRegistrarClusterRoleName string = "csi-driver-registrar"
)
// Create the driver registrar cluster role if it doesn't exist. No teardown is
// performed, so that the tests remain parallelizable; the role is shared by
// many of the CSI tests.
func csiDriverRegistrarClusterRole(
config framework.VolumeTestConfig,
) *rbacv1.ClusterRole {
// TODO(Issue: #62237) Remove impersonation workaround and cluster role when issue resolved
By("Creating an impersonating superuser kubernetes clientset to define cluster role")
rc, err := framework.LoadConfig()
framework.ExpectNoError(err)
rc.Impersonate = restclient.ImpersonationConfig{
UserName: "superuser",
Groups: []string{"system:masters"},
}
superuserClientset, err := clientset.NewForConfig(rc)
By("Creating the CSI driver registrar cluster role")
clusterRoleClient := superuserClientset.RbacV1().ClusterRoles()
role := &rbacv1.ClusterRole{
ObjectMeta: metav1.ObjectMeta{
Name: csiDriverRegistrarClusterRoleName,
},
Rules: []rbacv1.PolicyRule{
{
APIGroups: []string{""},
Resources: []string{"events"},
Verbs: []string{"get", "list", "watch", "create", "update", "patch"},
},
{
APIGroups: []string{""},
Resources: []string{"nodes"},
Verbs: []string{"get", "update", "patch"},
},
},
}
ret, err := clusterRoleClient.Create(role)
if err != nil {
if apierrs.IsAlreadyExists(err) {
return ret
}
framework.ExpectNoError(err, "Failed to create %s cluster role: %v", role.GetName(), err)
}
return ret
type csiTestDriver interface {
createCSIDriver()
cleanupCSIDriver()
createStorageClassTest(node v1.Node) storageClassTest
}
func csiServiceAccount(
client clientset.Interface,
config framework.VolumeTestConfig,
componentName string,
teardown bool,
) *v1.ServiceAccount {
By(fmt.Sprintf("Creating a CSI service account for %v", componentName))
serviceAccountName := config.Prefix + "-" + componentName + "-service-account"
serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace)
sa := &v1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{
Name: serviceAccountName,
},
}
serviceAccountClient.Delete(sa.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := serviceAccountClient.Get(sa.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return nil
}
ret, err := serviceAccountClient.Create(sa)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s service account: %v", sa.GetName(), err)
}
return ret
}
func csiClusterRoleBindings(
client clientset.Interface,
config framework.VolumeTestConfig,
teardown bool,
sa *v1.ServiceAccount,
clusterRolesNames []string,
) {
By(fmt.Sprintf("Binding cluster roles %v to the CSI service account %v", clusterRolesNames, sa.GetName()))
clusterRoleBindingClient := client.RbacV1().ClusterRoleBindings()
for _, clusterRoleName := range clusterRolesNames {
binding := &rbacv1.ClusterRoleBinding{
ObjectMeta: metav1.ObjectMeta{
Name: config.Prefix + "-" + clusterRoleName + "-" + config.Namespace + "-role-binding",
},
Subjects: []rbacv1.Subject{
{
Kind: "ServiceAccount",
Name: sa.GetName(),
Namespace: sa.GetNamespace(),
},
},
RoleRef: rbacv1.RoleRef{
Kind: "ClusterRole",
Name: clusterRoleName,
APIGroup: "rbac.authorization.k8s.io",
},
}
clusterRoleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{})
err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
_, err := clusterRoleBindingClient.Get(binding.GetName(), metav1.GetOptions{})
return apierrs.IsNotFound(err), nil
})
framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)
if teardown {
return
}
_, err = clusterRoleBindingClient.Create(binding)
if err != nil {
framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
}
}
var csiTestDrivers = map[string]func(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver{
"hostPath": initCSIHostpath,
// Feature tag to skip test in CI, pending fix of #62237
"[Feature: GCE PD CSI Plugin] gcePD": initCSIgcePD,
}
var _ = utils.SIGDescribe("CSI Volumes [Flaky]", func() {
@@ -198,103 +75,151 @@ var _ = utils.SIGDescribe("CSI Volumes [Flaky]", func() {
csiDriverRegistrarClusterRole(config)
})
// Create one of these for each of the drivers to be tested
// CSI hostPath driver test
Describe("Sanity CSI plugin test using hostPath CSI driver", func() {
var (
serviceAccount *v1.ServiceAccount
combinedClusterRoleNames []string = []string{
csiExternalAttacherClusterRoleName,
csiExternalProvisionerClusterRoleName,
csiDriverRegistrarClusterRoleName,
}
)
for driverName, initCSIDriver := range csiTestDrivers {
curDriverName := driverName
curInitCSIDriver := initCSIDriver
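// The copies above are deliberate: Ginkgo only runs the closures registered
// below (Context, BeforeEach, It) after this loop has completed, so capturing
// the range variables driverName and initCSIDriver directly would leave every
// generated test seeing the map's final entry.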
BeforeEach(func() {
By("deploying csi hostpath driver")
serviceAccount = csiServiceAccount(cs, config, "hostpath", false)
csiClusterRoleBindings(cs, config, false, serviceAccount, combinedClusterRoleNames)
csiHostPathPod(cs, config, false, f, serviceAccount)
Context(fmt.Sprintf("CSI plugin test using CSI driver: %s", curDriverName), func() {
var (
driver csiTestDriver
)
BeforeEach(func() {
driver = curInitCSIDriver(f, config)
driver.createCSIDriver()
})
AfterEach(func() {
driver.cleanupCSIDriver()
})
It("should provision storage", func() {
t := driver.createStorageClassTest(node)
claim := newClaim(t, ns.GetName(), "")
class := newStorageClass(t, ns.GetName(), "")
claim.Spec.StorageClassName = &class.ObjectMeta.Name
testDynamicProvisioning(t, cs, claim, class)
})
})
AfterEach(func() {
By("uninstalling csi hostpath driver")
csiHostPathPod(cs, config, true, f, serviceAccount)
csiClusterRoleBindings(cs, config, true, serviceAccount, combinedClusterRoleNames)
csiServiceAccount(cs, config, "hostpath", true)
})
It("should provision storage with a hostPath CSI driver", func() {
t := storageClassTest{
name: "csi-hostpath",
provisioner: "csi-hostpath",
parameters: map[string]string{},
claimSize: "1Gi",
expectedSize: "1Gi",
nodeName: node.Name,
}
claim := newClaim(t, ns.GetName(), "")
class := newStorageClass(t, ns.GetName(), "")
claim.Spec.StorageClassName = &class.ObjectMeta.Name
testDynamicProvisioning(t, cs, claim, class)
})
})
Describe("[Feature: CSI] Sanity CSI plugin test using GCE-PD CSI driver", func() {
var (
controllerClusterRoles []string = []string{
csiExternalAttacherClusterRoleName,
csiExternalProvisionerClusterRoleName,
}
nodeClusterRoles []string = []string{
csiDriverRegistrarClusterRoleName,
}
controllerServiceAccount *v1.ServiceAccount
nodeServiceAccount *v1.ServiceAccount
)
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
// Currently you will need to manually add the required GCP credentials as a secret "cloud-sa":
// kubectl create secret generic cloud-sa --from-file=PATH/TO/cloud-sa.json --namespace={{config.Namespace}}
// TODO(GITHUBISSUE): Inject the necessary credentials automatically to the driver containers in e2e test
framework.SkipUnlessSecretExistsAfterWait(cs, "cloud-sa", config.Namespace, 3*time.Minute)
By("deploying gce-pd driver")
controllerServiceAccount = csiServiceAccount(cs, config, "gce-controller", false)
nodeServiceAccount = csiServiceAccount(cs, config, "gce-node", false)
csiClusterRoleBindings(cs, config, false, controllerServiceAccount, controllerClusterRoles)
csiClusterRoleBindings(cs, config, false, nodeServiceAccount, nodeClusterRoles)
csiGCEPDSetup(cs, config, false, f, nodeServiceAccount, controllerServiceAccount)
})
AfterEach(func() {
By("uninstalling gce-pd driver")
csiGCEPDSetup(cs, config, true, f, nodeServiceAccount, controllerServiceAccount)
csiClusterRoleBindings(cs, config, true, controllerServiceAccount, controllerClusterRoles)
csiClusterRoleBindings(cs, config, true, nodeServiceAccount, nodeClusterRoles)
csiServiceAccount(cs, config, "gce-controller", true)
csiServiceAccount(cs, config, "gce-node", true)
})
It("should provision storage with a GCE-PD CSI driver", func() {
nodeZone, ok := node.GetLabels()[kubeletapis.LabelZoneFailureDomain]
Expect(ok).To(BeTrue(), "Could not get label %v from node %v", kubeletapis.LabelZoneFailureDomain, node.GetName())
t := storageClassTest{
name: "csi-gce-pd",
provisioner: "csi-gce-pd",
parameters: map[string]string{"type": "pd-standard", "zone": nodeZone},
claimSize: "5Gi",
expectedSize: "5Gi",
nodeName: node.Name,
}
claim := newClaim(t, ns.GetName(), "")
class := newStorageClass(t, ns.GetName(), "")
claim.Spec.StorageClassName = &class.ObjectMeta.Name
testDynamicProvisioning(t, cs, claim, class)
})
})
}
})
type hostpathCSIDriver struct {
combinedClusterRoleNames []string
serviceAccount *v1.ServiceAccount
f *framework.Framework
config framework.VolumeTestConfig
}
func initCSIHostpath(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver {
return &hostpathCSIDriver{
combinedClusterRoleNames: []string{
csiExternalAttacherClusterRoleName,
csiExternalProvisionerClusterRoleName,
csiDriverRegistrarClusterRoleName,
},
f: f,
config: config,
}
}
func (h *hostpathCSIDriver) createStorageClassTest(node v1.Node) storageClassTest {
return storageClassTest{
name: "csi-hostpath",
provisioner: "csi-hostpath",
parameters: map[string]string{},
claimSize: "1Gi",
expectedSize: "1Gi",
nodeName: node.Name,
}
}
func (h *hostpathCSIDriver) createCSIDriver() {
By("deploying csi hostpath driver")
f := h.f
cs := f.ClientSet
config := h.config
h.serviceAccount = csiServiceAccount(cs, config, "hostpath", false)
csiClusterRoleBindings(cs, config, false, h.serviceAccount, h.combinedClusterRoleNames)
csiHostPathPod(cs, config, false, f, h.serviceAccount)
}
func (h *hostpathCSIDriver) cleanupCSIDriver() {
By("uninstalling csi hostpath driver")
f := h.f
cs := f.ClientSet
config := h.config
csiHostPathPod(cs, config, true, f, h.serviceAccount)
csiClusterRoleBindings(cs, config, true, h.serviceAccount, h.combinedClusterRoleNames)
csiServiceAccount(cs, config, "hostpath", true)
}
type gcePDCSIDriver struct {
controllerClusterRoles []string
nodeClusterRoles []string
controllerServiceAccount *v1.ServiceAccount
nodeServiceAccount *v1.ServiceAccount
f *framework.Framework
config framework.VolumeTestConfig
}
func initCSIgcePD(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver {
cs := f.ClientSet
framework.SkipUnlessProviderIs("gce", "gke")
// Currently you will need to manually add the required GCP credentials as a secret "cloud-sa":
// kubectl create secret generic cloud-sa --from-file=PATH/TO/cloud-sa.json --namespace={{config.Namespace}}
// TODO(#62561): Inject the necessary credentials automatically to the driver containers in e2e test
framework.SkipUnlessSecretExistsAfterWait(cs, "cloud-sa", config.Namespace, 3*time.Minute)
return &gcePDCSIDriver{
nodeClusterRoles: []string{
csiDriverRegistrarClusterRoleName,
},
controllerClusterRoles: []string{
csiExternalAttacherClusterRoleName,
csiExternalProvisionerClusterRoleName,
},
f: f,
config: config,
}
}
func (g *gcePDCSIDriver) createStorageClassTest(node v1.Node) storageClassTest {
nodeZone, ok := node.GetLabels()[kubeletapis.LabelZoneFailureDomain]
Expect(ok).To(BeTrue(), "Could not get label %v from node %v", kubeletapis.LabelZoneFailureDomain, node.GetName())
return storageClassTest{
name: "csi-gce-pd",
provisioner: "csi-gce-pd",
parameters: map[string]string{"type": "pd-standard", "zone": nodeZone},
claimSize: "5Gi",
expectedSize: "5Gi",
nodeName: node.Name,
}
}
func (g *gcePDCSIDriver) createCSIDriver() {
By("deploying gce-pd driver")
f := g.f
cs := f.ClientSet
config := g.config
g.controllerServiceAccount = csiServiceAccount(cs, config, "gce-controller", false /* teardown */)
g.nodeServiceAccount = csiServiceAccount(cs, config, "gce-node", false /* teardown */)
csiClusterRoleBindings(cs, config, false /* teardown */, g.controllerServiceAccount, g.controllerClusterRoles)
csiClusterRoleBindings(cs, config, false /* teardown */, g.nodeServiceAccount, g.nodeClusterRoles)
deployGCEPDCSIDriver(cs, config, false /* teardown */, f, g.nodeServiceAccount, g.controllerServiceAccount)
}
func (g *gcePDCSIDriver) cleanupCSIDriver() {
By("uninstalling gce-pd driver")
f := g.f
cs := f.ClientSet
config := g.config
deployGCEPDCSIDriver(cs, config, true /* teardown */, f, g.nodeServiceAccount, g.controllerServiceAccount)
csiClusterRoleBindings(cs, config, true /* teardown */, g.controllerServiceAccount, g.controllerClusterRoles)
csiClusterRoleBindings(cs, config, true /* teardown */, g.nodeServiceAccount, g.nodeClusterRoles)
csiServiceAccount(cs, config, "gce-controller", true /* teardown */)
csiServiceAccount(cs, config, "gce-node", true /* teardown */)
}
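The two concrete drivers above show the payoff of the refactor: adding another CSI plugin to the suite only requires a constructor plus the three csiTestDriver methods, and one entry in the csiTestDrivers map. A minimal sketch of the shape such an addition would take (the example driver name, provisioner, and sizes are hypothetical placeholders, and the deployment steps are elided):

    // exampleCSIDriver is a hypothetical plugin used only to illustrate the
    // csiTestDriver extension points; it is not part of this commit.
    type exampleCSIDriver struct {
        serviceAccount *v1.ServiceAccount
        f              *framework.Framework
        config         framework.VolumeTestConfig
    }

    func initCSIExample(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver {
        return &exampleCSIDriver{f: f, config: config}
    }

    func (e *exampleCSIDriver) createStorageClassTest(node v1.Node) storageClassTest {
        return storageClassTest{
            name:         "csi-example", // hypothetical provisioner
            provisioner:  "csi-example",
            parameters:   map[string]string{},
            claimSize:    "1Gi",
            expectedSize: "1Gi",
            nodeName:     node.Name,
        }
    }

    func (e *exampleCSIDriver) createCSIDriver() {
        By("deploying example driver")
        e.serviceAccount = csiServiceAccount(e.f.ClientSet, e.config, "example", false /* teardown */)
        // ...bind cluster roles and create the plugin pods here...
    }

    func (e *exampleCSIDriver) cleanupCSIDriver() {
        By("uninstalling example driver")
        // ...tear down pods and role bindings in reverse order...
        csiServiceAccount(e.f.ClientSet, e.config, "example", true /* teardown */)
    }

Registering the driver is then one line in the csiTestDrivers map near the top of the file:

    "example": initCSIExample,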

View File

@@ -10,4 +10,3 @@ spec:
ports:
- name: dummy
port: 12345

View File

@@ -41,11 +41,6 @@ spec:
- name: socket-dir
mountPath: /csi
- name: gce-driver
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
imagePullPolicy: Always
image: gcr.io/google-containers/volume-csi/compute-persistent-disk-csi-driver:v0.2.0.alpha
args:

View File

@@ -33,9 +33,6 @@ spec:
- name: gce-driver
securityContext:
privileged: true
capabilities:
add: ["SYS_ADMIN"]
allowPrivilegeEscalation: true
imagePullPolicy: Always
image: gcr.io/google-containers/volume-csi/compute-persistent-disk-csi-driver:v0.2.0.alpha
args: