Add GCE-PD CSI Driver test to E2E test suite
commit 01d916167b
parent 7ba97b9200
@@ -376,6 +376,22 @@ func SkipUnlessNodeOSDistroIs(supportedNodeOsDistros ...string) {
 	}
 }
 
+func SkipUnlessSecretExistsAfterWait(c clientset.Interface, name, namespace string, timeout time.Duration) {
+	Logf("Waiting for secret %v in namespace %v to exist in duration %v", name, namespace, timeout)
+	start := time.Now()
+	if wait.PollImmediate(15*time.Second, timeout, func() (bool, error) {
+		_, err := c.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
+		if err != nil {
+			Logf("Secret %v in namespace %v still does not exist after duration %v", name, namespace, time.Since(start))
+			return false, nil
+		}
+		return true, nil
+	}) != nil {
+		Skipf("Secret %v in namespace %v did not exist after timeout of %v", name, namespace, timeout)
+	}
+	Logf("Secret %v in namespace %v found after duration %v", name, namespace, time.Since(start))
+}
+
 func SkipIfContainerRuntimeIs(runtimes ...string) {
 	for _, runtime := range runtimes {
 		if runtime == TestContext.ContainerRuntime {
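A minimal usage sketch (hypothetical spec; only SkipUnlessSecretExistsAfterWait comes from this commit, the rest is standard e2e framework and Ginkgo usage) of how a test can gate itself on a pre-created secret:

package storage_test // illustrative package name

import (
	"time"

	. "github.com/onsi/ginkgo"

	"k8s.io/kubernetes/test/e2e/framework"
)

var _ = Describe("example gated on a pre-created secret", func() {
	f := framework.NewDefaultFramework("secret-gate")

	It("runs only when the operator created the secret beforehand", func() {
		// Skips (rather than fails) the spec if "cloud-sa" never appears
		// in the test namespace within 3 minutes.
		framework.SkipUnlessSecretExistsAfterWait(f.ClientSet, "cloud-sa", f.Namespace.Name, 3*time.Minute)
	})
})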
@@ -125,3 +125,20 @@ func StatefulSetFromManifest(fileName, ns string) (*apps.StatefulSet, error) {
 	}
 	return &ss, nil
 }
+
+// DaemonSetFromManifest returns a DaemonSet from a manifest stored in fileName in the Namespace indicated by ns.
+func DaemonSetFromManifest(fileName, ns string) (*apps.DaemonSet, error) {
+	var ds apps.DaemonSet
+	data := generated.ReadOrDie(fileName)
+
+	json, err := utilyaml.ToJSON(data)
+	if err != nil {
+		return nil, err
+	}
+	err = runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), json, &ds)
+	if err != nil {
+		return nil, err
+	}
+	ds.Namespace = ns
+	return &ds, nil
+}
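A brief sketch (hypothetical caller, illustrative only; it mirrors how csiGCEPDSetup later in this commit consumes the helper, and assumes the decoded object is the external apps/v1 type as that caller does) of loading a DaemonSet manifest, overriding its namespace-scoped fields, and creating it through the client:

package example // illustrative

import (
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/manifest"
)

// deployNodeDaemonSet loads the node DaemonSet manifest added by this commit,
// points its pods at the caller's service account, and creates it in ns.
func deployNodeDaemonSet(client clientset.Interface, ns, serviceAccount string) error {
	ds, err := manifest.DaemonSetFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml", ns)
	if err != nil {
		return err
	}
	ds.Spec.Template.Spec.ServiceAccountName = serviceAccount
	_, err = client.AppsV1().DaemonSets(ns).Create(ds)
	return err
}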
@@ -3,7 +3,7 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
 go_library(
     name = "go_default_library",
     srcs = [
-        "csi_hostpath.go",
+        "csi_defs.go",
         "csi_volumes.go",
         "empty_dir_wrapper.go",
         "flexvolume.go",
@@ -39,6 +39,7 @@ go_library(
         "//test/e2e/framework:go_default_library",
         "//test/e2e/framework/metrics:go_default_library",
         "//test/e2e/generated:go_default_library",
+        "//test/e2e/manifest:go_default_library",
         "//test/e2e/storage/utils:go_default_library",
         "//test/e2e/storage/vsphere:go_default_library",
         "//test/utils/image:go_default_library",
@@ -21,10 +21,12 @@ package storage
 
 import (
 	"k8s.io/api/core/v1"
+	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/framework"
+	"k8s.io/kubernetes/test/e2e/manifest"
 )
 
 const (
@@ -197,3 +199,59 @@ func csiHostPathPod(
 	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, ret))
 	return ret
 }
+
+func csiGCEPDSetup(
+	client clientset.Interface,
+	config framework.VolumeTestConfig,
+	teardown bool,
+	f *framework.Framework,
+	nodeSA *v1.ServiceAccount,
+	controllerSA *v1.ServiceAccount,
+) {
+	// Get API Objects from manifests
+	nodeds, err := manifest.DaemonSetFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml", config.Namespace)
+	framework.ExpectNoError(err, "Failed to create DaemonSet from manifest")
+	nodeds.Spec.Template.Spec.ServiceAccountName = nodeSA.GetName()
+
+	controllerss, err := manifest.StatefulSetFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml", config.Namespace)
+	framework.ExpectNoError(err, "Failed to create StatefulSet from manifest")
+	controllerss.Spec.Template.Spec.ServiceAccountName = controllerSA.GetName()
+
+	controllerservice, err := manifest.SvcFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/controller_service.yaml")
+	framework.ExpectNoError(err, "Failed to create Service from manifest")
+
+	// Got all objects from manifests now try to delete objects
+	err = client.CoreV1().Services(config.Namespace).Delete(controllerservice.GetName(), nil)
+	if err != nil {
+		if !apierrs.IsNotFound(err) {
+			framework.ExpectNoError(err, "Failed to delete Service: %v", controllerservice.GetName())
+		}
+	}
+
+	err = client.AppsV1().StatefulSets(config.Namespace).Delete(controllerss.Name, nil)
+	if err != nil {
+		if !apierrs.IsNotFound(err) {
+			framework.ExpectNoError(err, "Failed to delete StatefulSet: %v", controllerss.GetName())
+		}
+	}
+	err = client.AppsV1().DaemonSets(config.Namespace).Delete(nodeds.Name, nil)
+	if err != nil {
+		if !apierrs.IsNotFound(err) {
+			framework.ExpectNoError(err, "Failed to delete DaemonSet: %v", nodeds.GetName())
+		}
+	}
+	if teardown {
+		return
+	}
+
+	// Create new API Objects through client
+	_, err = client.CoreV1().Services(config.Namespace).Create(controllerservice)
+	framework.ExpectNoError(err, "Failed to create Service: %v", controllerservice.Name)
+
+	_, err = client.AppsV1().StatefulSets(config.Namespace).Create(controllerss)
+	framework.ExpectNoError(err, "Failed to create StatefulSet: %v", controllerss.Name)
+
+	_, err = client.AppsV1().DaemonSets(config.Namespace).Create(nodeds)
+	framework.ExpectNoError(err, "Failed to create DaemonSet: %v", nodeds.Name)
+
+}
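Note on csiGCEPDSetup above: the helper is deliberately symmetric. It always deletes any existing Service, StatefulSet and DaemonSet first (treating NotFound as success), returns early when teardown is true, and otherwise recreates the three objects from the checked-in manifests with the caller-supplied service accounts injected. The same function can therefore back both the BeforeEach and the AfterEach of the test added below.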
@@ -17,6 +17,7 @@ limitations under the License.
 package storage
 
 import (
+	"fmt"
 	"math/rand"
 	"time"
 
@@ -28,10 +29,12 @@ import (
 
 	clientset "k8s.io/client-go/kubernetes"
 	restclient "k8s.io/client-go/rest"
+	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/storage/utils"
 
 	. "github.com/onsi/ginkgo"
+	. "github.com/onsi/gomega"
 )
 
 const (
@@ -95,7 +98,7 @@ func csiServiceAccount(
 	componentName string,
 	teardown bool,
 ) *v1.ServiceAccount {
-	By("Creating a CSI service account")
+	By(fmt.Sprintf("Creating a CSI service account for %v", componentName))
 	serviceAccountName := config.Prefix + "-" + componentName + "-service-account"
 	serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace)
 	sa := &v1.ServiceAccount{
@@ -130,7 +133,7 @@ func csiClusterRoleBindings(
 	sa *v1.ServiceAccount,
 	clusterRolesNames []string,
 ) {
-	By("Binding cluster roles to the CSI service account")
+	By(fmt.Sprintf("Binding cluster roles %v to the CSI service account %v", clusterRolesNames, sa.GetName()))
 	clusterRoleBindingClient := client.RbacV1().ClusterRoleBindings()
 	for _, clusterRoleName := range clusterRolesNames {
 
@@ -237,4 +240,61 @@ var _ = utils.SIGDescribe("CSI Volumes [Flaky]", func() {
 			testDynamicProvisioning(t, cs, claim, class)
 		})
 	})
+
+	Describe("[Feature: CSI] Sanity CSI plugin test using GCE-PD CSI driver", func() {
+		var (
+			controllerClusterRoles []string = []string{
+				csiExternalAttacherClusterRoleName,
+				csiExternalProvisionerClusterRoleName,
+			}
+			nodeClusterRoles []string = []string{
+				csiDriverRegistrarClusterRoleName,
+			}
+			controllerServiceAccount *v1.ServiceAccount
+			nodeServiceAccount       *v1.ServiceAccount
+		)
+
+		BeforeEach(func() {
+			framework.SkipUnlessProviderIs("gce", "gke")
+			// Currently you will need to manually add the required GCP Credentials as a secret "cloud-sa":
+			// kubectl create secret generic cloud-sa --from-file=PATH/TO/cloud-sa.json --namespace={{config.Namespace}}
+			// TODO(GITHUBISSUE): Inject the necessary credentials automatically to the driver containers in e2e test
+			framework.SkipUnlessSecretExistsAfterWait(cs, "cloud-sa", config.Namespace, 3*time.Minute)
+
+			By("deploying gce-pd driver")
+			controllerServiceAccount = csiServiceAccount(cs, config, "gce-controller", false)
+			nodeServiceAccount = csiServiceAccount(cs, config, "gce-node", false)
+			csiClusterRoleBindings(cs, config, false, controllerServiceAccount, controllerClusterRoles)
+			csiClusterRoleBindings(cs, config, false, nodeServiceAccount, nodeClusterRoles)
+			csiGCEPDSetup(cs, config, false, f, nodeServiceAccount, controllerServiceAccount)
+		})
+
+		AfterEach(func() {
+			By("uninstalling gce-pd driver")
+			csiGCEPDSetup(cs, config, true, f, nodeServiceAccount, controllerServiceAccount)
+			csiClusterRoleBindings(cs, config, true, controllerServiceAccount, controllerClusterRoles)
+			csiClusterRoleBindings(cs, config, true, nodeServiceAccount, nodeClusterRoles)
+			csiServiceAccount(cs, config, "gce-controller", true)
+			csiServiceAccount(cs, config, "gce-node", true)
+		})
+
+		It("should provision storage with a GCE-PD CSI driver", func() {
+			nodeZone, ok := node.GetLabels()[kubeletapis.LabelZoneFailureDomain]
+			Expect(ok).To(BeTrue(), "Could not get label %v from node %v", kubeletapis.LabelZoneFailureDomain, node.GetName())
+			t := storageClassTest{
+				name:         "csi-gce-pd",
+				provisioner:  "csi-gce-pd",
+				parameters:   map[string]string{"type": "pd-standard", "zone": nodeZone},
+				claimSize:    "5Gi",
+				expectedSize: "5Gi",
+				nodeName:     node.Name,
+			}
+
+			claim := newClaim(t, ns.GetName(), "")
+			class := newStorageClass(t, ns.GetName(), "")
+			claim.Spec.StorageClassName = &class.ObjectMeta.Name
+			testDynamicProvisioning(t, cs, claim, class)
+		})
+
+	})
 })
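The BeforeEach above skips unless a "cloud-sa" secret already exists in the test namespace. A hedged sketch of pre-creating that secret with client-go instead of kubectl (the helper name and key-file path are illustrative):

package example // illustrative

import (
	"io/ioutil"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// createCloudSASecret stores a GCP service-account key file as the "cloud-sa"
// secret that the GCE-PD controller pod mounts at /etc/service-account/cloud-sa.json.
func createCloudSASecret(client clientset.Interface, ns, keyFilePath string) error {
	key, err := ioutil.ReadFile(keyFilePath)
	if err != nil {
		return err
	}
	secret := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{Name: "cloud-sa", Namespace: ns},
		Data:       map[string][]byte{"cloud-sa.json": key},
	}
	_, err = client.CoreV1().Secrets(ns).Create(secret)
	return err
}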
@@ -0,0 +1,13 @@
+kind: Service
+apiVersion: v1
+metadata:
+  name: csi-gce-pd
+  labels:
+    app: csi-gce-pd
+spec:
+  selector:
+    app: csi-gce-pd
+  ports:
+    - name: dummy
+      port: 12345
+
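The Service above exposes no real endpoint; the single "dummy" port appears to exist only so that the controller StatefulSet in the next manifest has a governing Service to reference through serviceName: "csi-gce-pd", which the StatefulSet API requires.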
@@ -0,0 +1,75 @@
+kind: StatefulSet
+apiVersion: apps/v1beta1
+metadata:
+  name: csi-gce-controller
+spec:
+  serviceName: "csi-gce-pd"
+  replicas: 1
+  selector:
+    matchLabels:
+      app: csi-gce-pd-driver
+  template:
+    metadata:
+      labels:
+        app: csi-gce-pd-driver
+    spec:
+      serviceAccount: csi-gce-pd
+      containers:
+        - name: csi-external-provisioner
+          imagePullPolicy: Always
+          image: quay.io/k8scsi/csi-provisioner:v0.2.0
+          args:
+            - "--v=5"
+            - "--provisioner=csi-gce-pd"
+            - "--csi-address=$(ADDRESS)"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /csi
+        - name: csi-attacher
+          imagePullPolicy: Always
+          image: quay.io/k8scsi/csi-attacher:v0.2.0
+          args:
+            - "--v=5"
+            - "--csi-address=$(ADDRESS)"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /csi
+        - name: gce-driver
+          securityContext:
+            privileged: true
+            capabilities:
+              add: ["SYS_ADMIN"]
+            allowPrivilegeEscalation: true
+          imagePullPolicy: Always
+          image: gcr.io/google-containers/volume-csi/compute-persistent-disk-csi-driver:v0.2.0.alpha
+          args:
+            - "--v=5"
+            - "--endpoint=$(CSI_ENDPOINT)"
+            - "--nodeid=$(KUBE_NODE_NAME)"
+          env:
+            - name: CSI_ENDPOINT
+              value: unix:///csi/csi.sock
+            - name: KUBE_NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+            - name: GOOGLE_APPLICATION_CREDENTIALS
+              value: "/etc/service-account/cloud-sa.json"
+          volumeMounts:
+            - name: socket-dir
+              mountPath: /csi
+            - name: cloud-sa-volume
+              readOnly: true
+              mountPath: "/etc/service-account"
+      volumes:
+        - name: socket-dir
+          emptyDir: {}
+        - name: cloud-sa-volume
+          secret:
+            secretName: cloud-sa
test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml (new file, 72 lines)
@@ -0,0 +1,72 @@
+kind: DaemonSet
+apiVersion: apps/v1
+metadata:
+  name: csi-gce-node
+spec:
+  selector:
+    matchLabels:
+      app: csi-gce-driver
+  serviceName: csi-gce
+  template:
+    metadata:
+      labels:
+        app: csi-gce-driver
+    spec:
+      serviceAccount: csi-gce-pd
+      containers:
+        - name: csi-driver-registrar
+          imagePullPolicy: Always
+          image: quay.io/k8scsi/driver-registrar:v0.2.0
+          args:
+            - "--v=5"
+            - "--csi-address=$(ADDRESS)"
+          env:
+            - name: ADDRESS
+              value: /csi/csi.sock
+            - name: KUBE_NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          volumeMounts:
+            - name: plugin-dir
+              mountPath: /csi
+        - name: gce-driver
+          securityContext:
+            privileged: true
+            capabilities:
+              add: ["SYS_ADMIN"]
+            allowPrivilegeEscalation: true
+          imagePullPolicy: Always
+          image: gcr.io/google-containers/volume-csi/compute-persistent-disk-csi-driver:v0.2.0.alpha
+          args:
+            - "--v=5"
+            - "--endpoint=$(CSI_ENDPOINT)"
+            - "--nodeid=$(KUBE_NODE_NAME)"
+          env:
+            - name: CSI_ENDPOINT
+              value: unix:///csi/csi.sock
+            - name: KUBE_NODE_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: spec.nodeName
+          volumeMounts:
+            - name: kubelet-dir
+              mountPath: /var/lib/kubelet
+              mountPropagation: "Bidirectional"
+            - name: plugin-dir
+              mountPath: /csi
+            - name: device-dir
+              mountPath: /host/dev
+      volumes:
+        - name: kubelet-dir
+          hostPath:
+            path: /var/lib/kubelet
+            type: Directory
+        - name: plugin-dir
+          hostPath:
+            path: /var/lib/kubelet/plugins/com.google.csi.gcepd/
+            type: DirectoryOrCreate
+        - name: device-dir
+          hostPath:
+            path: /dev
+            type: Directory
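The node DaemonSet runs the driver container privileged and mounts /var/lib/kubelet with Bidirectional mount propagation, so mounts performed inside the container propagate back to the kubelet on the host; the plugin socket is published under /var/lib/kubelet/plugins/com.google.csi.gcepd/, the conventional per-driver plugin directory.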