The CSI integration test for hostpath was hard-coded to use the latest
stable release of the sidecar and hostpath container images. This
makes sense for regression testing of changes made in Kubernetes
itself, but the same test is also useful for testing the "canary"
images on quay.io before tagging them as a new release or for testing
locally produced images. Both are now possible via command-line
parameters.
Testing "canary" images on quay.io:
go run hack/e2e.go -- --provider=local --test \
--test_args="--ginkgo.focus=CSI.plugin.test.using.CSI.driver..hostPath -csiImageVersion=canary"
Testing local container images:
# https://docs.docker.com/registry/deploying/
docker run -d -p 5000:5000 --restart=always --name registry registry:2
for i in driver-registrar drivers external-attacher external-provisioner; do
make -C $i REGISTRY_NAME=localhost:5000 push
done
go run hack/e2e.go -- --provider=local --test \
--test_args="--ginkgo.focus=CSI.plugin.test.using.CSI.driver..hostPath -csiImageVersion=canary -csiImageRegistry=localhost:5000"
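
Image references are composed as registry/image:tag (see csiContainerImage
below): with no overrides the test pulls, for example,
quay.io/k8scsi/hostpathplugin:v0.2.0; with the flags above it pulls
localhost:5000/hostpathplugin:canary instead.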
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This file is used to deploy the CSI hostPath plugin
// More Information: https://github.com/kubernetes-csi/drivers/tree/master/pkg/hostpath

package storage

import (
	"flag"
	"fmt"
	"time"

	"k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"

	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/manifest"

	. "github.com/onsi/ginkgo"
)
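
// csiImageVersions maps each CSI sidecar/driver image name to the default
// (latest stable) tag used when -csiImageVersion is not set.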
var csiImageVersions = map[string]string{
	"hostpathplugin":   "v0.2.0",
	"csi-attacher":     "v0.2.0",
	"csi-provisioner":  "v0.2.1",
	"driver-registrar": "v0.2.0",
}

var csiImageVersion string
var csiImageRegistry string

func init() {
	flag.StringVar(&csiImageVersion, "csiImageVersion", "", "overrides the default tag used for hostpathplugin/csi-attacher/csi-provisioner/driver-registrar images")
	flag.StringVar(&csiImageRegistry, "csiImageRegistry", "quay.io/k8scsi", "overrides the default repository used for hostpathplugin/csi-attacher/csi-provisioner/driver-registrar images")
}
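
// csiContainerImage composes the full image reference for the given CSI
// container, combining -csiImageRegistry with either the -csiImageVersion
// override or the image's default tag from csiImageVersions.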
func csiContainerImage(image string) string {
	var fullName string
	fullName += csiImageRegistry + "/" + image + ":"
	if csiImageVersion != "" {
		fullName += csiImageVersion
	} else {
		fullName += csiImageVersions[image]
	}
	return fullName
}

// Create the driver registrar cluster role if it doesn't exist, no teardown so that tests
// are parallelizable. This role will be shared with many of the CSI tests.
func csiDriverRegistrarClusterRole(
	config framework.VolumeTestConfig,
) *rbacv1.ClusterRole {
	// TODO(Issue: #62237) Remove impersonation workaround and cluster role when issue resolved
	By("Creating an impersonating superuser kubernetes clientset to define cluster role")
	rc, err := framework.LoadConfig()
	framework.ExpectNoError(err)
	rc.Impersonate = restclient.ImpersonationConfig{
		UserName: "superuser",
		Groups:   []string{"system:masters"},
	}
	superuserClientset, err := clientset.NewForConfig(rc)
	framework.ExpectNoError(err, "Failed to create superuser clientset: %v", err)
	By("Creating the CSI driver registrar cluster role")
	clusterRoleClient := superuserClientset.RbacV1().ClusterRoles()
	role := &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{
			Name: csiDriverRegistrarClusterRoleName,
		},
		Rules: []rbacv1.PolicyRule{
			{
				APIGroups: []string{""},
				Resources: []string{"events"},
				Verbs:     []string{"get", "list", "watch", "create", "update", "patch"},
			},
			{
				APIGroups: []string{""},
				Resources: []string{"nodes"},
				Verbs:     []string{"get", "update", "patch"},
			},
		},
	}

	ret, err := clusterRoleClient.Create(role)
	if err != nil {
		if apierrs.IsAlreadyExists(err) {
			return ret
		}
		framework.ExpectNoError(err, "Failed to create %s cluster role: %v", role.GetName(), err)
	}

	return ret
}
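
// csiServiceAccount creates the service account for the named CSI component,
// deleting any leftover account of the same name first; with teardown it
// only deletes and returns nil.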
func csiServiceAccount(
	client clientset.Interface,
	config framework.VolumeTestConfig,
	componentName string,
	teardown bool,
) *v1.ServiceAccount {
	creatingString := "Creating"
	if teardown {
		creatingString = "Deleting"
	}
	By(fmt.Sprintf("%v a CSI service account for %v", creatingString, componentName))
	serviceAccountName := config.Prefix + "-" + componentName + "-service-account"
	serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace)
	sa := &v1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{
			Name: serviceAccountName,
		},
	}

	serviceAccountClient.Delete(sa.GetName(), &metav1.DeleteOptions{})
	err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
		_, err := serviceAccountClient.Get(sa.GetName(), metav1.GetOptions{})
		return apierrs.IsNotFound(err), nil
	})
	framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)

	if teardown {
		return nil
	}

	ret, err := serviceAccountClient.Create(sa)
	if err != nil {
		framework.ExpectNoError(err, "Failed to create %s service account: %v", sa.GetName(), err)
	}

	return ret
}
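
// csiClusterRoleBindings (re)binds the given cluster roles to the CSI
// service account; with teardown it only removes the existing bindings.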
func csiClusterRoleBindings(
	client clientset.Interface,
	config framework.VolumeTestConfig,
	teardown bool,
	sa *v1.ServiceAccount,
	clusterRolesNames []string,
) {
	bindingString := "Binding"
	if teardown {
		bindingString = "Unbinding"
	}
	By(fmt.Sprintf("%v cluster roles %v to the CSI service account %v", bindingString, clusterRolesNames, sa.GetName()))
	clusterRoleBindingClient := client.RbacV1().ClusterRoleBindings()
	for _, clusterRoleName := range clusterRolesNames {

		binding := &rbacv1.ClusterRoleBinding{
			ObjectMeta: metav1.ObjectMeta{
				Name: config.Prefix + "-" + clusterRoleName + "-" + config.Namespace + "-role-binding",
			},
			Subjects: []rbacv1.Subject{
				{
					Kind:      "ServiceAccount",
					Name:      sa.GetName(),
					Namespace: sa.GetNamespace(),
				},
			},
			RoleRef: rbacv1.RoleRef{
				Kind:     "ClusterRole",
				Name:     clusterRoleName,
				APIGroup: "rbac.authorization.k8s.io",
			},
		}

		clusterRoleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{})
		err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
			_, err := clusterRoleBindingClient.Get(binding.GetName(), metav1.GetOptions{})
			return apierrs.IsNotFound(err), nil
		})
		framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)

		if teardown {
			return
		}

		_, err = clusterRoleBindingClient.Create(binding)
		if err != nil {
			framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
		}
	}
}
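
// csiHostPathPod deploys the CSI hostPath driver pod: the hostpathplugin
// container plus the external-provisioner, driver-registrar and
// external-attacher sidecars, all sharing the driver socket through a
// hostPath volume. With teardown it only deletes the pod and returns nil.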
func csiHostPathPod(
	client clientset.Interface,
	config framework.VolumeTestConfig,
	teardown bool,
	f *framework.Framework,
	sa *v1.ServiceAccount,
) *v1.Pod {
	podClient := client.CoreV1().Pods(config.Namespace)

	priv := true
	mountPropagation := v1.MountPropagationBidirectional
	hostPathType := v1.HostPathDirectoryOrCreate
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      config.Prefix + "-pod",
			Namespace: config.Namespace,
			Labels: map[string]string{
				"app": "hostpath-driver",
			},
		},
		Spec: v1.PodSpec{
			ServiceAccountName: sa.GetName(),
			NodeName:           config.ServerNodeName,
			RestartPolicy:      v1.RestartPolicyNever,
			Containers: []v1.Container{
				{
					Name:            "external-provisioner",
					Image:           csiContainerImage("csi-provisioner"),
					ImagePullPolicy: v1.PullAlways,
					Args: []string{
						"--v=5",
						"--provisioner=csi-hostpath",
						"--csi-address=/csi/csi.sock",
					},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "socket-dir",
							MountPath: "/csi",
						},
					},
				},
				{
					Name:            "driver-registrar",
					Image:           csiContainerImage("driver-registrar"),
					ImagePullPolicy: v1.PullAlways,
					Args: []string{
						"--v=5",
						"--csi-address=/csi/csi.sock",
					},
					Env: []v1.EnvVar{
						{
							Name: "KUBE_NODE_NAME",
							ValueFrom: &v1.EnvVarSource{
								FieldRef: &v1.ObjectFieldSelector{
									FieldPath: "spec.nodeName",
								},
							},
						},
					},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "socket-dir",
							MountPath: "/csi",
						},
					},
				},
				{
					Name:            "external-attacher",
					Image:           csiContainerImage("csi-attacher"),
					ImagePullPolicy: v1.PullAlways,
					Args: []string{
						"--v=5",
						"--csi-address=$(ADDRESS)",
					},
					Env: []v1.EnvVar{
						{
							Name:  "ADDRESS",
							Value: "/csi/csi.sock",
						},
					},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "socket-dir",
							MountPath: "/csi",
						},
					},
				},
				{
					Name:            "hostpath-driver",
					Image:           csiContainerImage("hostpathplugin"),
					ImagePullPolicy: v1.PullAlways,
					SecurityContext: &v1.SecurityContext{
						Privileged: &priv,
					},
					Args: []string{
						"--v=5",
						"--endpoint=$(CSI_ENDPOINT)",
						"--nodeid=$(KUBE_NODE_NAME)",
					},
					Env: []v1.EnvVar{
						{
							Name:  "CSI_ENDPOINT",
							Value: "unix://" + "/csi/csi.sock",
						},
						{
							Name: "KUBE_NODE_NAME",
							ValueFrom: &v1.EnvVarSource{
								FieldRef: &v1.ObjectFieldSelector{
									FieldPath: "spec.nodeName",
								},
							},
						},
					},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "socket-dir",
							MountPath: "/csi",
						},
						{
							Name:             "mountpoint-dir",
							MountPath:        "/var/lib/kubelet/pods",
							MountPropagation: &mountPropagation,
						},
					},
				},
			},
			Volumes: []v1.Volume{
				{
					Name: "socket-dir",
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{
							Path: "/var/lib/kubelet/plugins/csi-hostpath",
							Type: &hostPathType,
						},
					},
				},
				{
					Name: "mountpoint-dir",
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{
							Path: "/var/lib/kubelet/pods",
							Type: &hostPathType,
						},
					},
				},
			},
		},
	}

	err := framework.DeletePodWithWait(f, client, pod)
	framework.ExpectNoError(err, "Failed to delete pod %s/%s: %v",
		pod.GetNamespace(), pod.GetName(), err)

	if teardown {
		return nil
	}

	ret, err := podClient.Create(pod)
	if err != nil {
		framework.ExpectNoError(err, "Failed to create %q pod: %v", pod.GetName(), err)
	}

	// Wait for pod to come up
	framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, ret))
	return ret
}
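
// deployGCEPDCSIDriver deploys the GCE PD CSI driver from its testing
// manifests (node DaemonSet, controller StatefulSet, controller Service),
// deleting any existing copies first; with teardown it only deletes them.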
func deployGCEPDCSIDriver(
	client clientset.Interface,
	config framework.VolumeTestConfig,
	teardown bool,
	f *framework.Framework,
	nodeSA *v1.ServiceAccount,
	controllerSA *v1.ServiceAccount,
) {
	// Get API Objects from manifests
	nodeds, err := manifest.DaemonSetFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml", config.Namespace)
	framework.ExpectNoError(err, "Failed to create DaemonSet from manifest")
	nodeds.Spec.Template.Spec.ServiceAccountName = nodeSA.GetName()

	controllerss, err := manifest.StatefulSetFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml", config.Namespace)
	framework.ExpectNoError(err, "Failed to create StatefulSet from manifest")
	controllerss.Spec.Template.Spec.ServiceAccountName = controllerSA.GetName()

	controllerservice, err := manifest.SvcFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/controller_service.yaml")
	framework.ExpectNoError(err, "Failed to create Service from manifest")

	// Got all objects from manifests, now try to delete objects
	err = client.CoreV1().Services(config.Namespace).Delete(controllerservice.GetName(), nil)
	if err != nil {
		if !apierrs.IsNotFound(err) {
			framework.ExpectNoError(err, "Failed to delete Service: %v", controllerservice.GetName())
		}
	}

	err = client.AppsV1().StatefulSets(config.Namespace).Delete(controllerss.Name, nil)
	if err != nil {
		if !apierrs.IsNotFound(err) {
			framework.ExpectNoError(err, "Failed to delete StatefulSet: %v", controllerss.GetName())
		}
	}

	err = client.AppsV1().DaemonSets(config.Namespace).Delete(nodeds.Name, nil)
	if err != nil {
		if !apierrs.IsNotFound(err) {
			framework.ExpectNoError(err, "Failed to delete DaemonSet: %v", nodeds.GetName())
		}
	}

	if teardown {
		return
	}

	// Create new API Objects through client
	_, err = client.CoreV1().Services(config.Namespace).Create(controllerservice)
	framework.ExpectNoError(err, "Failed to create Service: %v", controllerservice.Name)

	_, err = client.AppsV1().StatefulSets(config.Namespace).Create(controllerss)
	framework.ExpectNoError(err, "Failed to create StatefulSet: %v", controllerss.Name)

	_, err = client.AppsV1().DaemonSets(config.Namespace).Create(nodeds)
	framework.ExpectNoError(err, "Failed to create DaemonSet: %v", nodeds.Name)
}