Merge pull request #68138 from wongma7/nfs-e2e-containerd

Automatic merge from submit-queue (batch tested with PRs 67736, 68123, 68138). If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md.

Update external provisioner test to use latest nfs-provisioner

**What this PR does / why we need it**: the latest nfs-provisioner (v2.1.0-k8s1.11, up from v1.0.9 in this test) works with cri-containerd, so update the e2e external-provisioner test to use it.
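For readers skimming the diff, the substantive change is the image tag on the e2e provisioner container; everything else in that pod spec stays the same. A minimal sketch (both tags are taken from the diff below; the package name is purely illustrative):

```go
package sketch // hypothetical package, for illustration only

import v1 "k8s.io/api/core/v1"

// The only field of the e2e provisioner pod spec that changes is the image tag.
var provisionerContainer = v1.Container{
	Name: "nfs-provisioner",
	// before: quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9
	Image: "quay.io/kubernetes_incubator/nfs-provisioner:v2.1.0-k8s1.11",
}
```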

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #

**Special notes for your reviewer**: I want to move this test to nfs-client-provisioner soon anyway, since many of our e2e tests already use a containerized NFS server and it would be good to be consistent. So treat this as something of a stopgap, but it would be nice to have it in ASAP to unblock https://github.com/kubernetes-incubator/external-storage/issues/432#issuecomment-417511065

**Release note**:

```release-note
NONE
```
Kubernetes Submit Queue, 2018-09-04 12:51:38 -07:00, committed by GitHub
commit be11540775
4 changed files with 104 additions and 156 deletions


@@ -17,9 +17,9 @@ go_library(
        "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//test/e2e/framework:go_default_library",
        "//test/e2e/storage/testpatterns:go_default_library",
        "//test/e2e/storage/utils:go_default_library",
        "//test/e2e/storage/vsphere:go_default_library",
        "//test/utils/image:go_default_library",
        "//vendor/github.com/onsi/ginkgo:go_default_library",


@@ -51,10 +51,10 @@ import (
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/util/sets"
    "k8s.io/apiserver/pkg/authentication/serviceaccount"
    clientset "k8s.io/client-go/kubernetes"
    kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
    "k8s.io/kubernetes/test/e2e/framework"
    "k8s.io/kubernetes/test/e2e/storage/testpatterns"
    "k8s.io/kubernetes/test/e2e/storage/utils"
    vspheretest "k8s.io/kubernetes/test/e2e/storage/vsphere"
    imageutils "k8s.io/kubernetes/test/utils/image"
)
@@ -144,80 +144,7 @@ func (n *nfsDriver) CreateDriver() {
    framework.ExpectNoError(err, "Failed to update authorization: %v", err)
    By("creating an external dynamic provisioner pod")
    n.externalProvisionerPod = startExternalProvisioner(cs, ns.Name, n.externalPluginName)
}
func startExternalProvisioner(c clientset.Interface, ns string, externalPluginName string) *v1.Pod {
    podClient := c.CoreV1().Pods(ns)
    provisionerPod := &v1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind: "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: "external-provisioner-",
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name: "nfs-provisioner",
                    Image: "quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9",
                    SecurityContext: &v1.SecurityContext{
                        Capabilities: &v1.Capabilities{
                            Add: []v1.Capability{"DAC_READ_SEARCH"},
                        },
                    },
                    Args: []string{
                        "-provisioner=" + externalPluginName,
                        "-grace-period=0",
                    },
                    Ports: []v1.ContainerPort{
                        {Name: "nfs", ContainerPort: 2049},
                        {Name: "mountd", ContainerPort: 20048},
                        {Name: "rpcbind", ContainerPort: 111},
                        {Name: "rpcbind-udp", ContainerPort: 111, Protocol: v1.ProtocolUDP},
                    },
                    Env: []v1.EnvVar{
                        {
                            Name: "POD_IP",
                            ValueFrom: &v1.EnvVarSource{
                                FieldRef: &v1.ObjectFieldSelector{
                                    FieldPath: "status.podIP",
                                },
                            },
                        },
                    },
                    ImagePullPolicy: v1.PullIfNotPresent,
                    VolumeMounts: []v1.VolumeMount{
                        {
                            Name: "export-volume",
                            MountPath: "/export",
                        },
                    },
                },
            },
            Volumes: []v1.Volume{
                {
                    Name: "export-volume",
                    VolumeSource: v1.VolumeSource{
                        EmptyDir: &v1.EmptyDirVolumeSource{},
                    },
                },
            },
        },
    }
    provisionerPod, err := podClient.Create(provisionerPod)
    framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)
    framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod))
    By("locating the provisioner pod")
    pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{})
    framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
    return pod
    n.externalProvisionerPod = utils.StartExternalProvisioner(cs, ns.Name, n.externalPluginName)
}
func (n *nfsDriver) CleanupDriver() {


@@ -313,3 +313,76 @@ func RunInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
    }()
    framework.ExpectNoError(framework.WaitForPodSuccessInNamespaceSlow(c, pod.Name, pod.Namespace))
}
func StartExternalProvisioner(c clientset.Interface, ns string, externalPluginName string) *v1.Pod {
    podClient := c.CoreV1().Pods(ns)
    provisionerPod := &v1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind: "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: "external-provisioner-",
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name: "nfs-provisioner",
                    Image: "quay.io/kubernetes_incubator/nfs-provisioner:v2.1.0-k8s1.11",
                    SecurityContext: &v1.SecurityContext{
                        Capabilities: &v1.Capabilities{
                            Add: []v1.Capability{"DAC_READ_SEARCH"},
                        },
                    },
                    Args: []string{
                        "-provisioner=" + externalPluginName,
                        "-grace-period=0",
                    },
                    Ports: []v1.ContainerPort{
                        {Name: "nfs", ContainerPort: 2049},
                        {Name: "mountd", ContainerPort: 20048},
                        {Name: "rpcbind", ContainerPort: 111},
                        {Name: "rpcbind-udp", ContainerPort: 111, Protocol: v1.ProtocolUDP},
                    },
                    Env: []v1.EnvVar{
                        {
                            Name: "POD_IP",
                            ValueFrom: &v1.EnvVarSource{
                                FieldRef: &v1.ObjectFieldSelector{
                                    FieldPath: "status.podIP",
                                },
                            },
                        },
                    },
                    ImagePullPolicy: v1.PullIfNotPresent,
                    VolumeMounts: []v1.VolumeMount{
                        {
                            Name: "export-volume",
                            MountPath: "/export",
                        },
                    },
                },
            },
            Volumes: []v1.Volume{
                {
                    Name: "export-volume",
                    VolumeSource: v1.VolumeSource{
                        EmptyDir: &v1.EmptyDirVolumeSource{},
                    },
                },
            },
        },
    }
    provisionerPod, err := podClient.Create(provisionerPod)
    framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)
    framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod))
    By("locating the provisioner pod")
    pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{})
    framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
    return pod
}
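With the helper now shared in the storage utils package, a call site looks like the volume_provisioning.go change further down: start the provisioner, make sure the pod is cleaned up, then exercise a StorageClass that names the external plugin. A minimal sketch of such a call site (the package name, function name, and the "example.com/nfs" plugin name are illustrative, not values the e2e tests use):

```go
package example // hypothetical package, for illustration only

import (
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/storage/utils"
)

// runExampleProvisionerTest sketches how a test consumes the shared helper.
func runExampleProvisionerTest(c clientset.Interface, ns string) {
	// Start the containerized nfs-provisioner and tear it down when the test ends.
	pod := utils.StartExternalProvisioner(c, ns, "example.com/nfs")
	defer framework.DeletePodOrFail(c, ns, pod.Name)
	// ...create a StorageClass whose provisioner field matches the plugin name,
	// then exercise dynamic provisioning against it...
}
```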


@@ -786,17 +786,38 @@ var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
    Describe("DynamicProvisioner External", func() {
        It("should let an external dynamic provisioner create and delete persistent volumes [Slow]", func() {
            // external dynamic provisioner pods need additional permissions provided by the
            // persistent-volume-provisioner role
            framework.BindClusterRole(c.RbacV1beta1(), "system:persistent-volume-provisioner", ns,
                rbacv1beta1.Subject{Kind: rbacv1beta1.ServiceAccountKind, Namespace: ns, Name: "default"})
            // persistent-volume-provisioner clusterrole and a leader-locking role
            serviceAccountName := "default"
            subject := rbacv1beta1.Subject{
                Kind: rbacv1beta1.ServiceAccountKind,
                Namespace: ns,
                Name: serviceAccountName,
            }
            err := framework.WaitForAuthorizationUpdate(c.AuthorizationV1beta1(),
                serviceaccount.MakeUsername(ns, "default"),
            framework.BindClusterRole(c.RbacV1beta1(), "system:persistent-volume-provisioner", ns, subject)
            roleName := "leader-locking-nfs-provisioner"
            _, err := f.ClientSet.RbacV1beta1().Roles(ns).Create(&rbacv1beta1.Role{
                ObjectMeta: metav1.ObjectMeta{
                    Name: roleName,
                },
                Rules: []rbacv1beta1.PolicyRule{{
                    APIGroups: []string{""},
                    Resources: []string{"endpoints"},
                    Verbs: []string{"get", "list", "watch", "create", "update", "patch"},
                }},
            })
            framework.ExpectNoError(err, "Failed to create leader-locking role")
            framework.BindRoleInNamespace(c.RbacV1beta1(), roleName, ns, subject)
            err = framework.WaitForAuthorizationUpdate(c.AuthorizationV1beta1(),
                serviceaccount.MakeUsername(ns, serviceAccountName),
                "", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true)
            framework.ExpectNoError(err, "Failed to update authorization: %v", err)
            framework.ExpectNoError(err, "Failed to update authorization")
            By("creating an external dynamic provisioner pod")
            pod := startExternalProvisioner(c, ns)
            pod := utils.StartExternalProvisioner(c, ns, externalPluginName)
            defer framework.DeletePodOrFail(c, ns, pod.Name)
            By("creating a StorageClass")
@@ -1375,79 +1396,6 @@ func startGlusterDpServerPod(c clientset.Interface, ns string) *v1.Pod {
    return pod
}
func startExternalProvisioner(c clientset.Interface, ns string) *v1.Pod {
    podClient := c.CoreV1().Pods(ns)
    provisionerPod := &v1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind: "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: "external-provisioner-",
        },
        Spec: v1.PodSpec{
            Containers: []v1.Container{
                {
                    Name: "nfs-provisioner",
                    Image: "quay.io/kubernetes_incubator/nfs-provisioner:v1.0.9",
                    SecurityContext: &v1.SecurityContext{
                        Capabilities: &v1.Capabilities{
                            Add: []v1.Capability{"DAC_READ_SEARCH"},
                        },
                    },
                    Args: []string{
                        "-provisioner=" + externalPluginName,
                        "-grace-period=0",
                    },
                    Ports: []v1.ContainerPort{
                        {Name: "nfs", ContainerPort: 2049},
                        {Name: "mountd", ContainerPort: 20048},
                        {Name: "rpcbind", ContainerPort: 111},
                        {Name: "rpcbind-udp", ContainerPort: 111, Protocol: v1.ProtocolUDP},
                    },
                    Env: []v1.EnvVar{
                        {
                            Name: "POD_IP",
                            ValueFrom: &v1.EnvVarSource{
                                FieldRef: &v1.ObjectFieldSelector{
                                    FieldPath: "status.podIP",
                                },
                            },
                        },
                    },
                    ImagePullPolicy: v1.PullIfNotPresent,
                    VolumeMounts: []v1.VolumeMount{
                        {
                            Name: "export-volume",
                            MountPath: "/export",
                        },
                    },
                },
            },
            Volumes: []v1.Volume{
                {
                    Name: "export-volume",
                    VolumeSource: v1.VolumeSource{
                        EmptyDir: &v1.EmptyDirVolumeSource{},
                    },
                },
            },
        },
    }
    provisionerPod, err := podClient.Create(provisionerPod)
    framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)
    framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod))
    By("locating the provisioner pod")
    pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{})
    framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
    return pod
}
// waitForProvisionedVolumesDeleted is a polling wrapper to scan all PersistentVolumes for any associated with the test's
// StorageClass. Returns either an error and nil values or the remaining PVs and their count.
func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]*v1.PersistentVolume, error) {