mirror of
https://github.com/k3s-io/kubernetes.git
synced 2025-08-01 15:58:37 +00:00
commit 2ecc71da47
@@ -8,6 +8,7 @@ go_library(
        "authorizer_util.go",
        "cleanup.go",
        "crd_util.go",
        "create.go",
        "deployment_util.go",
        "exec_util.go",
        "flake_reporting_util.go",
@@ -78,7 +79,9 @@ go_library(
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/rbac/v1:go_default_library",
        "//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
        "//staging/src/k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset:go_default_library",
        "//staging/src/k8s.io/apiextensions-apiserver/test/integration/fixtures:go_default_library",
@@ -125,6 +128,7 @@ go_library(
        "//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset:go_default_library",
        "//test/e2e/framework/ginkgowrapper:go_default_library",
        "//test/e2e/framework/metrics:go_default_library",
        "//test/e2e/framework/testfiles:go_default_library",
        "//test/e2e/manifest:go_default_library",
        "//test/e2e/perftype:go_default_library",
        "//test/utils:go_default_library",

569  test/e2e/framework/create.go  Normal file
@@ -0,0 +1,569 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package framework

import (
    "bytes"
    "encoding/json"
    "fmt"

    "github.com/pkg/errors"

    apps "k8s.io/api/apps/v1"
    "k8s.io/api/core/v1"
    rbac "k8s.io/api/rbac/v1"
    storage "k8s.io/api/storage/v1"
    apierrs "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/kubernetes/pkg/api/legacyscheme"
    "k8s.io/kubernetes/test/e2e/framework/testfiles"
)

// LoadFromManifests loads .yaml or .json manifest files and returns
// all items that it finds in them. It supports all items for which
// there is a factory registered in Factories and .yaml files with
// multiple items separated by "---". Files are accessed via the
// "testfiles" package, which means they can come from a file system
// or be built into the binary.
//
// LoadFromManifests has some limitations:
// - aliases are not supported (i.e. use serviceAccountName instead of the deprecated serviceAccount,
//   https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#podspec-v1-core)
//   and are silently ignored
// - the latest stable API version for each item is used, regardless of what
//   is specified in the manifest files
func (f *Framework) LoadFromManifests(files ...string) ([]interface{}, error) {
    var items []interface{}
    err := visitManifests(func(data []byte) error {
        // Ignore any additional fields for now, just determine what we have.
        var what What
        if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), data, &what); err != nil {
            return errors.Wrap(err, "decode TypeMeta")
        }

        factory := Factories[what]
        if factory == nil {
            return errors.Errorf("item of type %+v not supported", what)
        }

        object := factory.New()
        if err := runtime.DecodeInto(legacyscheme.Codecs.UniversalDecoder(), data, object); err != nil {
            return errors.Wrapf(err, "decode %+v", what)
        }
        items = append(items, object)
        return nil
    }, files...)

    return items, err
}

func visitManifests(cb func([]byte) error, files ...string) error {
    for _, fileName := range files {
        Logf("parsing %s", fileName)
        data, err := testfiles.Read(fileName)
        if err != nil {
            Failf("reading manifest file: %v", err)
        }

        // Split at the "---" separator before working on
        // individual items. Only works for .yaml.
        //
        // We need to split ourselves because we need access
        // to each original chunk of data for
        // runtime.DecodeInto. kubectl has its own
        // infrastructure for this, but that is a lot of code
        // with many dependencies.
        items := bytes.Split(data, []byte("\n---"))

        for _, item := range items {
            if err := cb(item); err != nil {
                return errors.Wrap(err, fileName)
            }
        }
    }
    return nil
}
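
// A minimal sketch (not part of this commit) of the splitting behavior
// that visitManifests relies on: a multi-document .yaml file is cut at
// every "\n---" boundary and each chunk is handed to the callback, so
// every chunk has to decode on its own:
//
//	manifest := []byte("kind: Role\nmetadata:\n  name: a\n---\nkind: Role\nmetadata:\n  name: b")
//	for i, chunk := range bytes.Split(manifest, []byte("\n---")) {
//		fmt.Printf("item %d:\n%s\n", i, chunk)
//	}
//
// prints two separate single-document chunks.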

// PatchItems modifies the given items in place such that each test
// gets its own instances, to avoid conflicts between different tests
// and between tests and normal deployments.
//
// This is done by:
// - creating namespaced items inside the test's namespace
// - changing the name of non-namespaced items like ClusterRole
//
// PatchItems has some limitations:
// - only some common items are supported, unknown ones trigger an error
// - only the latest stable API version for each item is supported
func (f *Framework) PatchItems(items ...interface{}) error {
    for _, item := range items {
        Logf("patching original content of %T:\n%s", item, PrettyPrint(item))
        if err := f.patchItemRecursively(item); err != nil {
            return err
        }
    }
    return nil
}

// CreateItems creates the items. Each of them must be an API object
// of a type that is registered in Factories.
//
// It returns either a cleanup function or an error, but never both.
//
// Cleaning up after a test can be triggered in two ways:
// - the test invokes the returned cleanup function,
//   usually in an AfterEach
// - the test suite terminates, potentially after
//   skipping the test's AfterEach (https://github.com/onsi/ginkgo/issues/222)
//
// CreateItems has the same limitations as LoadFromManifests:
// - only some common items are supported, unknown ones trigger an error
// - only the latest stable API version for each item is supported
func (f *Framework) CreateItems(items ...interface{}) (func(), error) {
    var destructors []func() error
    var cleanupHandle CleanupActionHandle
    cleanup := func() {
        if cleanupHandle == nil {
            // Already done.
            return
        }
        RemoveCleanupAction(cleanupHandle)

        // TODO (?): use same logic as framework.go for determining
        // whether we are expected to clean up? This would change the
        // meaning of the -delete-namespace and -delete-namespace-on-failure
        // command line flags, because they would also start to apply
        // to non-namespaced items.
        for _, destructor := range destructors {
            if err := destructor(); err != nil && !apierrs.IsNotFound(err) {
                Logf("deleting failed: %s", err)
            }
        }
    }
    cleanupHandle = AddCleanupAction(cleanup)

    var result error
    for _, item := range items {
        // Each factory knows which item(s) it supports, so try each one.
        done := false
        Logf("creating %T:\n%s", item, PrettyPrint(item))
        for _, factory := range Factories {
            destructor, err := factory.Create(f, item)
            if destructor != nil {
                destructors = append(destructors, destructor)
            }
            if err == nil {
                done = true
                break
            } else if errors.Cause(err) != ItemNotSupported {
                result = err
                break
            }
        }
        if result == nil && !done {
            result = errors.Errorf("item of type %T not supported", item)
            break
        }
    }

    if result != nil {
        cleanup()
        return nil, result
    }

    return cleanup, nil
}
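
// A hypothetical usage sketch (not part of this commit), matching the
// two cleanup paths described above; "items" stands for objects
// returned by LoadFromManifests:
//
//	var cleanup func()
//	BeforeEach(func() {
//		var err error
//		cleanup, err = f.CreateItems(items...)
//		ExpectNoError(err, "create test items")
//	})
//	AfterEach(func() {
//		if cleanup != nil {
//			cleanup() // also unregistered as a suite-level cleanup action
//		}
//	})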

// CreateFromManifests is a combination of LoadFromManifests,
// PatchItems, patching with an optional custom function,
// and CreateItems.
func (f *Framework) CreateFromManifests(patch func(item interface{}) error, files ...string) (func(), error) {
    items, err := f.LoadFromManifests(files...)
    if err != nil {
        return nil, errors.Wrap(err, "CreateFromManifests")
    }
    if err := f.PatchItems(items...); err != nil {
        return nil, err
    }
    if patch != nil {
        for _, item := range items {
            if err := patch(item); err != nil {
                return nil, err
            }
        }
    }
    return f.CreateItems(items...)
}
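
// A hypothetical end-to-end usage sketch (not part of this commit); the
// manifest path is illustrative. The optional patch callback runs after
// the automatic renaming and namespacing done by PatchItems:
//
//	cleanup, err := f.CreateFromManifests(func(item interface{}) error {
//		// additional per-item tweaks go here
//		return nil
//	}, "test/e2e/testing-manifests/example/rbac.yaml")
//	if err != nil {
//		Failf("deploying example manifests: %v", err)
//	}
//	defer cleanup()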

// What is a subset of metav1.TypeMeta which (in contrast to
// metav1.TypeMeta itself) satisfies the runtime.Object interface.
type What struct {
    Kind string `json:"kind"`
}

func (in *What) DeepCopy() *What {
    return &What{Kind: in.Kind}
}

func (in *What) DeepCopyInto(out *What) {
    out.Kind = in.Kind
}

func (in *What) DeepCopyObject() runtime.Object {
    return &What{Kind: in.Kind}
}

func (in *What) GetObjectKind() schema.ObjectKind {
    return nil
}

// ItemFactory provides support for creating one particular item.
// The type gets exported because other packages might want to
// extend the set of pre-defined factories.
type ItemFactory interface {
    // New returns a new empty item.
    New() runtime.Object

    // Create is responsible for creating the item. It returns an
    // error or a cleanup function for the created item.
    // If the item is of an unsupported type, it must return
    // an error that has ItemNotSupported as cause.
    Create(f *Framework, item interface{}) (func() error, error)
}

// ItemNotSupported is the error that Create methods
// must return or wrap when they don't support the given item.
var ItemNotSupported = errors.New("not supported")

var Factories = map[What]ItemFactory{
    {"ClusterRole"}:        &ClusterRoleFactory{},
    {"ClusterRoleBinding"}: &ClusterRoleBindingFactory{},
    {"DaemonSet"}:          &DaemonSetFactory{},
    {"Role"}:               &RoleFactory{},
    {"RoleBinding"}:        &RoleBindingFactory{},
    {"ServiceAccount"}:     &ServiceAccountFactory{},
    {"StatefulSet"}:        &StatefulSetFactory{},
    {"StorageClass"}:       &StorageClassFactory{},
    {"Service"}:            &ServiceFactory{},
}
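
// Because Factories is exported, packages outside the framework can add
// support for further kinds. A hypothetical sketch (not part of this
// commit), assuming a SecretFactory modeled on the factories below
// (New returns an empty *v1.Secret; Create type-asserts the item,
// creates it in the test namespace, and returns a matching delete
// function):
//
//	func init() {
//		Factories[What{"Secret"}] = &SecretFactory{}
//	}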

// PatchName makes the name of some item unique by appending the
// generated unique name.
func (f *Framework) PatchName(item *string) {
    if *item != "" {
        *item = *item + "-" + f.UniqueName
    }
}

// PatchNamespace moves the item into the test's namespace. Not
// all items can be namespaced. For those, the name also needs to be
// patched.
func (f *Framework) PatchNamespace(item *string) {
    if f.Namespace != nil {
        *item = f.Namespace.GetName()
    }
}

func (f *Framework) patchItemRecursively(item interface{}) error {
    switch item := item.(type) {
    case *rbac.Subject:
        f.PatchNamespace(&item.Namespace)
    case *rbac.RoleRef:
        // TODO: avoid hard-coding this special name. Perhaps add a Framework.PredefinedRoles
        // which contains all role names that are defined cluster-wide before the test starts?
        // All those names are exempt from renaming. That list could be populated by querying
        // and get extended by tests.
        if item.Name != "e2e-test-privileged-psp" {
            f.PatchName(&item.Name)
        }
    case *rbac.ClusterRole:
        f.PatchName(&item.Name)
    case *rbac.Role:
        f.PatchNamespace(&item.Namespace)
        // Roles are namespaced, but because the RoleRef case above
        // cannot tell whether the referenced role is a ClusterRole or
        // a Role and therefore always renames, we have to do the same here.
        f.PatchName(&item.Name)
    case *storage.StorageClass:
        f.PatchName(&item.Name)
    case *v1.ServiceAccount:
        f.PatchNamespace(&item.ObjectMeta.Namespace)
    case *rbac.ClusterRoleBinding:
        f.PatchName(&item.Name)
        for i := range item.Subjects {
            if err := f.patchItemRecursively(&item.Subjects[i]); err != nil {
                return errors.Wrapf(err, "%T", f)
            }
        }
        if err := f.patchItemRecursively(&item.RoleRef); err != nil {
            return errors.Wrapf(err, "%T", f)
        }
    case *rbac.RoleBinding:
        f.PatchNamespace(&item.Namespace)
        for i := range item.Subjects {
            if err := f.patchItemRecursively(&item.Subjects[i]); err != nil {
                return errors.Wrapf(err, "%T", f)
            }
        }
        if err := f.patchItemRecursively(&item.RoleRef); err != nil {
            return errors.Wrapf(err, "%T", f)
        }
    case *v1.Service:
        f.PatchNamespace(&item.ObjectMeta.Namespace)
    case *apps.StatefulSet:
        f.PatchNamespace(&item.ObjectMeta.Namespace)
    case *apps.DaemonSet:
        f.PatchNamespace(&item.ObjectMeta.Namespace)
    default:
        return errors.Errorf("missing support for patching item of type %T", item)
    }
    return nil
}

// The individual factories all follow the same template, but with
// enough differences in types and functions that copy-and-paste
// looked like the least dirty approach. Perhaps one day Go will have
// generics.
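
// A sketch (not part of this commit) of what that could look like with
// Go 1.18+ type parameters, which did not exist when this was written;
// the typed clients used below all happen to share this Create/Delete
// shape:
//
//	type creator[T runtime.Object] interface {
//		Create(T) (T, error)
//		Delete(name string, options *metav1.DeleteOptions) error
//	}
//
//	func createItem[T runtime.Object](client creator[T], kind, name string, item T) (func() error, error) {
//		if _, err := client.Create(item); err != nil {
//			return nil, errors.Wrap(err, "create "+kind)
//		}
//		return func() error {
//			Logf("deleting %s %s", kind, name)
//			return client.Delete(name, &metav1.DeleteOptions{})
//		}, nil
//	}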

type ServiceAccountFactory struct{}

func (f *ServiceAccountFactory) New() runtime.Object {
    return &v1.ServiceAccount{}
}

func (*ServiceAccountFactory) Create(f *Framework, i interface{}) (func() error, error) {
    item, ok := i.(*v1.ServiceAccount)
    if !ok {
        return nil, ItemNotSupported
    }
    client := f.ClientSet.CoreV1().ServiceAccounts(f.Namespace.GetName())
    if _, err := client.Create(item); err != nil {
        return nil, errors.Wrap(err, "create ServiceAccount")
    }
    return func() error {
        Logf("deleting %T %s", item, item.GetName())
        return client.Delete(item.GetName(), &metav1.DeleteOptions{})
    }, nil
}

type ClusterRoleFactory struct{}

func (f *ClusterRoleFactory) New() runtime.Object {
    return &rbac.ClusterRole{}
}

func (*ClusterRoleFactory) Create(f *Framework, i interface{}) (func() error, error) {
    item, ok := i.(*rbac.ClusterRole)
    if !ok {
        return nil, ItemNotSupported
    }

    // Impersonation is required for Kubernetes < 1.12, see
    // https://github.com/kubernetes/kubernetes/issues/62237#issuecomment-429315111
    //
    // This code is kept even for more recent Kubernetes, because users of
    // the framework outside of Kubernetes might run against an older version
    // of Kubernetes. It will be deprecated eventually.
    //
    // TODO: is this only needed for a ClusterRole or also for other non-namespaced
    // items?
    Logf("Creating an impersonating superuser kubernetes clientset to define cluster role")
    rc, err := LoadConfig()
    ExpectNoError(err)
    rc.Impersonate = restclient.ImpersonationConfig{
        UserName: "superuser",
        Groups:   []string{"system:masters"},
    }
    superuserClientset, err := clientset.NewForConfig(rc)
    ExpectNoError(err, "create superuser clientset")

    client := superuserClientset.RbacV1().ClusterRoles()
    if _, err = client.Create(item); err != nil {
        return nil, errors.Wrap(err, "create ClusterRole")
    }
    return func() error {
        Logf("deleting %T %s", item, item.GetName())
        return client.Delete(item.GetName(), &metav1.DeleteOptions{})
    }, nil
}

type ClusterRoleBindingFactory struct{}

func (f *ClusterRoleBindingFactory) New() runtime.Object {
    return &rbac.ClusterRoleBinding{}
}

func (*ClusterRoleBindingFactory) Create(f *Framework, i interface{}) (func() error, error) {
    item, ok := i.(*rbac.ClusterRoleBinding)
    if !ok {
        return nil, ItemNotSupported
    }

    client := f.ClientSet.RbacV1().ClusterRoleBindings()
    if _, err := client.Create(item); err != nil {
        return nil, errors.Wrap(err, "create ClusterRoleBinding")
    }
    return func() error {
        Logf("deleting %T %s", item, item.GetName())
        return client.Delete(item.GetName(), &metav1.DeleteOptions{})
    }, nil
}

type RoleFactory struct{}

func (f *RoleFactory) New() runtime.Object {
    return &rbac.Role{}
}

func (*RoleFactory) Create(f *Framework, i interface{}) (func() error, error) {
    item, ok := i.(*rbac.Role)
    if !ok {
        return nil, ItemNotSupported
    }

    client := f.ClientSet.RbacV1().Roles(f.Namespace.GetName())
    if _, err := client.Create(item); err != nil {
        return nil, errors.Wrap(err, "create Role")
    }
    return func() error {
        Logf("deleting %T %s", item, item.GetName())
        return client.Delete(item.GetName(), &metav1.DeleteOptions{})
    }, nil
}

type RoleBindingFactory struct{}

func (f *RoleBindingFactory) New() runtime.Object {
    return &rbac.RoleBinding{}
}

func (*RoleBindingFactory) Create(f *Framework, i interface{}) (func() error, error) {
    item, ok := i.(*rbac.RoleBinding)
    if !ok {
        return nil, ItemNotSupported
    }

    client := f.ClientSet.RbacV1().RoleBindings(f.Namespace.GetName())
    if _, err := client.Create(item); err != nil {
        return nil, errors.Wrap(err, "create RoleBinding")
    }
    return func() error {
        Logf("deleting %T %s", item, item.GetName())
        return client.Delete(item.GetName(), &metav1.DeleteOptions{})
    }, nil
}

type ServiceFactory struct{}

func (f *ServiceFactory) New() runtime.Object {
    return &v1.Service{}
}

func (*ServiceFactory) Create(f *Framework, i interface{}) (func() error, error) {
    item, ok := i.(*v1.Service)
    if !ok {
        return nil, ItemNotSupported
    }

    client := f.ClientSet.CoreV1().Services(f.Namespace.GetName())
    if _, err := client.Create(item); err != nil {
        return nil, errors.Wrap(err, "create Service")
    }
    return func() error {
        Logf("deleting %T %s", item, item.GetName())
        return client.Delete(item.GetName(), &metav1.DeleteOptions{})
    }, nil
}

type StatefulSetFactory struct{}

func (f *StatefulSetFactory) New() runtime.Object {
    return &apps.StatefulSet{}
}

func (*StatefulSetFactory) Create(f *Framework, i interface{}) (func() error, error) {
    item, ok := i.(*apps.StatefulSet)
    if !ok {
        return nil, ItemNotSupported
    }

    client := f.ClientSet.AppsV1().StatefulSets(f.Namespace.GetName())
    if _, err := client.Create(item); err != nil {
        return nil, errors.Wrap(err, "create StatefulSet")
    }
    return func() error {
        Logf("deleting %T %s", item, item.GetName())
        return client.Delete(item.GetName(), &metav1.DeleteOptions{})
    }, nil
}

type DaemonSetFactory struct{}

func (f *DaemonSetFactory) New() runtime.Object {
    return &apps.DaemonSet{}
}

func (*DaemonSetFactory) Create(f *Framework, i interface{}) (func() error, error) {
    item, ok := i.(*apps.DaemonSet)
    if !ok {
        return nil, ItemNotSupported
    }

    client := f.ClientSet.AppsV1().DaemonSets(f.Namespace.GetName())
    if _, err := client.Create(item); err != nil {
        return nil, errors.Wrap(err, "create DaemonSet")
    }
    return func() error {
        Logf("deleting %T %s", item, item.GetName())
        return client.Delete(item.GetName(), &metav1.DeleteOptions{})
    }, nil
}

type StorageClassFactory struct{}

func (f *StorageClassFactory) New() runtime.Object {
    return &storage.StorageClass{}
}

func (*StorageClassFactory) Create(f *Framework, i interface{}) (func() error, error) {
    item, ok := i.(*storage.StorageClass)
    if !ok {
        return nil, ItemNotSupported
    }

    client := f.ClientSet.StorageV1().StorageClasses()
    if _, err := client.Create(item); err != nil {
        return nil, errors.Wrap(err, "create StorageClass")
    }
    return func() error {
        Logf("deleting %T %s", item, item.GetName())
        return client.Delete(item.GetName(), &metav1.DeleteOptions{})
    }, nil
}

// PrettyPrint returns a human-readable representation of an item.
func PrettyPrint(item interface{}) string {
    data, err := json.MarshalIndent(item, "", " ")
    if err == nil {
        return string(data)
    }
    return fmt.Sprintf("%+v", item)
}
@@ -25,6 +25,7 @@ import (
    "bufio"
    "bytes"
    "fmt"
    "math/rand"
    "os"
    "strings"
    "sync"
@@ -68,6 +69,11 @@ const (
type Framework struct {
    BaseName string

    // Set together with creating the ClientSet and the namespace.
    // Guaranteed to be unique in the cluster even when running the same
    // test multiple times in parallel.
    UniqueName string

    ClientSet                        clientset.Interface
    KubemarkExternalClusterClientSet clientset.Interface
    APIExtensionsClientSet           apiextensionsclient.Interface
@@ -229,6 +235,10 @@ func (f *Framework) BeforeEach() {
        } else {
            Logf("Skipping waiting for service account")
        }
        f.UniqueName = f.Namespace.GetName()
    } else {
        // not guaranteed to be unique, but very likely
        f.UniqueName = fmt.Sprintf("%s-%08x", f.BaseName, rand.Int31())
    }

    if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" {
@@ -41,7 +41,6 @@ go_library(
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/extensions/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/rbac/v1:go_default_library",
        "//staging/src/k8s.io/api/rbac/v1beta1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
@@ -66,7 +65,6 @@ go_library(
        "//staging/src/k8s.io/apiserver/pkg/authentication/serviceaccount:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//staging/src/k8s.io/client-go/rest:go_default_library",
        "//staging/src/k8s.io/csi-api/pkg/apis/csi/v1alpha1:go_default_library",
        "//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
        "//staging/src/k8s.io/csi-api/pkg/crd:go_default_library",
@@ -74,7 +72,6 @@ go_library(
        "//test/e2e/framework/metrics:go_default_library",
        "//test/e2e/framework/providers/gce:go_default_library",
        "//test/e2e/framework/testfiles:go_default_library",
        "//test/e2e/manifest:go_default_library",
        "//test/e2e/storage/drivers:go_default_library",
        "//test/e2e/storage/testsuites:go_default_library",
        "//test/e2e/storage/utils:go_default_library",
@@ -26,19 +26,14 @@ import (
    "os"
    "path"
    "path/filepath"
    "time"

    "k8s.io/api/core/v1"
    rbacv1 "k8s.io/api/rbac/v1"
    apierrs "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/uuid"
    "k8s.io/apimachinery/pkg/util/wait"

    clientset "k8s.io/client-go/kubernetes"
    restclient "k8s.io/client-go/rest"
    "k8s.io/kubernetes/test/e2e/framework"
    "k8s.io/kubernetes/test/e2e/manifest"

    . "github.com/onsi/ginkgo"

@@ -69,472 +64,6 @@ func csiContainerImage(image string) string {
    return fullName
}

// Create the driver registrar cluster role if it doesn't exist, no teardown so that tests
// are parallelizable. This role will be shared with many of the CSI tests.
func csiDriverRegistrarClusterRole(
    config framework.VolumeTestConfig,
) *rbacv1.ClusterRole {
    // TODO(Issue: #62237) Remove impersonation workaround and cluster role when issue resolved
    By("Creating an impersonating superuser kubernetes clientset to define cluster role")
    rc, err := framework.LoadConfig()
    framework.ExpectNoError(err)
    rc.Impersonate = restclient.ImpersonationConfig{
        UserName: "superuser",
        Groups:   []string{"system:masters"},
    }
    superuserClientset, err := clientset.NewForConfig(rc)
    framework.ExpectNoError(err, "Failed to create superuser clientset: %v", err)
    By("Creating the CSI driver registrar cluster role")
    clusterRoleClient := superuserClientset.RbacV1().ClusterRoles()
    role := &rbacv1.ClusterRole{
        ObjectMeta: metav1.ObjectMeta{
            Name: csiDriverRegistrarClusterRoleName,
        },
        Rules: []rbacv1.PolicyRule{
            {
                APIGroups: []string{""},
                Resources: []string{"events"},
                Verbs:     []string{"get", "list", "watch", "create", "update", "patch"},
            },
            {
                APIGroups: []string{""},
                Resources: []string{"nodes"},
                Verbs:     []string{"get", "update", "patch"},
            },
        },
    }

    ret, err := clusterRoleClient.Create(role)
    if err != nil {
        if apierrs.IsAlreadyExists(err) {
            return ret
        }
        framework.ExpectNoError(err, "Failed to create %s cluster role: %v", role.GetName(), err)
    }

    return ret
}

func csiServiceAccount(
    client clientset.Interface,
    config framework.VolumeTestConfig,
    componentName string,
    teardown bool,
) *v1.ServiceAccount {
    creatingString := "Creating"
    if teardown {
        creatingString = "Deleting"
    }
    By(fmt.Sprintf("%v a CSI service account for %v", creatingString, componentName))
    serviceAccountName := config.Prefix + "-" + componentName + "-service-account"
    serviceAccountClient := client.CoreV1().ServiceAccounts(config.Namespace)
    sa := &v1.ServiceAccount{
        ObjectMeta: metav1.ObjectMeta{
            Name:      serviceAccountName,
            Namespace: config.Namespace,
        },
    }

    serviceAccountClient.Delete(sa.GetName(), &metav1.DeleteOptions{})
    err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
        _, err := serviceAccountClient.Get(sa.GetName(), metav1.GetOptions{})
        return apierrs.IsNotFound(err), nil
    })
    framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)

    if teardown {
        return nil
    }

    ret, err := serviceAccountClient.Create(sa)
    if err != nil {
        framework.ExpectNoError(err, "Failed to create %s service account: %v", sa.GetName(), err)
    }

    return ret
}

func csiClusterRoleBindings(
    client clientset.Interface,
    config framework.VolumeTestConfig,
    teardown bool,
    sa *v1.ServiceAccount,
    clusterRolesNames []string,
) {
    bindingString := "Binding"
    if teardown {
        bindingString = "Unbinding"
    }
    By(fmt.Sprintf("%v cluster roles %v to the CSI service account %v", bindingString, clusterRolesNames, sa.GetName()))
    clusterRoleBindingClient := client.RbacV1().ClusterRoleBindings()
    for _, clusterRoleName := range clusterRolesNames {
        binding := &rbacv1.ClusterRoleBinding{
            ObjectMeta: metav1.ObjectMeta{
                Name: clusterRoleName + "-" + config.Namespace + "-" + string(uuid.NewUUID()),
            },
            Subjects: []rbacv1.Subject{
                {
                    Kind:      rbacv1.ServiceAccountKind,
                    Name:      sa.GetName(),
                    Namespace: sa.GetNamespace(),
                },
            },
            RoleRef: rbacv1.RoleRef{
                Kind:     "ClusterRole",
                Name:     clusterRoleName,
                APIGroup: "rbac.authorization.k8s.io",
            },
        }

        clusterRoleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{})
        err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
            _, err := clusterRoleBindingClient.Get(binding.GetName(), metav1.GetOptions{})
            return apierrs.IsNotFound(err), nil
        })
        framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)

        if teardown {
            return
        }

        _, err = clusterRoleBindingClient.Create(binding)
        if err != nil {
            framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
        }
    }
}

func csiControllerRole(
    client clientset.Interface,
    config framework.VolumeTestConfig,
    teardown bool,
) string {
    action := "Creating"
    if teardown {
        action = "Deleting"
    }

    By(fmt.Sprintf("%v CSI controller role", action))

    role, err := manifest.RoleFromManifest("test/e2e/testing-manifests/storage-csi/controller-role.yaml", config.Namespace)
    framework.ExpectNoError(err, "Failed to create Role from manifest")

    client.RbacV1().Roles(role.Namespace).Delete(role.Name, nil)
    err = wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
        _, err := client.RbacV1().Roles(role.Namespace).Get(role.Name, metav1.GetOptions{})
        return apierrs.IsNotFound(err), nil
    })
    framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)

    if teardown {
        return role.Name
    }

    _, err = client.RbacV1().Roles(role.Namespace).Create(role)
    if err != nil {
        framework.ExpectNoError(err, "Failed to create %s role: %v", role.Name, err)
    }
    return role.Name
}

func csiControllerRoleBinding(
    client clientset.Interface,
    config framework.VolumeTestConfig,
    teardown bool,
    roleName string,
    sa *v1.ServiceAccount,
) {
    bindingString := "Binding"
    if teardown {
        bindingString = "Unbinding"
    }
    By(fmt.Sprintf("%v role %v to the CSI service account %v", bindingString, roleName, sa.GetName()))
    roleBindingClient := client.RbacV1().RoleBindings(config.Namespace)
    binding := &rbacv1.RoleBinding{
        ObjectMeta: metav1.ObjectMeta{
            Name: config.Prefix + "-" + roleName + "-" + config.Namespace + "-role-binding",
        },
        Subjects: []rbacv1.Subject{
            {
                Kind:      "ServiceAccount",
                Name:      sa.GetName(),
                Namespace: sa.GetNamespace(),
            },
        },
        RoleRef: rbacv1.RoleRef{
            Kind:     "Role",
            Name:     roleName,
            APIGroup: "rbac.authorization.k8s.io",
        },
    }

    roleBindingClient.Delete(binding.GetName(), &metav1.DeleteOptions{})
    err := wait.Poll(2*time.Second, 10*time.Minute, func() (bool, error) {
        _, err := roleBindingClient.Get(binding.GetName(), metav1.GetOptions{})
        return apierrs.IsNotFound(err), nil
    })
    framework.ExpectNoError(err, "Timed out waiting for deletion: %v", err)

    if teardown {
        return
    }

    _, err = roleBindingClient.Create(binding)
    if err != nil {
        framework.ExpectNoError(err, "Failed to create %s role binding: %v", binding.GetName(), err)
    }
}

func csiHostPathPod(
    client clientset.Interface,
    config framework.VolumeTestConfig,
    teardown bool,
    f *framework.Framework,
    sa *v1.ServiceAccount,
) *v1.Pod {
    priv := true
    mountPropagation := v1.MountPropagationBidirectional
    hostPathType := v1.HostPathDirectoryOrCreate
    pod := &v1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:      config.Prefix + "-pod",
            Namespace: config.Namespace,
            Labels: map[string]string{
                "app": "hostpath-driver",
            },
        },
        Spec: v1.PodSpec{
            ServiceAccountName: sa.GetName(),
            NodeName:           config.ServerNodeName,
            RestartPolicy:      v1.RestartPolicyNever,
            Containers: []v1.Container{
                {
                    Name:            "external-provisioner",
                    Image:           csiContainerImage("csi-provisioner"),
                    ImagePullPolicy: v1.PullAlways,
                    SecurityContext: &v1.SecurityContext{
                        Privileged: &priv,
                    },
                    Args: []string{
                        "--v=5",
                        "--provisioner=csi-hostpath",
                        "--csi-address=/csi/csi.sock",
                    },
                    VolumeMounts: []v1.VolumeMount{
                        {
                            Name:      "socket-dir",
                            MountPath: "/csi",
                        },
                    },
                },
                {
                    Name:            "driver-registrar",
                    Image:           csiContainerImage("driver-registrar"),
                    ImagePullPolicy: v1.PullAlways,
                    SecurityContext: &v1.SecurityContext{
                        Privileged: &priv,
                    },
                    Args: []string{
                        "--v=5",
                        "--csi-address=/csi/csi.sock",
                        "--kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock",
                    },
                    Env: []v1.EnvVar{
                        {
                            Name: "KUBE_NODE_NAME",
                            ValueFrom: &v1.EnvVarSource{
                                FieldRef: &v1.ObjectFieldSelector{
                                    FieldPath: "spec.nodeName",
                                },
                            },
                        },
                    },
                    VolumeMounts: []v1.VolumeMount{
                        {
                            Name:      "socket-dir",
                            MountPath: "/csi",
                        },
                        {
                            Name:      "registration-dir",
                            MountPath: "/registration",
                        },
                    },
                },
                {
                    Name:            "external-attacher",
                    Image:           csiContainerImage("csi-attacher"),
                    ImagePullPolicy: v1.PullAlways,
                    SecurityContext: &v1.SecurityContext{
                        Privileged: &priv,
                    },
                    Args: []string{
                        "--v=5",
                        "--csi-address=$(ADDRESS)",
                    },
                    Env: []v1.EnvVar{
                        {
                            Name:  "ADDRESS",
                            Value: "/csi/csi.sock",
                        },
                    },
                    VolumeMounts: []v1.VolumeMount{
                        {
                            Name:      "socket-dir",
                            MountPath: "/csi",
                        },
                    },
                },
                {
                    Name:            "hostpath-driver",
                    Image:           csiContainerImage("hostpathplugin"),
                    ImagePullPolicy: v1.PullAlways,
                    SecurityContext: &v1.SecurityContext{
                        Privileged: &priv,
                    },
                    Args: []string{
                        "--v=5",
                        "--endpoint=$(CSI_ENDPOINT)",
                        "--nodeid=$(KUBE_NODE_NAME)",
                    },
                    Env: []v1.EnvVar{
                        {
                            Name:  "CSI_ENDPOINT",
                            Value: "unix://" + "/csi/csi.sock",
                        },
                        {
                            Name: "KUBE_NODE_NAME",
                            ValueFrom: &v1.EnvVarSource{
                                FieldRef: &v1.ObjectFieldSelector{
                                    FieldPath: "spec.nodeName",
                                },
                            },
                        },
                    },
                    VolumeMounts: []v1.VolumeMount{
                        {
                            Name:      "socket-dir",
                            MountPath: "/csi",
                        },
                        {
                            Name:             "mountpoint-dir",
                            MountPath:        "/var/lib/kubelet/pods",
                            MountPropagation: &mountPropagation,
                        },
                    },
                },
            },
            Volumes: []v1.Volume{
                {
                    Name: "socket-dir",
                    VolumeSource: v1.VolumeSource{
                        HostPath: &v1.HostPathVolumeSource{
                            Path: "/var/lib/kubelet/plugins/csi-hostpath",
                            Type: &hostPathType,
                        },
                    },
                },
                {
                    Name: "registration-dir",
                    VolumeSource: v1.VolumeSource{
                        HostPath: &v1.HostPathVolumeSource{
                            Path: "/var/lib/kubelet/plugins",
                            Type: &hostPathType,
                        },
                    },
                },
                {
                    Name: "mountpoint-dir",
                    VolumeSource: v1.VolumeSource{
                        HostPath: &v1.HostPathVolumeSource{
                            Path: "/var/lib/kubelet/pods",
                            Type: &hostPathType,
                        },
                    },
                },
            },
        },
    }

    err := framework.DeletePodWithWait(f, client, pod)
    framework.ExpectNoError(err, "Failed to delete pod %s/%s: %v",
        pod.GetNamespace(), pod.GetName(), err)

    if teardown {
        return nil
    }

    // Creating the pod can fail initially while the service
    // account's secret isn't provisioned yet ('No API token found
    // for service account "csi-service-account", retry after the
    // token is automatically created and added to the service
    // account', see https://github.com/kubernetes/kubernetes/issues/68776).
    // We could use a DaemonSet, but then the name of the csi-pod changes
    // during each test run. It's simpler to just try for a while here.
    podClient := f.PodClient()
    ret := podClient.CreateEventually(pod)

    // Wait for pod to come up
    framework.ExpectNoError(framework.WaitForPodRunningInNamespace(client, ret))
    return ret
}

func deployGCEPDCSIDriver(
    client clientset.Interface,
    config framework.VolumeTestConfig,
    teardown bool,
    f *framework.Framework,
    nodeSA *v1.ServiceAccount,
    controllerSA *v1.ServiceAccount,
) {
    // Get API Objects from manifests
    nodeds, err := manifest.DaemonSetFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml", config.Namespace)
    framework.ExpectNoError(err, "Failed to create DaemonSet from manifest")
    nodeds.Spec.Template.Spec.ServiceAccountName = nodeSA.GetName()

    controllerss, err := manifest.StatefulSetFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml", config.Namespace)
    framework.ExpectNoError(err, "Failed to create StatefulSet from manifest")
    controllerss.Spec.Template.Spec.ServiceAccountName = controllerSA.GetName()

    controllerservice, err := manifest.SvcFromManifest("test/e2e/testing-manifests/storage-csi/gce-pd/controller_service.yaml")
    framework.ExpectNoError(err, "Failed to create Service from manifest")

    // Got all objects from manifests, now try to delete them.
    err = client.CoreV1().Services(config.Namespace).Delete(controllerservice.GetName(), nil)
    if err != nil {
        if !apierrs.IsNotFound(err) {
            framework.ExpectNoError(err, "Failed to delete Service: %v", controllerservice.GetName())
        }
    }

    err = client.AppsV1().StatefulSets(config.Namespace).Delete(controllerss.Name, nil)
    if err != nil {
        if !apierrs.IsNotFound(err) {
            framework.ExpectNoError(err, "Failed to delete StatefulSet: %v", controllerss.GetName())
        }
    }
    err = client.AppsV1().DaemonSets(config.Namespace).Delete(nodeds.Name, nil)
    if err != nil {
        if !apierrs.IsNotFound(err) {
            framework.ExpectNoError(err, "Failed to delete DaemonSet: %v", nodeds.GetName())
        }
    }
    if teardown {
        return
    }

    // Create new API Objects through client
    _, err = client.CoreV1().Services(config.Namespace).Create(controllerservice)
    framework.ExpectNoError(err, "Failed to create Service: %v", controllerservice.Name)

    _, err = client.AppsV1().StatefulSets(config.Namespace).Create(controllerss)
    framework.ExpectNoError(err, "Failed to create StatefulSet: %v", controllerss.Name)

    _, err = client.AppsV1().DaemonSets(config.Namespace).Create(nodeds)
    framework.ExpectNoError(err, "Failed to create DaemonSet: %v", nodeds.Name)
}

func createCSICRDs(c apiextensionsclient.Interface) {
    By("Creating CSI CRDs")
    crds := []*apiextensionsv1beta1.CustomResourceDefinition{
@@ -40,16 +40,10 @@ import (
    . "github.com/onsi/gomega"
)

const (
    csiExternalProvisionerClusterRoleName string = "system:csi-external-provisioner"
    csiExternalAttacherClusterRoleName    string = "system:csi-external-attacher"
    csiDriverRegistrarClusterRoleName     string = "csi-driver-registrar"
)

type csiTestDriver interface {
    createCSIDriver()
    cleanupCSIDriver()
    createStorageClassTest(node v1.Node) testsuites.StorageClassTest
    createStorageClassTest() testsuites.StorageClassTest
}

var csiTestDrivers = map[string]func(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver{
@@ -60,7 +54,7 @@ var csiTestDrivers = map[string]func(f *framework.Framework, config framework.Vo
}

var _ = utils.SIGDescribe("CSI Volumes", func() {
    f := framework.NewDefaultFramework("csi-mock-plugin")
    f := framework.NewDefaultFramework("csi-volumes")

    var (
        cs clientset.Interface
@@ -76,6 +70,7 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
        crdclient = f.APIExtensionsClientSet
        csics = f.CSIClientSet
        ns = f.Namespace

        nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
        node = nodes.Items[rand.Intn(len(nodes.Items))]
        config = framework.VolumeTestConfig{
@@ -86,7 +81,6 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
            ServerNodeName:    node.Name,
            WaitForCompletion: true,
        }
        csiDriverRegistrarClusterRole(config)
        createCSICRDs(crdclient)
    })
@@ -109,17 +103,23 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
        })

        It("should provision storage", func() {
            t := driver.createStorageClassTest(node)
            t := driver.createStorageClassTest()
            claim := newClaim(t, ns.GetName(), "")
            class := newStorageClass(t, ns.GetName(), "")
            claim.Spec.StorageClassName = &class.ObjectMeta.Name
            var class *storagev1.StorageClass
            if t.StorageClassName == "" {
                class = newStorageClass(t, ns.GetName(), "")
                claim.Spec.StorageClassName = &class.ObjectMeta.Name
            } else {
                scName := t.StorageClassName
                claim.Spec.StorageClassName = &scName
            }
            testsuites.TestDynamicProvisioning(t, cs, claim, class)
        })
    })
}

    // Use [Serial], because there can be only one CSIDriver for csi-hostpath driver.
    Context("CSI attach test using HostPath driver [Serial][Feature:CSISkipAttach]", func() {
    // The CSIDriverRegistry feature gate is needed for this test in Kubernetes 1.12.
    Context("CSI attach test using HostPath driver [Feature:CSISkipAttach]", func() {
        var (
            driver csiTestDriver
        )
@@ -161,14 +161,14 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
            test := t
            It(test.name, func() {
                if test.driverExists {
                    driver := createCSIDriver(csics, test.driverAttachable)
                    driver := createCSIDriver(csics, "csi-hostpath-"+f.UniqueName, test.driverAttachable)
                    if driver != nil {
                        defer csics.CsiV1alpha1().CSIDrivers().Delete(driver.Name, nil)
                    }
                }

                By("Creating pod")
                t := driver.createStorageClassTest(node)
                t := driver.createStorageClassTest()
                class, claim, pod := startPausePod(cs, t, ns.Name)
                if class != nil {
                    defer cs.StorageV1().StorageClasses().Delete(class.Name, nil)
@@ -210,11 +210,11 @@ var _ = utils.SIGDescribe("CSI Volumes", func() {
        })
    })
func createCSIDriver(csics csiclient.Interface, attachable bool) *csiv1alpha1.CSIDriver {
func createCSIDriver(csics csiclient.Interface, name string, attachable bool) *csiv1alpha1.CSIDriver {
    By("Creating CSIDriver instance")
    driver := &csiv1alpha1.CSIDriver{
        ObjectMeta: metav1.ObjectMeta{
            Name: "csi-hostpath",
            Name: name,
        },
        Spec: csiv1alpha1.CSIDriverSpec{
            AttachRequired: &attachable,
@@ -295,68 +295,75 @@ func startPausePod(cs clientset.Interface, t testsuites.StorageClassTest, ns str
}

type hostpathCSIDriver struct {
    combinedClusterRoleNames []string
    serviceAccount           *v1.ServiceAccount

    f       *framework.Framework
    config  framework.VolumeTestConfig
    cleanup func()
}

func initCSIHostpath(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver {
    return &hostpathCSIDriver{
        combinedClusterRoleNames: []string{
            csiExternalAttacherClusterRoleName,
            csiExternalProvisionerClusterRoleName,
            csiDriverRegistrarClusterRoleName,
        },
        f:      f,
        config: config,
    }
}

func (h *hostpathCSIDriver) createStorageClassTest(node v1.Node) testsuites.StorageClassTest {
func (h *hostpathCSIDriver) createStorageClassTest() testsuites.StorageClassTest {
    return testsuites.StorageClassTest{
        Name:         "csi-hostpath",
        Provisioner:  "csi-hostpath",
        Parameters:   map[string]string{},
        ClaimSize:    "1Gi",
        ExpectedSize: "1Gi",
        NodeName:     node.Name,

        // The hostpath driver only works when everything runs on a single node.
        NodeName: h.config.ServerNodeName,

        // Provisioner and storage class name must match what's used in
        // csi-storageclass.yaml, plus the test-specific suffix.
        Provisioner:      "csi-hostpath-" + h.f.UniqueName,
        StorageClassName: "csi-hostpath-sc-" + h.f.UniqueName,
    }
}

func (h *hostpathCSIDriver) createCSIDriver() {
    By("deploying csi hostpath driver")
    f := h.f
    cs := f.ClientSet
    config := h.config
    h.serviceAccount = csiServiceAccount(cs, config, "hostpath", false)
    csiClusterRoleBindings(cs, config, false, h.serviceAccount, h.combinedClusterRoleNames)
    role := csiControllerRole(cs, config, false)
    csiControllerRoleBinding(cs, config, false, role, h.serviceAccount)
    csiHostPathPod(cs, config, false, f, h.serviceAccount)
    // TODO (?): the storage.csi.image.version and storage.csi.image.registry
    // settings are ignored for this test. We could patch the image definitions.
    o := utils.PatchCSIOptions{
        OldDriverName:            "csi-hostpath",
        NewDriverName:            "csi-hostpath-" + h.f.UniqueName,
        DriverContainerName:      "hostpath",
        ProvisionerContainerName: "csi-provisioner",
        NodeName:                 h.config.ServerNodeName,
    }
    cleanup, err := h.f.CreateFromManifests(func(item interface{}) error {
        return utils.PatchCSIDeployment(h.f, o, item)
    },
        "test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
        "test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
        "test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
        "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-attacher.yaml",
        "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpath-provisioner.yaml",
        "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/csi-hostpathplugin.yaml",
        "test/e2e/testing-manifests/storage-csi/hostpath/hostpath/e2e-test-rbac.yaml",
        "test/e2e/testing-manifests/storage-csi/hostpath/usage/csi-storageclass.yaml",
    )
    h.cleanup = cleanup
    if err != nil {
        framework.Failf("deploying csi hostpath driver: %v", err)
    }
}

func (h *hostpathCSIDriver) cleanupCSIDriver() {
    By("uninstalling csi hostpath driver")
    f := h.f
    cs := f.ClientSet
    config := h.config
    csiHostPathPod(cs, config, true, f, h.serviceAccount)
    csiClusterRoleBindings(cs, config, true, h.serviceAccount, h.combinedClusterRoleNames)
    role := csiControllerRole(cs, config, true)
    csiControllerRoleBinding(cs, config, true, role, h.serviceAccount)
    csiServiceAccount(cs, config, "hostpath", true)
    if h.cleanup != nil {
        By("uninstalling csi hostpath driver")
        h.cleanup()
    }
}

type gcePDCSIDriver struct {
    controllerClusterRoles   []string
    nodeClusterRoles         []string
    controllerServiceAccount *v1.ServiceAccount
    nodeServiceAccount       *v1.ServiceAccount

    f       *framework.Framework
    config  framework.VolumeTestConfig
    cleanup func()
}

func initCSIgcePD(f *framework.Framework, config framework.VolumeTestConfig) csiTestDriver {
@@ -370,21 +377,15 @@ func initCSIgcePD(f *framework.Framework, config framework.VolumeTestConfig) csi
    framework.SkipUnlessSecretExistsAfterWait(cs, "cloud-sa", config.Namespace, 3*time.Minute)

    return &gcePDCSIDriver{
        nodeClusterRoles: []string{
            csiDriverRegistrarClusterRoleName,
        },
        controllerClusterRoles: []string{
            csiExternalAttacherClusterRoleName,
            csiExternalProvisionerClusterRoleName,
        },
        f:      f,
        config: config,
    }
}

func (g *gcePDCSIDriver) createStorageClassTest(node v1.Node) testsuites.StorageClassTest {
func (g *gcePDCSIDriver) createStorageClassTest() testsuites.StorageClassTest {
    return testsuites.StorageClassTest{
        Name: "com.google.csi.gcepd",
        // *Not* renaming the driver, see below.
        Provisioner: "com.google.csi.gcepd",
        Parameters:  map[string]string{"type": "pd-standard"},
        ClaimSize:   "5Gi",
@@ -394,34 +395,38 @@ func (g *gcePDCSIDriver) createStorageClassTest(node v1.Node) testsuites.Storage

func (g *gcePDCSIDriver) createCSIDriver() {
    By("deploying gce-pd driver")
    f := g.f
    cs := f.ClientSet
    config := g.config
    g.controllerServiceAccount = csiServiceAccount(cs, config, "gce-controller", false /* teardown */)
    g.nodeServiceAccount = csiServiceAccount(cs, config, "gce-node", false /* teardown */)
    csiClusterRoleBindings(cs, config, false /* teardown */, g.controllerServiceAccount, g.controllerClusterRoles)
    csiClusterRoleBindings(cs, config, false /* teardown */, g.nodeServiceAccount, g.nodeClusterRoles)
    utils.PrivilegedTestPSPClusterRoleBinding(cs, config.Namespace, false, /* teardown */
        []string{g.controllerServiceAccount.Name, g.nodeServiceAccount.Name})
    role := csiControllerRole(cs, config, false)
    csiControllerRoleBinding(cs, config, false, role, g.controllerServiceAccount)
    deployGCEPDCSIDriver(cs, config, false /* teardown */, f, g.nodeServiceAccount, g.controllerServiceAccount)
    // It would be safer to rename the gcePD driver, but that
    // hasn't been done before either and attempts to do so now led to
    // errors during driver registration, therefore it is disabled
    // by passing a nil patch function below.
    //
    // These are the options which would have to be used:
    // o := utils.PatchCSIOptions{
    //	OldDriverName:            "com.google.csi.gcepd",
    //	NewDriverName:            "com.google.csi.gcepd-" + g.f.UniqueName,
    //	DriverContainerName:      "gce-driver",
    //	ProvisionerContainerName: "csi-external-provisioner",
    // }
    cleanup, err := g.f.CreateFromManifests(nil,
        "test/e2e/testing-manifests/storage-csi/driver-registrar/rbac.yaml",
        "test/e2e/testing-manifests/storage-csi/external-attacher/rbac.yaml",
        "test/e2e/testing-manifests/storage-csi/external-provisioner/rbac.yaml",
        "test/e2e/testing-manifests/storage-csi/gce-pd/csi-controller-rbac.yaml",
        "test/e2e/testing-manifests/storage-csi/gce-pd/node_ds.yaml",
        "test/e2e/testing-manifests/storage-csi/gce-pd/controller_ss.yaml",
        "test/e2e/testing-manifests/storage-csi/gce-pd/controller_service.yaml",
    )
    g.cleanup = cleanup
    if err != nil {
        framework.Failf("deploying csi gce-pd driver: %v", err)
    }
}

func (g *gcePDCSIDriver) cleanupCSIDriver() {
    By("uninstalling gce-pd driver")
    f := g.f
    cs := f.ClientSet
    config := g.config
    deployGCEPDCSIDriver(cs, config, true /* teardown */, f, g.nodeServiceAccount, g.controllerServiceAccount)
    csiClusterRoleBindings(cs, config, true /* teardown */, g.controllerServiceAccount, g.controllerClusterRoles)
    csiClusterRoleBindings(cs, config, true /* teardown */, g.nodeServiceAccount, g.nodeClusterRoles)
    utils.PrivilegedTestPSPClusterRoleBinding(cs, config.Namespace, true, /* teardown */
        []string{g.controllerServiceAccount.Name, g.nodeServiceAccount.Name})
    role := csiControllerRole(cs, config, true)
    csiControllerRoleBinding(cs, config, true, role, g.controllerServiceAccount)
    csiServiceAccount(cs, config, "gce-controller", true /* teardown */)
    csiServiceAccount(cs, config, "gce-node", true /* teardown */)
    if g.cleanup != nil {
        g.cleanup()
    }
}

type gcePDCSIDriverExternal struct {
@@ -435,7 +440,7 @@ func initCSIgcePDExternal(f *framework.Framework, config framework.VolumeTestCon
    return &gcePDCSIDriverExternal{}
}

func (g *gcePDCSIDriverExternal) createStorageClassTest(node v1.Node) testsuites.StorageClassTest {
func (g *gcePDCSIDriverExternal) createStorageClassTest() testsuites.StorageClassTest {
    return testsuites.StorageClassTest{
        Name:        "com.google.csi.gcepd",
        Provisioner: "com.google.csi.gcepd",
@@ -41,6 +41,7 @@ type StorageClassTest struct {
    Name             string
    CloudProviders   []string
    Provisioner      string
    StorageClassName string
    Parameters       map[string]string
    DelayBinding     bool
    ClaimSize        string
@@ -8,13 +8,16 @@ load(
go_library(
    name = "go_default_library",
    srcs = [
        "deployment.go",
        "framework.go",
        "utils.go",
    ],
    importpath = "k8s.io/kubernetes/test/e2e/storage/utils",
    deps = [
        "//staging/src/k8s.io/api/apps/v1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/api/rbac/v1:go_default_library",
        "//staging/src/k8s.io/api/storage/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",

147  test/e2e/storage/utils/deployment.go  Normal file
@@ -0,0 +1,147 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package utils

import (
    "path"
    "strings"

    appsv1 "k8s.io/api/apps/v1"
    "k8s.io/api/core/v1"
    storagev1 "k8s.io/api/storage/v1"
    "k8s.io/kubernetes/test/e2e/framework"
)

// PatchCSIDeployment modifies the CSI driver deployment:
|
||||
// - replaces the provisioner name
|
||||
// - forces pods onto a specific host
|
||||
//
|
||||
// All of that is optional, see PatchCSIOptions. Just beware
|
||||
// that not renaming the CSI driver deployment can be problematic:
|
||||
// - when multiple tests deploy the driver, they need
|
||||
// to run sequentially
|
||||
// - might conflict with manual deployments
|
||||
//
|
||||
// This function is written so that it works for CSI driver deployments
|
||||
// that follow these conventions:
|
||||
// - driver and provisioner names are identical
|
||||
// - the driver binary accepts a --drivername parameter
|
||||
// - the provisioner binary accepts a --provisioner parameter
|
||||
// - the paths inside the container are either fixed
|
||||
// and don't need to be patch (for example, --csi-address=/csi/csi.sock is
|
||||
// okay) or are specified directly in a parameter (for example,
|
||||
// --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock)
|
||||
//
|
||||
// Driver deployments that are different will have to do the patching
|
||||
// without this function, or skip patching entirely.
|
||||
//
|
||||
// TODO (?): the storage.csi.image.version and storage.csi.image.registry
|
||||
// settings are ignored. We could patch the image definitions or deprecate
|
||||
// those options.
|
||||
func PatchCSIDeployment(f *framework.Framework, o PatchCSIOptions, object interface{}) error {
|
||||
rename := o.OldDriverName != "" && o.NewDriverName != "" &&
|
||||
o.OldDriverName != o.NewDriverName
|
||||
|
||||
patchVolumes := func(volumes []v1.Volume) {
|
||||
if !rename {
|
||||
return
|
||||
}
|
||||
for i := range volumes {
|
||||
volume := &volumes[i]
|
||||
if volume.HostPath != nil {
|
||||
// Update paths like /var/lib/kubelet/plugins/<provisioner>.
|
||||
p := &volume.HostPath.Path
|
||||
dir, file := path.Split(*p)
|
||||
if file == o.OldDriverName {
|
||||
*p = path.Join(dir, o.NewDriverName)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
patchContainers := func(containers []v1.Container) {
|
||||
for i := range containers {
|
||||
container := &containers[i]
|
||||
if rename {
|
||||
for e := range container.Args {
|
||||
// Inject test-specific provider name into paths like this one:
|
||||
// --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock
|
||||
container.Args[e] = strings.Replace(container.Args[e], "/"+o.OldDriverName+"/", "/"+o.NewDriverName+"/", 1)
|
||||
}
|
||||
}
|
||||
// Overwrite driver name resp. provider name
|
||||
// by appending a parameter with the right
|
||||
// value.
|
||||
switch container.Name {
|
||||
case o.DriverContainerName:
|
||||
container.Args = append(container.Args, "--drivername="+o.NewDriverName)
|
||||
case o.ProvisionerContainerName:
|
||||
// Driver name is expected to be the same
|
||||
// as the provisioner here.
|
||||
container.Args = append(container.Args, "--provisioner="+o.NewDriverName)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
patchPodSpec := func(spec *v1.PodSpec) {
|
||||
patchContainers(spec.Containers)
|
||||
patchVolumes(spec.Volumes)
|
||||
if o.NodeName != "" {
|
||||
spec.NodeName = o.NodeName
|
||||
}
|
||||
}
|
||||
|
||||
switch object := object.(type) {
|
||||
case *appsv1.ReplicaSet:
|
||||
patchPodSpec(&object.Spec.Template.Spec)
|
||||
case *appsv1.DaemonSet:
|
||||
patchPodSpec(&object.Spec.Template.Spec)
|
||||
case *appsv1.StatefulSet:
|
||||
patchPodSpec(&object.Spec.Template.Spec)
|
||||
case *appsv1.Deployment:
|
||||
patchPodSpec(&object.Spec.Template.Spec)
|
||||
case *storagev1.StorageClass:
|
||||
if o.NewDriverName != "" {
|
||||
// Driver name is expected to be the same
|
||||
// as the provisioner name here.
|
||||
object.Provisioner = o.NewDriverName
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// PatchCSIOptions controls how PatchCSIDeployment patches the objects.
|
||||
type PatchCSIOptions struct {
|
||||
// The original driver name.
|
||||
OldDriverName string
|
||||
// The driver name that replaces the original name.
|
||||
// Can be empty (not used at all) or equal to OldDriverName
|
||||
// (then it will be added were appropriate without renaming
|
||||
// in existing fields).
|
||||
NewDriverName string
|
||||
// The name of the container which has the CSI driver binary.
|
||||
// If non-empty, --drivername with the new name will be
|
||||
// appended to the argument list.
|
||||
DriverContainerName string
|
||||
// The name of the container which has the provisioner binary.
|
||||
// If non-empty, --provisioner with new name will be appended
|
||||
// to the argument list.
|
||||
ProvisionerContainerName string
|
||||
// If non-empty, all pods are forced to run on this node.
|
||||
NodeName string
|
||||
}
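To make the intended use concrete, here is a minimal, hypothetical sketch of driving PatchCSIDeployment from a test. Only PatchCSIDeployment and PatchCSIOptions come from this commit; the option values and the StatefulSet stub are illustrative assumptions:

    // Hypothetical usage sketch; all names and values below are made up.
    func examplePatchUsage(f *framework.Framework) error {
        o := PatchCSIOptions{
            OldDriverName:            "csi-hostpath",
            NewDriverName:            "csi-hostpath-e2e", // per-test name, avoids conflicts between tests
            DriverContainerName:      "hostpath",         // gets --drivername=csi-hostpath-e2e appended
            ProvisionerContainerName: "csi-provisioner",  // gets --provisioner=csi-hostpath-e2e appended
            NodeName:                 "",                 // leave empty to keep normal scheduling
        }
        // In practice the object would come from a loaded manifest;
        // an empty stub is enough to show the call.
        ss := &appsv1.StatefulSet{}
        return PatchCSIDeployment(f, o, ss)
    }

Renaming per test, as sketched here, sidesteps the sequential-execution caveat from the doc comment: each test patches its own copy of the deployment and registers under a unique driver name.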
@ -1,11 +1,3 @@
# Role for external CSI provisioner and attacher.
# They need to modify Endpoints and ConfigMap for leader election.

kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-controller
rules:
  - apiGroups: [""]
    resources: ["configmaps", "endpoints"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
# Replaced by individual roles for external-attacher and external-provisioner:
# - https://github.com/kubernetes-csi/external-attacher/blob/master/deploy/kubernetes/rbac.yaml
# - https://github.com/kubernetes-csi/external-provisioner/blob/master/deploy/kubernetes/rbac.yaml
@ -0,0 +1 @@
The original file is (or will be) https://github.com/kubernetes-csi/driver-registrar/blob/master/deploy/kubernetes/rbac.yaml
@ -0,0 +1,44 @@
# This YAML file contains all RBAC objects that are necessary to run the
# external CSI driver registrar.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use a non-default namespace and different names
#   for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
#   sidecars, in which case leadership election must be enabled;
#   this influences the RBAC setup, see below

apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-driver-registrar
  # replace with non-default namespace name
  namespace: default

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: driver-registrar-runner
rules:
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["get", "list", "watch", "create", "update", "patch"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "update", "patch"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-driver-registrar-role
subjects:
  - kind: ServiceAccount
    name: csi-driver-registrar
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: ClusterRole
  name: driver-registrar-runner
  apiGroup: rbac.authorization.k8s.io
@ -0,0 +1 @@
The original file is (or will be) https://github.com/kubernetes-csi/external-attacher/blob/master/deploy/kubernetes/rbac.yaml
@ -0,0 +1,81 @@
# This YAML file contains all RBAC objects that are necessary to run the
# external CSI attacher.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use a non-default namespace and different names
#   for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
#   attacher, in which case leadership election must be enabled;
#   this influences the RBAC setup, see below

apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-attacher
  # replace with non-default namespace name
  namespace: default

---
# Attacher must be able to work with PVs, nodes and VolumeAttachments
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-attacher-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["csi.storage.k8s.io"]
    resources: ["csinodeinfos"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "update"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-attacher-role
subjects:
  - kind: ServiceAccount
    name: csi-attacher
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: ClusterRole
  name: external-attacher-runner
  apiGroup: rbac.authorization.k8s.io

---
# Attacher must be able to work with config maps in the current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # replace with non-default namespace name
  namespace: default
  name: external-attacher-cfg
rules:
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-attacher-role-cfg
  # replace with non-default namespace name
  namespace: default
subjects:
  - kind: ServiceAccount
    name: csi-attacher
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: Role
  name: external-attacher-cfg
@ -0,0 +1 @@
The original file is (or will be) https://github.com/kubernetes-csi/external-provisioner/blob/master/deploy/kubernetes/rbac.yaml
@ -0,0 +1,89 @@
# This YAML file contains all RBAC objects that are necessary to run the
# external CSI provisioner.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use a non-default namespace and different names
#   for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
#   provisioner, in which case leadership election must be enabled;
#   this influences the RBAC setup, see below

apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-provisioner
  # replace with non-default namespace name
  namespace: default

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-provisioner-runner
rules:
  - apiGroups: [""]
    resources: ["secrets"]
    verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["get", "list"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-provisioner-role
subjects:
  - kind: ServiceAccount
    name: csi-provisioner
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: ClusterRole
  name: external-provisioner-runner
  apiGroup: rbac.authorization.k8s.io

---
# Provisioner must be able to work with endpoints in the current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # replace with non-default namespace name
  namespace: default
  name: external-provisioner-cfg
rules:
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-provisioner-role-cfg
  # replace with non-default namespace name
  namespace: default
subjects:
  - kind: ServiceAccount
    name: csi-provisioner
    # replace with non-default namespace name
    namespace: default
roleRef:
  kind: Role
  name: external-provisioner-cfg
@ -13,29 +13,24 @@ spec:
    labels:
      app: csi-gce-pd-driver
  spec:
    serviceAccountName: csi-controller
    containers:
      - name: csi-external-provisioner
        imagePullPolicy: Always
        image: quay.io/k8scsi/csi-provisioner:v0.2.0
        image: quay.io/k8scsi/csi-provisioner:v0.4.1
        args:
          - "--v=5"
          - "--provisioner=com.google.csi.gcepd"
          - "--csi-address=$(ADDRESS)"
        env:
          - name: ADDRESS
            value: /csi/csi.sock
          - "--csi-address=/csi/csi.sock"
        volumeMounts:
          - name: socket-dir
            mountPath: /csi
      - name: csi-attacher
        imagePullPolicy: Always
        image: quay.io/k8scsi/csi-attacher:v0.2.0
        image: quay.io/k8scsi/csi-attacher:v0.4.1
        args:
          - "--v=5"
          - "--csi-address=$(ADDRESS)"
        env:
          - name: ADDRESS
            value: /csi/csi.sock
          - "--csi-address=/csi/csi.sock"
        volumeMounts:
          - name: socket-dir
            mountPath: /csi
@ -44,11 +39,9 @@ spec:
        image: gcr.io/google-containers/volume-csi/gcp-compute-persistent-disk-csi-driver:v0.1.0.alpha
        args:
          - "--v=5"
          - "--endpoint=$(CSI_ENDPOINT)"
          - "--endpoint=unix:///csi/csi.sock"
          - "--nodeid=$(KUBE_NODE_NAME)"
        env:
          - name: CSI_ENDPOINT
            value: unix:///csi/csi.sock
          - name: KUBE_NODE_NAME
            valueFrom:
              fieldRef:
@ -0,0 +1,78 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-controller

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-controller-attacher-role
subjects:
  - kind: ServiceAccount
    name: csi-controller
    namespace: default
roleRef:
  kind: ClusterRole
  name: external-attacher-runner
  apiGroup: rbac.authorization.k8s.io

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-controller-attacher-role-cfg
  namespace: default
subjects:
  - kind: ServiceAccount
    name: csi-controller
    namespace: default
roleRef:
  kind: Role
  name: external-attacher-cfg

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-controller-provisioner-role
subjects:
  - kind: ServiceAccount
    name: csi-controller
    namespace: default
roleRef:
  kind: ClusterRole
  name: external-provisioner-runner
  apiGroup: rbac.authorization.k8s.io

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-controller-provisioner-role-cfg
  namespace: default
subjects:
  - kind: ServiceAccount
    name: csi-controller
    namespace: default
roleRef:
  kind: Role
  name: external-provisioner-cfg

---
# privileged Pod Security Policy, previously defined via PrivilegedTestPSPClusterRoleBinding()
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: psp-csi-controller-driver-registrar-role
subjects:
  - kind: ServiceAccount
    name: csi-controller
    namespace: default
  - kind: ServiceAccount
    name: csi-driver-registrar
    namespace: default
roleRef:
  kind: ClusterRole
  name: e2e-test-privileged-psp
  apiGroup: rbac.authorization.k8s.io
@ -12,19 +12,16 @@ spec:
    labels:
      app: csi-gce-driver
  spec:
    serviceAccountName: csi-driver-registrar
    containers:
      - name: csi-driver-registrar
        imagePullPolicy: Always
        image: quay.io/k8scsi/driver-registrar:v0.3.0
        image: quay.io/k8scsi/driver-registrar:v0.4.1
        args:
          - "--v=5"
          - "--csi-address=$(ADDRESS)"
          - "--kubelet-registration-path=$(DRIVER_REG_SOCK_PATH)"
          - "--csi-address=/csi/csi.sock"
          - "--kubelet-registration-path=/var/lib/kubelet/plugins/com.google.csi.gcepd/csi.sock"
        env:
          - name: ADDRESS
            value: /csi/csi.sock
          - name: DRIVER_REG_SOCK_PATH
            value: /var/lib/kubelet/plugins/com.google.csi.gcepd/csi.sock
          - name: KUBE_NODE_NAME
            valueFrom:
              fieldRef:
@ -41,11 +38,9 @@ spec:
        image: gcr.io/google-containers/volume-csi/gcp-compute-persistent-disk-csi-driver:v0.1.0.alpha
        args:
          - "--v=5"
          - "--endpoint=$(CSI_ENDPOINT)"
          - "--endpoint=unix:///csi/csi.sock"
          - "--nodeid=$(KUBE_NODE_NAME)"
        env:
          - name: CSI_ENDPOINT
            value: unix:///csi/csi.sock
          - name: KUBE_NODE_NAME
            valueFrom:
              fieldRef:
@ -0,0 +1,5 @@
A partial copy of https://github.com/kubernetes-csi/docs/tree/master/book/src/example,
with some modifications:
- serviceAccountName is used instead of the deprecated serviceAccount
- the RBAC roles from driver-registrar, external-attacher and external-provisioner
  are used
@ -0,0 +1,48 @@
kind: Service
apiVersion: v1
metadata:
  name: csi-hostpath-attacher
  labels:
    app: csi-hostpath-attacher
spec:
  selector:
    app: csi-hostpath-attacher
  ports:
    - name: dummy
      port: 12345

---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-hostpath-attacher
spec:
  serviceName: "csi-hostpath-attacher"
  replicas: 1
  selector:
    matchLabels:
      app: csi-hostpath-attacher
  template:
    metadata:
      labels:
        app: csi-hostpath-attacher
    spec:
      serviceAccountName: csi-attacher
      containers:
        - name: csi-attacher
          image: quay.io/k8scsi/csi-attacher:v0.4.1
          args:
            - --v=5
            - --csi-address=$(ADDRESS)
          env:
            - name: ADDRESS
              value: /csi/csi.sock
          imagePullPolicy: Always
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-hostpath
            type: DirectoryOrCreate
          name: socket-dir
@ -0,0 +1,49 @@
kind: Service
apiVersion: v1
metadata:
  name: csi-hostpath-provisioner
  labels:
    app: csi-hostpath-provisioner
spec:
  selector:
    app: csi-hostpath-provisioner
  ports:
    - name: dummy
      port: 12345

---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-hostpath-provisioner
spec:
  serviceName: "csi-hostpath-provisioner"
  replicas: 1
  selector:
    matchLabels:
      app: csi-hostpath-provisioner
  template:
    metadata:
      labels:
        app: csi-hostpath-provisioner
    spec:
      serviceAccountName: csi-provisioner
      containers:
        - name: csi-provisioner
          image: quay.io/k8scsi/csi-provisioner:v0.4.1
          args:
            - "--provisioner=csi-hostpath"
            - "--csi-address=$(ADDRESS)"
            - "--connection-timeout=15s"
          env:
            - name: ADDRESS
              value: /csi/csi.sock
          imagePullPolicy: Always
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-hostpath
            type: DirectoryOrCreate
          name: socket-dir
@ -0,0 +1,70 @@
kind: DaemonSet
apiVersion: apps/v1
metadata:
  name: csi-hostpathplugin
spec:
  selector:
    matchLabels:
      app: csi-hostpathplugin
  template:
    metadata:
      labels:
        app: csi-hostpathplugin
    spec:
      serviceAccountName: csi-driver-registrar
      hostNetwork: true
      containers:
        - name: driver-registrar
          image: quay.io/k8scsi/driver-registrar:v0.4.1
          args:
            - --v=5
            - --csi-address=/csi/csi.sock
            - --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          imagePullPolicy: Always
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
            - mountPath: /registration
              name: registration-dir
        - name: hostpath
          image: quay.io/k8scsi/hostpathplugin:v0.4.1
          args:
            - "--v=5"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--nodeid=$(KUBE_NODE_NAME)"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          imagePullPolicy: Always
          securityContext:
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
            - mountPath: /var/lib/kubelet/pods
              mountPropagation: Bidirectional
              name: mountpoint-dir
      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-hostpath
            type: DirectoryOrCreate
          name: socket-dir
        - hostPath:
            path: /var/lib/kubelet/pods
            type: DirectoryOrCreate
          name: mountpoint-dir
        - hostPath:
            path: /var/lib/kubelet/plugins
            type: Directory
          name: registration-dir
@ -0,0 +1,19 @@
# privileged Pod Security Policy, previously defined just for gcePD via PrivilegedTestPSPClusterRoleBinding()
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: psp-csi-hostpath-role
subjects:
  - kind: ServiceAccount
    name: csi-attacher
    namespace: default
  - kind: ServiceAccount
    name: csi-driver-registrar
    namespace: default
  - kind: ServiceAccount
    name: csi-provisioner
    namespace: default
roleRef:
  kind: ClusterRole
  name: e2e-test-privileged-psp
  apiGroup: rbac.authorization.k8s.io
@ -0,0 +1,7 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-hostpath-sc
provisioner: csi-hostpath
reclaimPolicy: Delete
volumeBindingMode: Immediate