Merge pull request #111998 from pohly/e2e-framework-after-each

e2e framework: replace custom cleanup support with Ginkgo v2 mechanisms
Kubernetes Prow Robot 2022-09-09 16:17:22 -07:00, committed by GitHub
commit d9f21e55df
20 changed files with 468 additions and 434 deletions

View File

@@ -85,7 +85,6 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
var _ = ginkgo.SynchronizedAfterSuite(func() {
progressReporter.SetEndMsg()
CleanupSuite()
}, func() {
AfterSuiteActions()
})
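For reference, Ginkgo v2's SynchronizedAfterSuite takes two functions: the first runs on every parallel process, the second only on process 1 once all other processes have finished. A minimal sketch of the shape used above (the comments describe Ginkgo's documented semantics):

var _ = ginkgo.SynchronizedAfterSuite(func() {
	// Runs on every parallel process; per-process teardown goes here.
}, func() {
	// Runs once, on process 1, after all other processes have finished,
	// e.g. AfterSuiteActions() above.
})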

View File

@@ -1,78 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"reflect"
"runtime"
"sync"
)
// CleanupActionHandle is an integer pointer type for handling cleanup action
type CleanupActionHandle *int
type cleanupFuncHandle struct {
actionHandle CleanupActionHandle
actionHook func()
}
var cleanupActionsLock sync.Mutex
var cleanupHookList = []cleanupFuncHandle{}
// AddCleanupAction installs a function that will be called in the event of the
// whole test being terminated. This allows arbitrary pieces of the overall
// test to hook into SynchronizedAfterSuite().
// The hooks are called in last-in-first-out order.
func AddCleanupAction(fn func()) CleanupActionHandle {
p := CleanupActionHandle(new(int))
cleanupActionsLock.Lock()
defer cleanupActionsLock.Unlock()
c := cleanupFuncHandle{actionHandle: p, actionHook: fn}
cleanupHookList = append([]cleanupFuncHandle{c}, cleanupHookList...)
return p
}
// RemoveCleanupAction removes a function that was installed by
// AddCleanupAction.
func RemoveCleanupAction(p CleanupActionHandle) {
cleanupActionsLock.Lock()
defer cleanupActionsLock.Unlock()
for i, item := range cleanupHookList {
if item.actionHandle == p {
cleanupHookList = append(cleanupHookList[:i], cleanupHookList[i+1:]...)
break
}
}
}
// RunCleanupActions runs all functions installed by AddCleanupAction. It does
// not remove them (see RemoveCleanupAction) but it does run unlocked, so they
// may remove themselves.
func RunCleanupActions() {
list := []func(){}
func() {
cleanupActionsLock.Lock()
defer cleanupActionsLock.Unlock()
for _, p := range cleanupHookList {
list = append(list, p.actionHook)
}
}()
// Run unlocked.
for _, fn := range list {
Logf("Running Cleanup Action: %v", runtime.FuncForPC(reflect.ValueOf(fn).Pointer()).Name())
fn()
}
}
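For comparison, before this change a test registered suite-level cleanup with the API above roughly as follows (a sketch following the idiom in generateDriverCleanupFunc further down; the teardown body is illustrative):

handle := new(framework.CleanupActionHandle)
cleanup := func() {
	// ... idempotent teardown of test resources ...
	// Deregister last, so a racing AfterSuite no longer runs this hook.
	framework.RemoveCleanupAction(*handle)
}
*handle = framework.AddCleanupAction(cleanup)
defer cleanup()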

View File

@@ -98,19 +98,6 @@ type Framework struct {
// Flaky operation failures in an e2e test can be captured through this.
flakeReport *FlakeReport
// To make sure that this framework cleans up after itself, no matter what,
// we install a Cleanup action before each test and clear it after. If we
// should abort, the AfterSuite hook should run all Cleanup actions.
cleanupHandle CleanupActionHandle
// afterEaches is a map of name to function to be called after each test. These are not
// cleared. The call order is randomized so that no dependencies can grow between
// the various afterEaches
afterEaches map[string]AfterEachActionFunc
// beforeEachStarted indicates that BeforeEach has started
beforeEachStarted bool
// configuration for framework's client
Options Options
@@ -125,9 +112,6 @@ type Framework struct {
Timeouts *TimeoutContext
}
// AfterEachActionFunc is a function that can be called after each test
type AfterEachActionFunc func(f *Framework, failed bool)
// TestDataSummary is an interface for managing test data.
type TestDataSummary interface {
SummaryKind() string
@@ -149,8 +133,10 @@ func NewFrameworkWithCustomTimeouts(baseName string, timeouts *TimeoutContext) *
return f
}
// NewDefaultFramework makes a new framework and sets up a BeforeEach/AfterEach for
// you (you can write additional before/after each functions).
// NewDefaultFramework makes a new framework and sets up a BeforeEach which
// initializes the framework instance. It cleans up with a DeferCleanup,
// which runs last, so an AfterEach in the test still has a valid framework
// instance.
func NewDefaultFramework(baseName string) *Framework {
options := Options{
ClientQPS: 20,
@@ -169,72 +155,60 @@ func NewFramework(baseName string, options Options, client clientset.Interface)
Timeouts: NewTimeoutContextWithDefaults(),
}
f.AddAfterEach("dumpNamespaceInfo", func(f *Framework, failed bool) {
if !failed {
return
}
if !TestContext.DumpLogsOnFailure {
return
}
if !f.SkipNamespaceCreation {
for _, ns := range f.namespacesToDelete {
DumpAllNamespaceInfo(f.ClientSet, ns.Name)
}
}
})
ginkgo.BeforeEach(f.BeforeEach)
ginkgo.AfterEach(f.AfterEach)
return f
}
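The practical consequence for test authors: because the framework registers its own teardown with DeferCleanup inside BeforeEach (see below), an AfterEach in the test still sees an initialized framework, and DeferCleanup callbacks registered inside a spec run after all AfterEach nodes, in first-in-last-out order (this matches the recorded output in cleanup_test.go further down). A sketch under those semantics, with all identifiers illustrative:

var _ = ginkgo.Describe("example", func() {
	f := framework.NewDefaultFramework("example")
	ginkgo.AfterEach(func() {
		// Runs before the framework tears itself down,
		// so f.ClientSet is still valid here.
		_ = f.ClientSet
	})
	ginkgo.It("works", func() {
		ginkgo.DeferCleanup(func() {
			// Runs after the AfterEach above, but before the
			// framework's own DeferCleanup deletes the namespace.
		})
	})
})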
// BeforeEach gets a client and makes a namespace.
func (f *Framework) BeforeEach() {
f.beforeEachStarted = true
// DeferCleanup, in contrast to AfterEach, triggers execution in
// first-in-last-out order. This ensures that the framework instance
// remains valid as long as possible.
//
// In addition, AfterEach will not be called if a test never gets here.
ginkgo.DeferCleanup(f.AfterEach)
// The fact that we need this feels like a bug in ginkgo.
// https://github.com/onsi/ginkgo/v2/issues/222
f.cleanupHandle = AddCleanupAction(f.AfterEach)
if f.ClientSet == nil {
ginkgo.By("Creating a kubernetes client")
config, err := LoadConfig()
ExpectNoError(err)
// Registered later and thus runs before deleting namespaces.
ginkgo.DeferCleanup(f.dumpNamespaceInfo)
config.QPS = f.Options.ClientQPS
config.Burst = f.Options.ClientBurst
if f.Options.GroupVersion != nil {
config.GroupVersion = f.Options.GroupVersion
}
if TestContext.KubeAPIContentType != "" {
config.ContentType = TestContext.KubeAPIContentType
}
f.clientConfig = rest.CopyConfig(config)
f.ClientSet, err = clientset.NewForConfig(config)
ExpectNoError(err)
f.DynamicClient, err = dynamic.NewForConfig(config)
ExpectNoError(err)
ginkgo.By("Creating a kubernetes client")
config, err := LoadConfig()
ExpectNoError(err)
// create scales getter, set GroupVersion and NegotiatedSerializer to default values
// as they are required when creating a REST client.
if config.GroupVersion == nil {
config.GroupVersion = &schema.GroupVersion{}
}
if config.NegotiatedSerializer == nil {
config.NegotiatedSerializer = scheme.Codecs
}
restClient, err := rest.RESTClientFor(config)
ExpectNoError(err)
discoClient, err := discovery.NewDiscoveryClientForConfig(config)
ExpectNoError(err)
cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient)
restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoClient)
restMapper.Reset()
resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
f.ScalesGetter = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver)
TestContext.CloudConfig.Provider.FrameworkBeforeEach(f)
config.QPS = f.Options.ClientQPS
config.Burst = f.Options.ClientBurst
if f.Options.GroupVersion != nil {
config.GroupVersion = f.Options.GroupVersion
}
if TestContext.KubeAPIContentType != "" {
config.ContentType = TestContext.KubeAPIContentType
}
f.clientConfig = rest.CopyConfig(config)
f.ClientSet, err = clientset.NewForConfig(config)
ExpectNoError(err)
f.DynamicClient, err = dynamic.NewForConfig(config)
ExpectNoError(err)
// create scales getter, set GroupVersion and NegotiatedSerializer to default values
// as they are required when creating a REST client.
if config.GroupVersion == nil {
config.GroupVersion = &schema.GroupVersion{}
}
if config.NegotiatedSerializer == nil {
config.NegotiatedSerializer = scheme.Codecs
}
restClient, err := rest.RESTClientFor(config)
ExpectNoError(err)
discoClient, err := discovery.NewDiscoveryClientForConfig(config)
ExpectNoError(err)
cachedDiscoClient := cacheddiscovery.NewMemCacheClient(discoClient)
restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cachedDiscoClient)
restMapper.Reset()
resolver := scaleclient.NewDiscoveryScaleKindResolver(cachedDiscoClient)
f.ScalesGetter = scaleclient.New(restClient, restMapper, dynamic.LegacyAPIPathResolverFunc, resolver)
TestContext.CloudConfig.Provider.FrameworkBeforeEach(f)
if !f.SkipNamespaceCreation {
ginkgo.By(fmt.Sprintf("Building a namespace api object, basename %s", f.BaseName))
@@ -317,6 +291,22 @@ func (f *Framework) BeforeEach() {
f.flakeReport = NewFlakeReport()
}
func (f *Framework) dumpNamespaceInfo() {
if !ginkgo.CurrentSpecReport().Failed() {
return
}
if !TestContext.DumpLogsOnFailure {
return
}
ginkgo.By("dump namespace information after failure", func() {
if !f.SkipNamespaceCreation {
for _, ns := range f.namespacesToDelete {
DumpAllNamespaceInfo(f.ClientSet, ns.Name)
}
}
})
}
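Note the two-argument form of ginkgo.By used here, a Ginkgo v2 addition: when a callback is passed, By runs it immediately and reports the step together with the callback's duration. For example:

ginkgo.By("doing something measurable", func() {
	// Body runs right away; the step is reported with its duration.
})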
// printSummaries prints summaries of tests.
func printSummaries(summaries []TestDataSummary, testBaseName string) {
now := time.Now()
@@ -354,29 +344,8 @@ func printSummaries(summaries []TestDataSummary, testBaseName string) {
}
}
// AddAfterEach is a way to add a function to be called after every test. The execution order is intentionally random
// to avoid growing dependencies. If you register the same name twice, it is a coding error and will panic.
func (f *Framework) AddAfterEach(name string, fn AfterEachActionFunc) {
if _, ok := f.afterEaches[name]; ok {
panic(fmt.Sprintf("%q is already registered", name))
}
if f.afterEaches == nil {
f.afterEaches = map[string]AfterEachActionFunc{}
}
f.afterEaches[name] = fn
}
// AfterEach deletes the namespace, after reading its events.
func (f *Framework) AfterEach() {
// If BeforeEach never started AfterEach should be skipped.
// Currently some tests under e2e/storage have this condition.
if !f.beforeEachStarted {
return
}
RemoveCleanupAction(f.cleanupHandle)
// This should not happen. Given ClientSet is a public field a test must have updated it!
// Error out early before any API calls during cleanup.
if f.ClientSet == nil {
@@ -430,11 +399,6 @@ func (f *Framework) AfterEach() {
}
}()
// run all aftereach functions in random order to ensure no dependencies grow
for _, afterEachFn := range f.afterEaches {
afterEachFn(f, ginkgo.CurrentSpecReport().Failed())
}
if TestContext.GatherKubeSystemResourceUsageData != "false" && TestContext.GatherKubeSystemResourceUsageData != "none" && f.gatherer != nil {
ginkgo.By("Collecting resource usage data")
summary, resourceViolationError := f.gatherer.StopAndSummarize([]int{90, 99, 100}, f.AddonResourceConstraints)

View File

@@ -29,7 +29,7 @@ import (
"k8s.io/kubernetes/test/e2e/framework"
)
func TestGinkgoOutput(t *testing.T, expected SuiteResults) {
func TestGinkgoOutput(t *testing.T, expected SuiteResults, runSpecsArgs ...interface{}) {
// Run the Ginkgo suite with spec results collected via ReportAfterEach
// in addition to the default one. To see what the full
// Ginkgo output looks like, run this test with "go test -v".
@@ -39,7 +39,7 @@ func TestGinkgoOutput(t *testing.T, expected SuiteResults) {
report = append(report, spec)
})
fakeT := &testing.T{}
ginkgo.RunSpecs(fakeT, "Logging Suite")
ginkgo.RunSpecs(fakeT, "Logging Suite", runSpecsArgs...)
// Now check the output.
actual := normalizeReport(report)
@@ -103,9 +103,13 @@ var timePrefix = regexp.MustCompile(`(?m)^[[:alpha:]]{3} +[[:digit:]]{1,2} +[[:d
// elapsedSuffix matches "Elapsed: 16.189µs"
var elapsedSuffix = regexp.MustCompile(`Elapsed: [[:digit:]]+(\.[[:digit:]]+)?(µs|ns|ms|s|m)`)
// timeSuffix matches "09/06/22 15:36:43.445" as printed by Ginkgo v2 for log output.
var timeSuffix = regexp.MustCompile(`(?m)[[:space:]][[:digit:]]{2}/[[:digit:]]{2}/[[:digit:]]{2} [[:digit:]]{2}:[[:digit:]]{2}:[[:digit:]]{2}\.[[:digit:]]{1,3}$`)
func stripTimes(in string) string {
out := timePrefix.ReplaceAllString(in, "")
out = elapsedSuffix.ReplaceAllString(out, "Elapsed: <elapsed>")
out = timeSuffix.ReplaceAllString(out, "")
return out
}

View File

@@ -0,0 +1,151 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cleanup
import (
"flag"
"regexp"
"testing"
"github.com/onsi/ginkgo/v2"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/internal/output"
testapiserver "k8s.io/kubernetes/test/utils/apiserver"
)
// The line number of the following code is checked in TestFailureOutput below.
// Be careful when moving it around or changing the import statements above.
// Here are some intentionally blank lines that can be removed to compensate
// for future additional import statements.
//
//
//
//
//
//
//
//
//
//
//
//
//
//
//
// This must be line #50.
var _ = ginkgo.Describe("framework", func() {
ginkgo.BeforeEach(func() {
framework.Logf("before")
})
f := framework.NewDefaultFramework("test-namespace")
ginkgo.AfterEach(func() {
framework.Logf("after")
if f.ClientSet == nil {
framework.Fail("Wrong order of cleanup operations: framework.AfterEach already ran and cleared f.ClientSet.")
}
})
ginkgo.It("works", func() {
// DeferCleanup invokes in first-in-last-out order
ginkgo.DeferCleanup(func() {
framework.Logf("cleanup last")
})
ginkgo.DeferCleanup(func() {
framework.Logf("cleanup first")
})
})
})
const (
ginkgoOutput = `[BeforeEach] framework
cleanup_test.go:53
INFO: before
[BeforeEach] framework
framework.go:xxx
STEP: Creating a kubernetes client
INFO: >>> kubeConfig: yyy/kube.config
STEP: Building a namespace api object, basename test-namespace
INFO: Skipping waiting for service account
[It] works
cleanup_test.go:66
[AfterEach] framework
cleanup_test.go:59
INFO: after
[DeferCleanup] framework
cleanup_test.go:71
INFO: cleanup first
[DeferCleanup] framework
cleanup_test.go:68
INFO: cleanup last
[DeferCleanup] framework
framework.go:xxx
[DeferCleanup] framework
framework.go:xxx
STEP: Destroying namespace "test-namespace-zzz" for this suite.
`
)
func TestCleanup(t *testing.T) {
apiServer := testapiserver.StartAPITestServer(t)
// This simulates how test/e2e uses the framework and how users
// invoke test/e2e.
framework.RegisterCommonFlags(flag.CommandLine)
framework.RegisterClusterFlags(flag.CommandLine)
for flagname, value := range map[string]string{
"kubeconfig": apiServer.KubeConfigFile,
// Some features are not supported by the fake cluster.
"e2e-verify-service-account": "false",
"allowed-not-ready-nodes": "-1",
// This simplifies the text comparison.
"ginkgo.no-color": "true",
} {
if err := flag.Set(flagname, value); err != nil {
t.Fatalf("set %s: %v", flagname, err)
}
}
framework.AfterReadingAllFlags(&framework.TestContext)
suiteConfig, reporterConfig := framework.CreateGinkgoConfig()
expected := output.SuiteResults{
output.TestResult{
Name: "framework works",
NormalizeOutput: normalizeOutput,
Output: ginkgoOutput,
},
}
output.TestGinkgoOutput(t, expected, suiteConfig, reporterConfig)
}
func normalizeOutput(output string) string {
for exp, replacement := range map[string]string{
// Ignore line numbers inside framework source code (likely to change).
`framework\.go:\d+`: `framework.go:xxx`,
// Config file name varies for each run.
`kubeConfig: .*/kube.config`: `kubeConfig: yyy/kube.config`,
// Random suffix for namespace.
`test-namespace-\d+`: `test-namespace-zzz`,
} {
output = regexp.MustCompile(exp).ReplaceAllString(output, replacement)
}
return output
}

View File

@@ -35,11 +35,11 @@ import (
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
cliflag "k8s.io/component-base/cli/flag"
"k8s.io/klog/v2"
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
testapiserver "k8s.io/kubernetes/test/utils/apiserver"
)
const (
@@ -424,44 +424,6 @@ func RegisterClusterFlags(flags *flag.FlagSet) {
flags.DurationVar(&nodeKiller.SimulatedDowntime, "node-killer-simulated-downtime", 10*time.Minute, "A delay between node death and recreation")
}
func createKubeConfig(clientCfg *restclient.Config) *clientcmdapi.Config {
clusterNick := "cluster"
userNick := "user"
contextNick := "context"
configCmd := clientcmdapi.NewConfig()
credentials := clientcmdapi.NewAuthInfo()
credentials.Token = clientCfg.BearerToken
credentials.TokenFile = clientCfg.BearerTokenFile
credentials.ClientCertificate = clientCfg.TLSClientConfig.CertFile
if len(credentials.ClientCertificate) == 0 {
credentials.ClientCertificateData = clientCfg.TLSClientConfig.CertData
}
credentials.ClientKey = clientCfg.TLSClientConfig.KeyFile
if len(credentials.ClientKey) == 0 {
credentials.ClientKeyData = clientCfg.TLSClientConfig.KeyData
}
configCmd.AuthInfos[userNick] = credentials
cluster := clientcmdapi.NewCluster()
cluster.Server = clientCfg.Host
cluster.CertificateAuthority = clientCfg.CAFile
if len(cluster.CertificateAuthority) == 0 {
cluster.CertificateAuthorityData = clientCfg.CAData
}
cluster.InsecureSkipTLSVerify = clientCfg.Insecure
configCmd.Clusters[clusterNick] = cluster
context := clientcmdapi.NewContext()
context.Cluster = clusterNick
context.AuthInfo = userNick
configCmd.Contexts[contextNick] = context
configCmd.CurrentContext = contextNick
return configCmd
}
// GenerateSecureToken returns a string of length tokenLen, consisting
// of random bytes encoded as base64 for use as a Bearer Token during
// communication with an APIServer
@@ -498,7 +460,7 @@ func AfterReadingAllFlags(t *TestContextType) {
// Check if we can use the in-cluster config
if clusterConfig, err := restclient.InClusterConfig(); err == nil {
if tempFile, err := os.CreateTemp(os.TempDir(), "kubeconfig-"); err == nil {
kubeConfig := createKubeConfig(clusterConfig)
kubeConfig := testapiserver.CreateKubeConfig(clusterConfig)
clientcmd.WriteToFile(*kubeConfig, tempFile.Name())
t.KubeConfig = tempFile.Name()
klog.V(4).Infof("Using a temporary kubeconfig file from in-cluster config : %s", tempFile.Name())

View File

@@ -987,8 +987,6 @@ func generateDriverCleanupFunc(
driverName, testns, driverns string,
driverCleanup, cancelLogging func()) func() {
cleanupHandle := new(framework.CleanupActionHandle)
// Cleanup CSI driver and namespaces. This function needs to be idempotent and can be
// concurrently called from defer (or AfterEach) and AfterSuite action hooks.
cleanupFunc := func() {
@@ -1003,13 +1001,7 @@ func generateDriverCleanupFunc(
ginkgo.By(fmt.Sprintf("deleting the driver namespace: %s", driverns))
tryFunc(func() { f.DeleteNamespace(driverns) })
// The cleanup function has already run, so we don't need to run it again.
// We do this as the very last action because, in case defer (or AfterEach)
// races with AfterSuite and the test routine gets killed, this block still
// runs in AfterSuite.
framework.RemoveCleanupAction(*cleanupHandle)
}
*cleanupHandle = framework.AddCleanupAction(cleanupFunc)
return cleanupFunc
}
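If a cleanup function can still be reached from more than one path (for example a defer and a DeferCleanup), one way to keep it idempotent is a sync.Once wrapper. A sketch, not code from this patch, reusing the surrounding identifiers tryFunc, driverCleanup, and cancelLogging:

var once sync.Once
cleanupFunc := func() {
	once.Do(func() {
		tryFunc(driverCleanup)
		tryFunc(cancelLogging)
	})
}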

View File

@@ -55,7 +55,6 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
resizableSc *storagev1.StorageClass
node *v1.Node
nodeName string
isNodeLabeled bool
nodeKeyValueLabel map[string]string
nodeLabelValue string
nodeKey string
@@ -76,15 +75,11 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
framework.ExpectNoError(err)
nodeName = node.Name
nodeKey = "mounted_flexvolume_expand"
if !isNodeLabeled {
nodeLabelValue = ns
nodeKeyValueLabel = make(map[string]string)
nodeKeyValueLabel[nodeKey] = nodeLabelValue
framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
isNodeLabeled = true
}
nodeKey = "mounted_flexvolume_expand_" + ns
nodeLabelValue = ns
nodeKeyValueLabel = map[string]string{nodeKey: nodeLabelValue}
framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
ginkgo.DeferCleanup(framework.RemoveLabelOffNode, c, nodeName, nodeKey)
test := testsuites.StorageClassTest{
Name: "flexvolume-resize",
@@ -109,24 +104,12 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume expand[Slow]
}, ns)
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating pvc")
})
framework.AddCleanupAction(func() {
if len(nodeLabelValue) > 0 {
framework.RemoveLabelOffNode(c, nodeName, nodeKey)
}
})
ginkgo.AfterEach(func() {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if c != nil {
ginkgo.DeferCleanup(func() {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
nodeKeyValueLabel = make(map[string]string)
}
})
})
ginkgo.It("Should verify mounted flex volumes can be resized", func() {

View File

@@ -48,7 +48,6 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
pvc *v1.PersistentVolumeClaim
resizableSc *storagev1.StorageClass
nodeName string
isNodeLabeled bool
nodeKeyValueLabel map[string]string
nodeLabelValue string
nodeKey string
@@ -71,15 +70,11 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
framework.ExpectNoError(err)
nodeName = node.Name
nodeKey = "mounted_flexvolume_expand"
if !isNodeLabeled {
nodeLabelValue = ns
nodeKeyValueLabel = make(map[string]string)
nodeKeyValueLabel[nodeKey] = nodeLabelValue
framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
isNodeLabeled = true
}
nodeKey = "mounted_flexvolume_expand_" + ns
nodeLabelValue = ns
nodeKeyValueLabel = map[string]string{nodeKey: nodeLabelValue}
framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
ginkgo.DeferCleanup(framework.RemoveLabelOffNode, c, nodeName, nodeKey)
test := testsuites.StorageClassTest{
Name: "flexvolume-resize",
@@ -104,25 +99,12 @@ var _ = utils.SIGDescribe("[Feature:Flexvolumes] Mounted flexvolume volume expan
}, ns)
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating pvc: %v", err)
})
framework.AddCleanupAction(func() {
if len(nodeLabelValue) > 0 {
framework.RemoveLabelOffNode(c, nodeName, nodeKey)
}
})
ginkgo.AfterEach(func() {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if c != nil {
ginkgo.DeferCleanup(func() {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
nodeKeyValueLabel = make(map[string]string)
}
})
})
ginkgo.It("should be resizable when mounted", func() {

View File

@@ -52,7 +52,6 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
sc *storagev1.StorageClass
cleanStorageClass func()
nodeName string
isNodeLabeled bool
nodeKeyValueLabel map[string]string
nodeLabelValue string
nodeKey string
@@ -70,15 +69,11 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
framework.ExpectNoError(err)
nodeName = node.Name
nodeKey = "mounted_volume_expand"
if !isNodeLabeled {
nodeLabelValue = ns
nodeKeyValueLabel = make(map[string]string)
nodeKeyValueLabel[nodeKey] = nodeLabelValue
framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
isNodeLabeled = true
}
nodeKey = "mounted_volume_expand_" + ns
nodeLabelValue = ns
nodeKeyValueLabel = map[string]string{nodeKey: nodeLabelValue}
framework.AddOrUpdateLabelOnNode(c, nodeName, nodeKey, nodeLabelValue)
ginkgo.DeferCleanup(framework.RemoveLabelOffNode, c, nodeName, nodeKey)
test := testsuites.StorageClassTest{
Name: "default",
@@ -93,6 +88,7 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
if !*sc.AllowVolumeExpansion {
framework.Failf("Class %s does not allow volume expansion", sc.Name)
}
ginkgo.DeferCleanup(cleanStorageClass)
pvc = e2epv.MakePersistentVolumeClaim(e2epv.PersistentVolumeClaimConfig{
ClaimSize: test.ClaimSize,
@@ -101,26 +97,13 @@ var _ = utils.SIGDescribe("Mounted volume expand [Feature:StorageProvider]", fun
}, ns)
pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
framework.ExpectNoError(err, "Error creating pvc")
})
ginkgo.DeferCleanup(func() {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
framework.AddCleanupAction(func() {
if len(nodeLabelValue) > 0 {
framework.RemoveLabelOffNode(c, nodeName, nodeKey)
}
})
ginkgo.AfterEach(func() {
framework.Logf("AfterEach: Cleaning up resources for mounted volume resize")
if c != nil {
if errs := e2epv.PVPVCCleanup(c, ns, nil, pvc); len(errs) > 0 {
framework.Failf("AfterEach: Failed to delete PVC and/or PV. Errors: %v", utilerrors.NewAggregate(errs))
}
pvc, nodeName, isNodeLabeled, nodeLabelValue = nil, "", false, ""
nodeKeyValueLabel = make(map[string]string)
}
cleanStorageClass()
})
})
ginkgo.It("Should verify mounted devices can be resized", func() {

View File

@@ -248,14 +248,10 @@ func (t *snapshottableStressTestSuite) DefineTests(driver storageframework.TestD
ginkgo.BeforeEach(func() {
init()
ginkgo.DeferCleanup(cleanup)
createPodsAndVolumes()
})
// See #96177; this is necessary for cleaning up resources when tests are interrupted.
f.AddAfterEach("cleanup", func(f *framework.Framework, failed bool) {
cleanup()
})
ginkgo.It("should support snapshotting of many volumes repeatedly [Slow] [Serial]", func() {
// Repeatedly create and delete snapshots of each volume.
for i := 0; i < stressTest.testOptions.NumPods; i++ {

View File

@@ -194,14 +194,10 @@ func (t *volumeStressTestSuite) DefineTests(driver storageframework.TestDriver,
ginkgo.BeforeEach(func() {
init()
ginkgo.DeferCleanup(cleanup)
createPodsAndVolumes()
})
// See #96177; this is necessary for cleaning up resources when tests are interrupted.
f.AddAfterEach("cleanup", func(f *framework.Framework, failed bool) {
cleanup()
})
ginkgo.It("multiple pods should access different volumes repeatedly [Slow] [Serial]", func() {
// Restart pod repeatedly
for i := 0; i < l.testOptions.NumPods; i++ {

View File

@@ -23,7 +23,6 @@ import (
"time"
"github.com/davecgh/go-spew/spew"
"github.com/onsi/ginkgo/v2"
v1 "k8s.io/api/core/v1"
@@ -129,7 +128,8 @@ func (t *volumePerformanceTestSuite) DefineTests(driver storageframework.TestDri
}
f := framework.NewFramework("volume-lifecycle-performance", frameworkOptions, nil)
f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
f.AddAfterEach("cleanup", func(f *framework.Framework, failed bool) {
ginkgo.AfterEach(func() {
ginkgo.By("Closing informer channel")
close(l.stopCh)
ginkgo.By("Deleting all PVCs")

View File

@@ -75,35 +75,48 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func()
volLabel = labels.Set{e2epv.VolumeSelectorKey: ns}
selector = metav1.SetAsLabelSelector(volLabel)
if volumePath == "" {
volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
framework.ExpectNoError(err)
pvConfig = e2epv.PersistentVolumeConfig{
NamePrefix: "vspherepv-",
Labels: volLabel,
PVSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumePath,
FSType: "ext4",
},
volumePath, err = nodeInfo.VSphere.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
framework.ExpectNoError(err)
ginkgo.DeferCleanup(func() {
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
})
pvConfig = e2epv.PersistentVolumeConfig{
NamePrefix: "vspherepv-",
Labels: volLabel,
PVSource: v1.PersistentVolumeSource{
VsphereVolume: &v1.VsphereVirtualDiskVolumeSource{
VolumePath: volumePath,
FSType: "ext4",
},
Prebind: nil,
}
emptyStorageClass := ""
pvcConfig = e2epv.PersistentVolumeClaimConfig{
Selector: selector,
StorageClassName: &emptyStorageClass,
}
},
Prebind: nil,
}
emptyStorageClass := ""
pvcConfig = e2epv.PersistentVolumeClaimConfig{
Selector: selector,
StorageClassName: &emptyStorageClass,
}
ginkgo.By("Creating the PV and PVC")
pv, pvc, err = e2epv.CreatePVPVC(c, f.Timeouts, pvConfig, pvcConfig, ns, false)
framework.ExpectNoError(err)
ginkgo.DeferCleanup(func() {
framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "AfterEach: failed to delete PV ", pv.Name)
})
ginkgo.DeferCleanup(func() {
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "AfterEach: failed to delete PVC ", pvc.Name)
})
framework.ExpectNoError(e2epv.WaitOnPVandPVC(c, f.Timeouts, ns, pv, pvc))
ginkgo.By("Creating the Client Pod")
clientPod, err = e2epod.CreateClientPod(c, ns, pvc)
framework.ExpectNoError(err)
node = clientPod.Spec.NodeName
ginkgo.DeferCleanup(func() {
framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod), "AfterEach: failed to delete pod ", clientPod.Name)
})
ginkgo.DeferCleanup(func() {
framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node), "wait for vsphere disk to detach")
})
ginkgo.By("Verify disk should be attached to the node")
isAttached, err := diskIsAttached(volumePath, node)
@@ -113,42 +126,6 @@ var _ = utils.SIGDescribe("PersistentVolumes:vsphere [Feature:vsphere]", func()
}
})
ginkgo.AfterEach(func() {
framework.Logf("AfterEach: Cleaning up test resources")
if c != nil {
framework.ExpectNoError(e2epod.DeletePodWithWait(c, clientPod), "AfterEach: failed to delete pod ", clientPod.Name)
if pv != nil {
framework.ExpectNoError(e2epv.DeletePersistentVolume(c, pv.Name), "AfterEach: failed to delete PV ", pv.Name)
}
if pvc != nil {
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "AfterEach: failed to delete PVC ", pvc.Name)
}
}
})
/*
Clean up
1. Wait and verify volume is detached from the node
2. Delete PV
3. Delete Volume (vmdk)
*/
framework.AddCleanupAction(func() {
// Cleanup actions will be called even when the tests are skipped, which leaves the namespace unset.
if len(ns) > 0 && len(volumePath) > 0 {
framework.ExpectNoError(waitForVSphereDiskToDetach(volumePath, node))
nodeInfo.VSphere.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}
})
/*
Delete the PVC and then the pod. Expect the pod to succeed in unmounting and detaching PD on delete.
Test Steps:
1. Delete PVC.
2. Delete POD, POD deletion should succeed.
*/
ginkgo.It("should test that deleting a PVC before the pod does not cause pod deletion to fail on vsphere volume detach", func() {
ginkgo.By("Deleting the Claim")
framework.ExpectNoError(e2epv.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)

View File

@@ -107,18 +107,11 @@ var _ = utils.SIGDescribe("vcp at scale [Feature:vsphere] ", func() {
e2eskipper.Skipf("Cannot attach %d volumes to %d nodes. Maximum volumes that can be attached on %d nodes is %d", volumeCount, len(nodes.Items), len(nodes.Items), volumesPerNode*len(nodes.Items))
}
nodeSelectorList = createNodeLabels(client, namespace, nodes)
})
/*
Remove labels from all the nodes
*/
framework.AddCleanupAction(func() {
// Cleanup actions will be called even when the tests are skipped, which leaves the namespace unset.
if len(namespace) > 0 && nodes != nil {
ginkgo.DeferCleanup(func() {
for _, node := range nodes.Items {
framework.RemoveLabelOffNode(client, node.Name, NodeLabelKey)
}
}
})
})
ginkgo.It("vsphere scale tests", func() {

View File

@@ -65,7 +65,6 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
client clientset.Interface
namespace string
nodeName string
isNodeLabeled bool
nodeKeyValueLabel map[string]string
nodeLabelValue string
)
@@ -74,20 +73,11 @@ var _ = utils.SIGDescribe("Volume Disk Format [Feature:vsphere]", func() {
Bootstrap(f)
client = f.ClientSet
namespace = f.Namespace.Name
if !isNodeLabeled {
nodeName = GetReadySchedulableRandomNodeInfo().Name
nodeLabelValue = "vsphere_e2e_" + string(uuid.NewUUID())
nodeKeyValueLabel = make(map[string]string)
nodeKeyValueLabel[NodeLabelKey] = nodeLabelValue
framework.AddOrUpdateLabelOnNode(client, nodeName, NodeLabelKey, nodeLabelValue)
isNodeLabeled = true
}
})
framework.AddCleanupAction(func() {
// Cleanup actions will be called even when the tests are skipped, which leaves the namespace unset.
if len(namespace) > 0 && len(nodeLabelValue) > 0 {
framework.RemoveLabelOffNode(client, nodeName, NodeLabelKey)
}
nodeName = GetReadySchedulableRandomNodeInfo().Name
nodeLabelValue = "vsphere_e2e_" + string(uuid.NewUUID())
nodeKeyValueLabel = map[string]string{NodeLabelKey: nodeLabelValue}
framework.AddOrUpdateLabelOnNode(client, nodeName, NodeLabelKey, nodeLabelValue)
ginkgo.DeferCleanup(framework.RemoveLabelOffNode, client, nodeName, NodeLabelKey)
})
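The DeferCleanup calls above use Ginkgo v2's function-plus-arguments form: the arguments are evaluated and captured when DeferCleanup is registered, and the call happens during cleanup. Roughly:

ginkgo.DeferCleanup(framework.RemoveLabelOffNode, client, nodeName, NodeLabelKey)
// behaves like:
ginkgo.DeferCleanup(func() {
	framework.RemoveLabelOffNode(client, nodeName, NodeLabelKey)
})
// except that the closure evaluates its arguments only when the
// cleanup runs, while the first form captures them immediately.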
ginkgo.It("verify disk format type - eagerzeroedthick is honored for dynamically provisioned pv using storageclass", func() {

View File

@@ -50,7 +50,6 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() {
node1KeyValueLabel map[string]string
node2Name string
node2KeyValueLabel map[string]string
isNodeLabeled bool
nodeInfo *NodeInfo
vsp *VSphere
)
@@ -60,41 +59,29 @@ var _ = utils.SIGDescribe("Volume Placement [Feature:vsphere]", func() {
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
if !isNodeLabeled {
node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns)
isNodeLabeled = true
nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name)
vsp = nodeInfo.VSphere
}
ginkgo.By("creating vmdk")
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
framework.ExpectNoError(err)
volumePaths = append(volumePaths, volumePath)
})
ginkgo.AfterEach(func() {
for _, volumePath := range volumePaths {
vsp.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}
volumePaths = nil
})
/*
Steps
1. Remove labels assigned to node 1 and node 2
2. Delete VMDK volume
*/
framework.AddCleanupAction(func() {
// Cleanup actions will be called even when the tests are skipped, which leaves the namespace unset.
if len(ns) > 0 {
node1Name, node1KeyValueLabel, node2Name, node2KeyValueLabel = testSetupVolumePlacement(c, ns)
ginkgo.DeferCleanup(func() {
if len(node1KeyValueLabel) > 0 {
framework.RemoveLabelOffNode(c, node1Name, NodeLabelKey)
}
if len(node2KeyValueLabel) > 0 {
framework.RemoveLabelOffNode(c, node2Name, NodeLabelKey)
}
}
})
nodeInfo = TestContext.NodeMapper.GetNodeInfo(node1Name)
vsp = nodeInfo.VSphere
ginkgo.By("creating vmdk")
volumePath, err := vsp.CreateVolume(&VolumeOptions{}, nodeInfo.DataCenterRef)
framework.ExpectNoError(err)
volumePaths = append(volumePaths, volumePath)
ginkgo.DeferCleanup(func() {
for _, volumePath := range volumePaths {
vsp.DeleteVolume(volumePath, nodeInfo.DataCenterRef)
}
volumePaths = nil
})
})
/*
Steps

View File

@@ -27,16 +27,6 @@ import (
e2emetrics "k8s.io/kubernetes/test/e2e/framework/metrics"
)
// CleanupSuite is the boilerplate that can be used after tests on ginkgo were run, on the SynchronizedAfterSuite step.
// Similar to SynchronizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs).
// Here, the order of functions is reversed; first, the function which runs everywhere,
// and then the function that only runs on the first Ginkgo node.
func CleanupSuite() {
// Run on all Ginkgo nodes
framework.Logf("Running AfterSuite actions on all nodes")
framework.RunCleanupActions()
}
// AfterSuiteActions are actions that are run on ginkgo's SynchronizedAfterSuite
func AfterSuiteActions() {
// Run only Ginkgo on node 1

View File

@@ -0,0 +1,62 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
"k8s.io/client-go/rest"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
// CreateKubeConfig converts a [rest.Config] into a [clientcmdapi.Config]
// which then can be written to a file with [clientcmd.WriteToFile].
func CreateKubeConfig(clientCfg *rest.Config) *clientcmdapi.Config {
clusterNick := "cluster"
userNick := "user"
contextNick := "context"
config := clientcmdapi.NewConfig()
credentials := clientcmdapi.NewAuthInfo()
credentials.Token = clientCfg.BearerToken
credentials.TokenFile = clientCfg.BearerTokenFile
credentials.ClientCertificate = clientCfg.TLSClientConfig.CertFile
if len(credentials.ClientCertificate) == 0 {
credentials.ClientCertificateData = clientCfg.TLSClientConfig.CertData
}
credentials.ClientKey = clientCfg.TLSClientConfig.KeyFile
if len(credentials.ClientKey) == 0 {
credentials.ClientKeyData = clientCfg.TLSClientConfig.KeyData
}
config.AuthInfos[userNick] = credentials
cluster := clientcmdapi.NewCluster()
cluster.Server = clientCfg.Host
cluster.CertificateAuthority = clientCfg.CAFile
if len(cluster.CertificateAuthority) == 0 {
cluster.CertificateAuthorityData = clientCfg.CAData
}
cluster.InsecureSkipTLSVerify = clientCfg.Insecure
config.Clusters[clusterNick] = cluster
context := clientcmdapi.NewContext()
context.Cluster = clusterNick
context.AuthInfo = userNick
config.Contexts[contextNick] = context
config.CurrentContext = contextNick
return config
}
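Callers pair this with clientcmd.WriteToFile, as AfterReadingAllFlags does above. A minimal sketch (the helper name is illustrative; it needs the extra import "k8s.io/client-go/tools/clientcmd"):

func writeKubeConfigFile(clientCfg *rest.Config, filename string) error {
	// Convert the rest.Config and persist it for tools that
	// expect a kubeconfig file.
	kubeConfig := CreateKubeConfig(clientCfg)
	return clientcmd.WriteToFile(*kubeConfig, filename)
}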

View File

@@ -0,0 +1,101 @@
/*
Copyright 2022 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package apiserver
import (
"path"
"testing"
"github.com/google/uuid"
"k8s.io/apiserver/pkg/server/dynamiccertificates"
etcdserver "k8s.io/apiserver/pkg/storage/etcd3/testserver"
"k8s.io/apiserver/pkg/storage/storagebackend"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/util/cert"
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
)
// TestAPIServer provides access to a running apiserver instance.
type TestAPIServer struct {
// ClientSet is already initialized to access the apiserver as admin.
ClientSet clientset.Interface
// KubeConfigFile is the absolute path for a kube.config file that
// grants admin access to the apiserver.
KubeConfigFile string
}
// StartAPITestServer runs etcd and apiserver in the background in the same
// process. All resources get released automatically when the test
// completes. If startup fails, the test gets aborted.
func StartAPITestServer(t *testing.T) TestAPIServer {
cfg := etcdserver.NewTestConfig(t)
etcdClient := etcdserver.RunEtcd(t, cfg)
storageConfig := storagebackend.NewDefaultConfig(path.Join(uuid.New().String(), "registry"), nil)
storageConfig.Transport.ServerList = etcdClient.Endpoints()
server := kubeapiservertesting.StartTestServerOrDie(t, nil, []string{}, storageConfig)
t.Cleanup(server.TearDownFn)
clientSet := clientset.NewForConfigOrDie(server.ClientConfig)
kubeConfigFile := writeKubeConfigForWardleServerToKASConnection(t, server.ClientConfig)
return TestAPIServer{
ClientSet: clientSet,
KubeConfigFile: kubeConfigFile,
}
}
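A test consumes this the way TestCleanup does above. A minimal sketch (the test name is illustrative):

func TestExample(t *testing.T) {
	apiServer := StartAPITestServer(t)
	// KubeConfigFile can be handed to code that expects --kubeconfig,
	// while ClientSet talks to the same apiserver directly.
	if _, err := apiServer.ClientSet.Discovery().ServerVersion(); err != nil {
		t.Fatal(err)
	}
}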
func writeKubeConfigForWardleServerToKASConnection(t *testing.T, kubeClientConfig *rest.Config) string {
// write a kubeconfig out for starting other API servers with delegated auth. remember, no in-cluster config
// the loopback client config uses a loopback cert with different SNI. We need to use the "real"
// cert, so we'll hope we aren't hacked during a unit test and instead load it from the server we started.
wardleToKASKubeClientConfig := rest.CopyConfig(kubeClientConfig)
servingCerts, _, err := cert.GetServingCertificatesForURL(wardleToKASKubeClientConfig.Host, "")
if err != nil {
t.Fatal(err)
}
encodedServing, err := cert.EncodeCertificates(servingCerts...)
if err != nil {
t.Fatal(err)
}
wardleToKASKubeClientConfig.CAData = encodedServing
for _, v := range servingCerts {
t.Logf("Client: Server public key is %v\n", dynamiccertificates.GetHumanCertDetail(v))
}
certs, err := cert.ParseCertsPEM(wardleToKASKubeClientConfig.CAData)
if err != nil {
t.Fatal(err)
}
for _, curr := range certs {
t.Logf("CA bundle %v\n", dynamiccertificates.GetHumanCertDetail(curr))
}
adminKubeConfig := CreateKubeConfig(wardleToKASKubeClientConfig)
tmpDir := t.TempDir()
kubeConfigFile := path.Join(tmpDir, "kube.config")
if err := clientcmd.WriteToFile(*adminKubeConfig, kubeConfigFile); err != nil {
t.Fatal(err)
}
return kubeConfigFile
}