Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #20771 from thockin/e2e-framework
E2E framework cleanups
Commit 7b795643de
@@ -22,6 +22,7 @@ import (
 	"os"
 	"path"
 	"strings"
+	"sync"
 	"testing"
 	"time"
 
@@ -189,12 +190,54 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
 
 })
 
+type CleanupActionHandle *int
+
+var cleanupActionsLock sync.Mutex
+var cleanupActions = map[CleanupActionHandle]func(){}
+
+// AddCleanupAction installs a function that will be called in the event of the
+// whole test being terminated. This allows arbitrary pieces of the overall
+// test to hook into SynchronizedAfterSuite().
+func AddCleanupAction(fn func()) CleanupActionHandle {
+	p := CleanupActionHandle(new(int))
+	cleanupActionsLock.Lock()
+	defer cleanupActionsLock.Unlock()
+	cleanupActions[p] = fn
+	return p
+}
+
+// RemoveCleanupAction removes a function that was installed by
+// AddCleanupAction.
+func RemoveCleanupAction(p CleanupActionHandle) {
+	cleanupActionsLock.Lock()
+	defer cleanupActionsLock.Unlock()
+	delete(cleanupActions, p)
+}
+
+// RunCleanupActions runs all functions installed by AddCleanupAction. It does
+// not remove them (see RemoveCleanupAction) but it does run unlocked, so they
+// may remove themselves.
+func RunCleanupActions() {
+	list := []func(){}
+	func() {
+		cleanupActionsLock.Lock()
+		defer cleanupActionsLock.Unlock()
+		for _, fn := range cleanupActions {
+			list = append(list, fn)
+		}
+	}()
+	// Run unlocked.
+	for _, fn := range list {
+		fn()
+	}
+}
+
 // Similar to SynchronizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs).
 // Here, the order of functions is reversed; first, the function which runs everywhere,
 // and then the function that only runs on the first Ginkgo node.
 var _ = ginkgo.SynchronizedAfterSuite(func() {
 	// Run on all Ginkgo nodes
+	RunCleanupActions()
 }, func() {
 	// Run only on Ginkgo node 1
 	if testContext.ReportDir != "" {
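For context (not part of the diff): a minimal sketch of how the cleanup-action registry above is meant to be used. A test fixture registers its teardown when it starts and unregisters it when it finishes normally; if the suite aborts instead, the RunCleanupActions call in SynchronizedAfterSuite still runs whatever was left behind. The exampleFixture type and its method names are hypothetical; only CleanupActionHandle, AddCleanupAction, RemoveCleanupAction and RunCleanupActions come from the hunk above.

// Hypothetical fixture built on the cleanup-action API introduced above.
type exampleFixture struct {
	cleanupHandle CleanupActionHandle
}

func (f *exampleFixture) setUp() {
	// Register the teardown so it still runs if the suite is torn down early.
	f.cleanupHandle = AddCleanupAction(f.tearDown)
	// ... create namespaces, pods, etc. ...
}

func (f *exampleFixture) tearDown() {
	// Normal completion: unregister first so a later RunCleanupActions sweep
	// does not invoke this teardown a second time.
	RemoveCleanupAction(f.cleanupHandle)
	// ... delete namespaces, pods, etc. ...
}

Calling RemoveCleanupAction from inside a registered function is also safe, because RunCleanupActions snapshots the registered functions under the lock and then invokes them unlocked; that is the "they may remove themselves" case the comment describes.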
@@ -75,15 +75,8 @@ var _ = Describe("ClusterDns [Feature:Example]", func() {
 		namespaces := []*api.Namespace{nil, nil}
 		for i := range namespaces {
 			var err error
-			namespaces[i], err = createTestingNS(fmt.Sprintf("dnsexample%d", i), c, nil)
-			if testContext.DeleteNamespace {
-				if namespaces[i] != nil {
-					defer deleteNS(c, namespaces[i].Name, 5*time.Minute /* namespace deletion timeout */)
-				}
-				Expect(err).NotTo(HaveOccurred())
-			} else {
-				Logf("Found DeleteNamespace=false, skipping namespace deletion!")
-			}
+			namespaces[i], err = framework.CreateNamespace(fmt.Sprintf("dnsexample%d", i), nil)
+			Expect(err).NotTo(HaveOccurred())
 		}
 
 		for _, ns := range namespaces {
@@ -43,9 +43,11 @@ const (
 type Framework struct {
 	BaseName string
 
-	Namespace                *api.Namespace
-	Client                   *client.Client
-	Clientset_1_2            *release_1_2.Clientset
-	NamespaceDeletionTimeout time.Duration
+	Client        *client.Client
+	Clientset_1_2 *release_1_2.Clientset
+
+	Namespace                *api.Namespace   // Every test has at least one namespace
+	namespacesToDelete       []*api.Namespace // Some tests have more than one.
+	NamespaceDeletionTimeout time.Duration
 
 	gatherer containerResourceGatherer
@@ -57,6 +59,11 @@ type Framework struct {
 	logsSizeWaitGroup    sync.WaitGroup
 	logsSizeCloseChannel chan bool
 	logsSizeVerifier     *LogsSizeVerifier
+
+	// To make sure that this framework cleans up after itself, no matter what,
+	// we install a cleanup action before each test and clear it after. If we
+	// should abort, the AfterSuite hook should run all cleanup actions.
+	cleanupHandle CleanupActionHandle
 }
 
 type TestDataSummary interface {
@@ -80,6 +87,10 @@ func NewFramework(baseName string) *Framework {
 
 // beforeEach gets a client and makes a namespace.
 func (f *Framework) beforeEach() {
+	// The fact that we need this feels like a bug in ginkgo.
+	// https://github.com/onsi/ginkgo/issues/222
+	f.cleanupHandle = AddCleanupAction(f.afterEach)
+
 	By("Creating a kubernetes client")
 	c, err := loadClient()
 	Expect(err).NotTo(HaveOccurred())
@@ -88,7 +99,7 @@ func (f *Framework) beforeEach() {
 	f.Clientset_1_2 = release_1_2.FromUnversionedClient(c)
 
 	By("Building a namespace api object")
-	namespace, err := createTestingNS(f.BaseName, f.Client, map[string]string{
+	namespace, err := f.CreateNamespace(f.BaseName, map[string]string{
 		"e2e-framework": f.BaseName,
 	})
 	Expect(err).NotTo(HaveOccurred())
@@ -121,6 +132,8 @@ func (f *Framework) beforeEach() {
 
 // afterEach deletes the namespace, after reading its events.
 func (f *Framework) afterEach() {
+	RemoveCleanupAction(f.cleanupHandle)
+
 	// Print events if the test failed.
 	if CurrentGinkgoTestDescription().Failed {
 		By(fmt.Sprintf("Collecting events from namespace %q.", f.Namespace.Name))
@@ -166,15 +179,18 @@ func (f *Framework) afterEach() {
 	}
 
 	if testContext.DeleteNamespace {
-		By(fmt.Sprintf("Destroying namespace %q for this suite.", f.Namespace.Name))
+		for _, ns := range f.namespacesToDelete {
+			By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name))
 
-		timeout := 5 * time.Minute
-		if f.NamespaceDeletionTimeout != 0 {
-			timeout = f.NamespaceDeletionTimeout
-		}
-		if err := deleteNS(f.Client, f.Namespace.Name, timeout); err != nil {
-			Failf("Couldn't delete ns %q: %s", f.Namespace.Name, err)
+			timeout := 5 * time.Minute
+			if f.NamespaceDeletionTimeout != 0 {
+				timeout = f.NamespaceDeletionTimeout
+			}
+			if err := deleteNS(f.Client, ns.Name, timeout); err != nil {
+				Failf("Couldn't delete ns %q: %s", ns.Name, err)
+			}
 		}
+		f.namespacesToDelete = nil
 	} else {
 		Logf("Found DeleteNamespace=false, skipping namespace deletion!")
 	}
@@ -209,6 +225,14 @@ func (f *Framework) afterEach() {
 	f.Client = nil
 }
 
+func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (*api.Namespace, error) {
+	ns, err := createTestingNS(baseName, f.Client, labels)
+	if err == nil {
+		f.namespacesToDelete = append(f.namespacesToDelete, ns)
+	}
+	return ns, err
+}
+
 // WaitForPodTerminated waits for the pod to be terminated with the given reason.
 func (f *Framework) WaitForPodTerminated(podName, reason string) error {
 	return waitForPodTerminatedInNamespace(f.Client, podName, reason, f.Namespace.Name)
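For context (not part of the diff): CreateNamespace plus the afterEach loop above amounts to a small track-and-reap pattern, where every namespace the framework successfully creates is queued and the queue is drained and reset at teardown. A self-contained sketch of that pattern, with the real createTestingNS/deleteNS helpers replaced by injected stand-ins (createFn/deleteFn) and error handling reduced to a print:

package main

import (
	"fmt"
	"time"
)

// nsTracker mirrors the bookkeeping the Framework now does for namespaces.
// createFn and deleteFn stand in for the e2e helpers createTestingNS/deleteNS.
type nsTracker struct {
	createFn           func(base string) (string, error)
	deleteFn           func(name string, timeout time.Duration) error
	namespacesToDelete []string
}

func (t *nsTracker) CreateNamespace(base string) (string, error) {
	name, err := t.createFn(base)
	if err == nil {
		// Only namespaces that were actually created get queued for deletion.
		t.namespacesToDelete = append(t.namespacesToDelete, name)
	}
	return name, err
}

func (t *nsTracker) cleanup(timeout time.Duration) {
	for _, name := range t.namespacesToDelete {
		if err := t.deleteFn(name, timeout); err != nil {
			fmt.Printf("couldn't delete ns %q: %v\n", name, err)
		}
	}
	// Reset so a second cleanup pass (e.g. an aborted-suite sweep) is a no-op.
	t.namespacesToDelete = nil
}

func main() {
	t := &nsTracker{
		createFn: func(base string) (string, error) { return base + "-e2e-1234", nil },
		deleteFn: func(name string, _ time.Duration) error { fmt.Println("deleting", name); return nil },
	}
	if _, err := t.CreateNamespace("services"); err != nil {
		panic(err)
	}
	t.cleanup(5 * time.Minute)
}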
@@ -23,14 +23,13 @@ import (
 	"time"
 
 	"k8s.io/kubernetes/pkg/api"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/util/wait"
 
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 )
 
-func extinguish(c *client.Client, totalNS int, maxAllowedAfterDel int, maxSeconds int) {
+func extinguish(f *Framework, totalNS int, maxAllowedAfterDel int, maxSeconds int) {
 	var err error
 
 	By("Creating testing namespaces")
@@ -40,7 +39,7 @@ func extinguish(c *client.Client, totalNS int, maxAllowedAfterDel int, maxSecond
 		go func(n int) {
 			defer wg.Done()
 			defer GinkgoRecover()
-			_, err = createTestingNS(fmt.Sprintf("nslifetest-%v", n), c, nil)
+			_, err = f.CreateNamespace(fmt.Sprintf("nslifetest-%v", n), nil)
 			Expect(err).NotTo(HaveOccurred())
 		}(n)
 	}
@@ -49,7 +48,7 @@ func extinguish(c *client.Client, totalNS int, maxAllowedAfterDel int, maxSecond
 	//Wait 10 seconds, then SEND delete requests for all the namespaces.
 	By("Waiting 10 seconds")
 	time.Sleep(time.Duration(10 * time.Second))
-	deleted, err := deleteNamespaces(c, []string{"nslifetest"}, nil /* skipFilter */)
+	deleted, err := deleteNamespaces(f.Client, []string{"nslifetest"}, nil /* skipFilter */)
 	Expect(err).NotTo(HaveOccurred())
 	Expect(len(deleted)).To(Equal(totalNS))
 
@@ -58,7 +57,7 @@ func extinguish(c *client.Client, totalNS int, maxAllowedAfterDel int, maxSecond
 	expectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second,
 		func() (bool, error) {
 			var cnt = 0
-			nsList, err := c.Namespaces().List(api.ListOptions{})
+			nsList, err := f.Client.Namespaces().List(api.ListOptions{})
 			if err != nil {
 				return false, err
 			}
@@ -108,9 +107,9 @@ var _ = Describe("Namespaces [Serial]", func() {
 	f := NewFramework("namespaces")
 
 	It("should delete fast enough (90 percent of 100 namespaces in 150 seconds)",
-		func() { extinguish(f.Client, 100, 10, 150) })
+		func() { extinguish(f, 100, 10, 150) })
 
 	// On hold until etcd3; see #7372
 	It("should always delete fast (ALL of 100 namespaces in 150 seconds) [Feature:ComprehensiveNamespaceDraining]",
-		func() { extinguish(f.Client, 100, 0, 150) })
+		func() { extinguish(f, 100, 0, 150) })
 })
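For context (not part of the diff): extinguish passes when polling, every couple of seconds and for at most maxSeconds, finds that deletion has drained the "nslifetest" namespaces down to maxAllowedAfterDel. A self-contained sketch of that poll-until-drained check; pollUntil only mirrors the shape of the wait.Poll call used above, and countRemaining is a stand-in for listing namespaces through the client:

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil calls condition every interval until it reports done, returns an
// error, or the timeout expires (the contract extinguish relies on).
func pollUntil(interval, timeout time.Duration, condition func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := condition()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	maxAllowedAfterDel := 10
	// Stand-in for counting the namespaces whose names start with "nslifetest".
	countRemaining := func() (int, error) { return 3, nil }

	err := pollUntil(2*time.Second, 150*time.Second, func() (bool, error) {
		cnt, err := countRemaining()
		if err != nil {
			return false, err
		}
		// Done once deletion has drained the namespaces down to the allowance.
		return cnt <= maxAllowedAfterDel, nil
	})
	fmt.Println("drained in time:", err == nil)
}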
@@ -61,7 +61,6 @@ var _ = Describe("Services", func() {
 	f := NewFramework("services")
 
 	var c *client.Client
-	var extraNamespaces []string
 
 	BeforeEach(func() {
 		var err error
@@ -69,20 +68,6 @@ var _ = Describe("Services", func() {
 		Expect(err).NotTo(HaveOccurred())
 	})
 
-	AfterEach(func() {
-		if testContext.DeleteNamespace {
-			for _, ns := range extraNamespaces {
-				By(fmt.Sprintf("Destroying namespace %v", ns))
-				if err := deleteNS(c, ns, 5*time.Minute /* namespace deletion timeout */); err != nil {
-					Failf("Couldn't delete namespace %s: %s", ns, err)
-				}
-			}
-			extraNamespaces = nil
-		} else {
-			Logf("Found DeleteNamespace=false, skipping namespace deletion!")
-		}
-	})
-
 	// TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here.
 
 	It("should provide secure master service [Conformance]", func() {
@@ -327,9 +312,9 @@ var _ = Describe("Services", func() {
 
 		By("Removing iptable rules")
 		result, err := SSH(`
-		sudo iptables -t nat -F KUBE-SERVICES || true;
-		sudo iptables -t nat -F KUBE-PORTALS-HOST || true;
-		sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true`, host, testContext.Provider)
+			sudo iptables -t nat -F KUBE-SERVICES || true;
+			sudo iptables -t nat -F KUBE-PORTALS-HOST || true;
+			sudo iptables -t nat -F KUBE-PORTALS-CONTAINER || true`, host, testContext.Provider)
 		if err != nil || result.Code != 0 {
 			LogSSHResult(result)
 			Failf("couldn't remove iptable rules: %v", err)
@@ -426,11 +411,10 @@ var _ = Describe("Services", func() {
 		Logf("namespace for TCP test: %s", ns1)
 
 		By("creating a second namespace")
-		namespacePtr, err := createTestingNS("services", c, nil)
+		namespacePtr, err := f.CreateNamespace("services", nil)
 		Expect(err).NotTo(HaveOccurred())
 		ns2 := namespacePtr.Name // LB2 in ns2 on UDP
 		Logf("namespace for UDP test: %s", ns2)
-		extraNamespaces = append(extraNamespaces, ns2)
 
 		jig := NewServiceTestJig(c, serviceName)
 		nodeIP := pickNodeIP(jig.Client) // for later