Merge pull request #77053 from SataQiu/fix-golint-test-20190425
Fix golint failures of test/e2e/lifecycle
Commit 1626aa56d0
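The pattern applied throughout the diff below is mechanical: drop the Ginkgo/Gomega dot-imports, qualify every Describe/It/BeforeEach/Expect call with its package name, rename snake_case manifest variables (reconcile_addon_controller and friends) to camelCase, and add the doc comment golint wants on the exported SIGDescribe helper. A minimal sketch of the resulting style follows; it is illustrative only and not part of this commit — the package name, suite name, and TestLifecycleStyle wrapper are placeholders.

package lifecycle_test

import (
	"testing"

	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

// reconcileAddonController mirrors the renamed manifest variables
// (previously reconcile_addon_controller and similar snake_case names).
var reconcileAddonController = `
apiVersion: v1
kind: ReplicationController
`

// Every Ginkgo/Gomega entry point is qualified instead of dot-imported.
var _ = ginkgo.Describe("Addon update (style sketch)", func() {
	ginkgo.It("uses qualified assertions", func() {
		// Before the fix this read: Expect(reconcileAddonController).NotTo(BeEmpty())
		gomega.Expect(reconcileAddonController).NotTo(gomega.BeEmpty())
	})
})

// TestLifecycleStyle wires Gomega failures into Ginkgo and runs the specs.
func TestLifecycleStyle(t *testing.T) {
	gomega.RegisterFailHandler(ginkgo.Fail)
	ginkgo.RunSpecs(t, "lifecycle golint style sketch")
}

The hunks that follow apply exactly this transformation across test/e2e/lifecycle and drop the package from the list of known golint failures.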
@@ -619,7 +619,6 @@ test/e2e/chaosmonkey
 test/e2e/common
 test/e2e/framework
 test/e2e/framework/providers/gce
-test/e2e/lifecycle
 test/e2e/lifecycle/bootstrap
 test/e2e/network
 test/e2e/node
@@ -30,14 +30,14 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"

-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 imageutils "k8s.io/kubernetes/test/utils/image"
 )

 // TODO: it would probably be slightly better to build up the objects
 // in the code and then serialize to yaml.
-var reconcile_addon_controller = `
+var reconcileAddonController = `
 apiVersion: v1
 kind: ReplicationController
 metadata:
@@ -65,7 +65,7 @@ spec:
 `

 // Should update "reconcile" class addon.
-var reconcile_addon_controller_updated = `
+var reconcileAddonControllerUpdated = `
 apiVersion: v1
 kind: ReplicationController
 metadata:
@@ -93,7 +93,7 @@ spec:
 protocol: TCP
 `

-var ensure_exists_addon_service = `
+var ensureExistsAddonService = `
 apiVersion: v1
 kind: Service
 metadata:
@@ -112,7 +112,7 @@ spec:
 `

 // Should create but don't update "ensure exist" class addon.
-var ensure_exists_addon_service_updated = `
+var ensureExistsAddonServiceUpdated = `
 apiVersion: v1
 kind: Service
 metadata:
@@ -131,7 +131,7 @@ spec:
 k8s-app: addon-ensure-exists-test
 `

-var deprecated_label_addon_service = `
+var deprecatedLabelAddonService = `
 apiVersion: v1
 kind: Service
 metadata:
@@ -150,7 +150,7 @@ spec:
 `

 // Should update addon with label "kubernetes.io/cluster-service=true".
-var deprecated_label_addon_service_updated = `
+var deprecatedLabelAddonServiceUpdated = `
 apiVersion: v1
 kind: Service
 metadata:
@@ -170,7 +170,7 @@ spec:
 `

 // Should not create addon without valid label.
-var invalid_addon_controller = `
+var invalidAddonController = `
 apiVersion: v1
 kind: ReplicationController
 metadata:
@@ -214,7 +214,7 @@ var _ = SIGDescribe("Addon update", func() {
 var sshClient *ssh.Client
 f := framework.NewDefaultFramework("addon-update-test")

-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 // This test requires:
 // - SSH master access
 // ... so the provider check should be identical to the intersection of
@@ -225,17 +225,17 @@ var _ = SIGDescribe("Addon update", func() {

 var err error
 sshClient, err = getMasterSSHClient()
-Expect(err).NotTo(HaveOccurred(), "Failed to get the master SSH client.")
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to get the master SSH client.")
 })

-AfterEach(func() {
+ginkgo.AfterEach(func() {
 if sshClient != nil {
 sshClient.Close()
 }
 })

 // WARNING: the test is not parallel-friendly!
-It("should propagate add-on file changes [Slow]", func() {
+ginkgo.It("should propagate add-on file changes [Slow]", func() {
 // This test requires:
 // - SSH
 // - master access
@@ -244,7 +244,7 @@ var _ = SIGDescribe("Addon update", func() {
 framework.SkipUnlessProviderIs("gce")

 //these tests are long, so I squeezed several cases in one scenario
-Expect(sshClient).NotTo(BeNil())
+gomega.Expect(sshClient).NotTo(gomega.BeNil())
 dir = f.Namespace.Name // we use it only to give a unique string for each test execution

 temporaryRemotePathPrefix := "addon-test-dir"
@@ -262,18 +262,18 @@ var _ = SIGDescribe("Addon update", func() {
 svcAddonEnsureExistsUpdated := "addon-ensure-exists-service-updated.yaml"

 var remoteFiles []stringPair = []stringPair{
-{fmt.Sprintf(reconcile_addon_controller, addonNsName, serveHostnameImage), rcAddonReconcile},
-{fmt.Sprintf(reconcile_addon_controller_updated, addonNsName, serveHostnameImage), rcAddonReconcileUpdated},
-{fmt.Sprintf(deprecated_label_addon_service, addonNsName), svcAddonDeprecatedLabel},
-{fmt.Sprintf(deprecated_label_addon_service_updated, addonNsName), svcAddonDeprecatedLabelUpdated},
-{fmt.Sprintf(ensure_exists_addon_service, addonNsName), svcAddonEnsureExists},
-{fmt.Sprintf(ensure_exists_addon_service_updated, addonNsName), svcAddonEnsureExistsUpdated},
-{fmt.Sprintf(invalid_addon_controller, addonNsName, serveHostnameImage), rcInvalid},
+{fmt.Sprintf(reconcileAddonController, addonNsName, serveHostnameImage), rcAddonReconcile},
+{fmt.Sprintf(reconcileAddonControllerUpdated, addonNsName, serveHostnameImage), rcAddonReconcileUpdated},
+{fmt.Sprintf(deprecatedLabelAddonService, addonNsName), svcAddonDeprecatedLabel},
+{fmt.Sprintf(deprecatedLabelAddonServiceUpdated, addonNsName), svcAddonDeprecatedLabelUpdated},
+{fmt.Sprintf(ensureExistsAddonService, addonNsName), svcAddonEnsureExists},
+{fmt.Sprintf(ensureExistsAddonServiceUpdated, addonNsName), svcAddonEnsureExistsUpdated},
+{fmt.Sprintf(invalidAddonController, addonNsName, serveHostnameImage), rcInvalid},
 }

 for _, p := range remoteFiles {
 err := writeRemoteFile(sshClient, p.data, temporaryRemotePath, p.fileName, 0644)
-Expect(err).NotTo(HaveOccurred(), "Failed to write file %q at remote path %q with ssh client %+v", p.fileName, temporaryRemotePath, sshClient)
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to write file %q at remote path %q with ssh client %+v", p.fileName, temporaryRemotePath, sshClient)
 }

 // directory on kubernetes-master
@@ -282,23 +282,23 @@ var _ = SIGDescribe("Addon update", func() {

 // cleanup from previous tests
 _, _, _, err := sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix))
-Expect(err).NotTo(HaveOccurred(), "Failed to remove remote dir %q with ssh client %+v", destinationDirPrefix, sshClient)
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to remove remote dir %q with ssh client %+v", destinationDirPrefix, sshClient)

 defer sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix)) // ignore result in cleanup
 sshExecAndVerify(sshClient, fmt.Sprintf("sudo mkdir -p %s", destinationDir))

-By("copy invalid manifests to the destination dir")
+ginkgo.By("copy invalid manifests to the destination dir")
 sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcInvalid, destinationDir, rcInvalid))
 // we will verify at the end of the test that the objects weren't created from the invalid manifests

-By("copy new manifests")
+ginkgo.By("copy new manifests")
 sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcAddonReconcile, destinationDir, rcAddonReconcile))
 sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonDeprecatedLabel, destinationDir, svcAddonDeprecatedLabel))
 sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonEnsureExists, destinationDir, svcAddonEnsureExists))
 // Delete the "ensure exist class" addon at the end.
 defer func() {
 framework.Logf("Cleaning up ensure exist class addon.")
-Expect(f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil)).NotTo(HaveOccurred())
+gomega.Expect(f.ClientSet.CoreV1().Services(addonNsName).Delete("addon-ensure-exists-test", nil)).NotTo(gomega.HaveOccurred())
 }()

 waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-reconcile-test", true)
@@ -306,7 +306,7 @@ var _ = SIGDescribe("Addon update", func() {
 waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-ensure-exists-test", true)

 // Replace the manifests with new contents.
-By("update manifests")
+ginkgo.By("update manifests")
 sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcAddonReconcileUpdated, destinationDir, rcAddonReconcile))
 sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonDeprecatedLabelUpdated, destinationDir, svcAddonDeprecatedLabel))
 sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonEnsureExistsUpdated, destinationDir, svcAddonEnsureExists))
@@ -320,7 +320,7 @@ var _ = SIGDescribe("Addon update", func() {
 ensureExistSelector := labels.SelectorFromSet(labels.Set(map[string]string{"newLabel": "addon-ensure-exists-test"}))
 waitForServicewithSelectorInAddonTest(f.ClientSet, addonNsName, false, ensureExistSelector)

-By("remove manifests")
+ginkgo.By("remove manifests")
 sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcAddonReconcile))
 sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcAddonDeprecatedLabel))
 sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcAddonEnsureExists))
@@ -330,9 +330,9 @@ var _ = SIGDescribe("Addon update", func() {
 // "Ensure exist class" addon will not be deleted when manifest is removed.
 waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-ensure-exists-test", true)

-By("verify invalid addons weren't created")
+ginkgo.By("verify invalid addons weren't created")
 _, err = f.ClientSet.CoreV1().ReplicationControllers(addonNsName).Get("invalid-addon-test", metav1.GetOptions{})
-Expect(err).To(HaveOccurred())
+gomega.Expect(err).To(gomega.HaveOccurred())

 // Invalid addon manifests and the "ensure exist class" addon will be deleted by the deferred function.
 })
@@ -384,8 +384,8 @@ func getMasterSSHClient() (*ssh.Client, error) {

 func sshExecAndVerify(client *ssh.Client, cmd string) {
 _, _, rc, err := sshExec(client, cmd)
-Expect(err).NotTo(HaveOccurred(), "Failed to execute %q with ssh client %+v", cmd, client)
-Expect(rc).To(Equal(0), "error return code from executing command on the cluster: %s", cmd)
+gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to execute %q with ssh client %+v", cmd, client)
+gomega.Expect(rc).To(gomega.Equal(0), "error return code from executing command on the cluster: %s", cmd)
 }

 func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
@@ -37,7 +37,7 @@ import (
 "k8s.io/kubernetes/test/e2e/upgrades/storage"
 "k8s.io/kubernetes/test/utils/junit"

-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
 )

 var (
@@ -86,8 +86,8 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
 // Create the frameworks here because we can only create them
 // in a "Describe".
 testFrameworks := createUpgradeFrameworks(upgradeTests)
-Describe("master upgrade", func() {
-It("should maintain a functioning cluster [Feature:MasterUpgrade]", func() {
+ginkgo.Describe("master upgrade", func() {
+ginkgo.It("should maintain a functioning cluster [Feature:MasterUpgrade]", func() {
 upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
 framework.ExpectNoError(err)

@@ -109,8 +109,8 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
 })
 })

-Describe("node upgrade", func() {
-It("should maintain a functioning cluster [Feature:NodeUpgrade]", func() {
+ginkgo.Describe("node upgrade", func() {
+ginkgo.It("should maintain a functioning cluster [Feature:NodeUpgrade]", func() {
 upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
 framework.ExpectNoError(err)

@@ -131,8 +131,8 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
 })
 })

-Describe("cluster upgrade", func() {
-It("should maintain a functioning cluster [Feature:ClusterUpgrade]", func() {
+ginkgo.Describe("cluster upgrade", func() {
+ginkgo.It("should maintain a functioning cluster [Feature:ClusterUpgrade]", func() {
 upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
 framework.ExpectNoError(err)

@@ -160,8 +160,8 @@ var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() {
 // in a "Describe".
 testFrameworks := createUpgradeFrameworks(upgradeTests)

-Describe("cluster downgrade", func() {
-It("should maintain a functioning cluster [Feature:ClusterDowngrade]", func() {
+ginkgo.Describe("cluster downgrade", func() {
+ginkgo.It("should maintain a functioning cluster [Feature:ClusterDowngrade]", func() {
 upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
 framework.ExpectNoError(err)

@@ -190,8 +190,8 @@ var _ = SIGDescribe("etcd Upgrade [Feature:EtcdUpgrade]", func() {
 // Create the frameworks here because we can only create them
 // in a "Describe".
 testFrameworks := createUpgradeFrameworks(upgradeTests)
-Describe("etcd upgrade", func() {
-It("should maintain a functioning cluster", func() {
+ginkgo.Describe("etcd upgrade", func() {
+ginkgo.It("should maintain a functioning cluster", func() {
 upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), "")
 framework.ExpectNoError(err)

@@ -215,8 +215,8 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
 // Create the frameworks here because we can only create them
 // in a "Describe".
 testFrameworks := createUpgradeFrameworks(gpuUpgradeTests)
-Describe("master upgrade", func() {
-It("should NOT disrupt gpu pod [Feature:GPUMasterUpgrade]", func() {
+ginkgo.Describe("master upgrade", func() {
+ginkgo.It("should NOT disrupt gpu pod [Feature:GPUMasterUpgrade]", func() {
 upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
 framework.ExpectNoError(err)

@@ -233,8 +233,8 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
 runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.MasterUpgrade, upgradeFunc)
 })
 })
-Describe("cluster upgrade", func() {
-It("should be able to run gpu pod after upgrade [Feature:GPUClusterUpgrade]", func() {
+ginkgo.Describe("cluster upgrade", func() {
+ginkgo.It("should be able to run gpu pod after upgrade [Feature:GPUClusterUpgrade]", func() {
 upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
 framework.ExpectNoError(err)

@@ -253,8 +253,8 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
 runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
 })
 })
-Describe("cluster downgrade", func() {
-It("should be able to run gpu pod after downgrade [Feature:GPUClusterDowngrade]", func() {
+ginkgo.Describe("cluster downgrade", func() {
+ginkgo.It("should be able to run gpu pod after downgrade [Feature:GPUClusterDowngrade]", func() {
 upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
 framework.ExpectNoError(err)

@@ -275,14 +275,14 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
 })
 })

-var _ = Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]", func() {
+var _ = ginkgo.Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]", func() {
 f := framework.NewDefaultFramework("stateful-upgrade")

 // Create the frameworks here because we can only create them
 // in a "Describe".
 testFrameworks := createUpgradeFrameworks(statefulsetUpgradeTests)
 framework.KubeDescribe("stateful upgrade", func() {
-It("should maintain a functioning cluster", func() {
+ginkgo.It("should maintain a functioning cluster", func() {
 upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
 framework.ExpectNoError(err)

@@ -306,14 +306,14 @@ var _ = Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]", func()
 var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]", func() {
 f := framework.NewDefaultFramework("kube-proxy-ds-migration")

-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 framework.SkipUnlessProviderIs("gce")
 })

-Describe("Upgrade kube-proxy from static pods to a DaemonSet", func() {
+ginkgo.Describe("Upgrade kube-proxy from static pods to a DaemonSet", func() {
 testFrameworks := createUpgradeFrameworks(kubeProxyUpgradeTests)

-It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetUpgrade]", func() {
+ginkgo.It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetUpgrade]", func() {
 upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
 framework.ExpectNoError(err)

@@ -337,10 +337,10 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
 })
 })

-Describe("Downgrade kube-proxy from a DaemonSet to static pods", func() {
+ginkgo.Describe("Downgrade kube-proxy from a DaemonSet to static pods", func() {
 testFrameworks := createUpgradeFrameworks(kubeProxyDowngradeTests)

-It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetDowngrade]", func() {
+ginkgo.It("should maintain a functioning cluster [Feature:KubeProxyDaemonSetDowngrade]", func() {
 upgCtx, err := getUpgradeContext(f.ClientSet.Discovery(), *upgradeTarget)
 framework.ExpectNoError(err)

@@ -385,7 +385,7 @@ func (cma *chaosMonkeyAdapter) Test(sem *chaosmonkey.Semaphore) {
 defer finalizeUpgradeTest(start, cma.testReport)
 defer ready()
 if skippable, ok := cma.test.(upgrades.Skippable); ok && skippable.Skip(cma.upgCtx) {
-By("skipping test " + cma.test.Name())
+ginkgo.By("skipping test " + cma.test.Name())
 cma.testReport.Skipped = "skipping test " + cma.test.Name()
 return
 }
@@ -18,6 +18,7 @@ package lifecycle

 import "github.com/onsi/ginkgo"

+// SIGDescribe annotates the test with the SIG label.
 func SIGDescribe(text string, body func()) bool {
 return ginkgo.Describe("[sig-cluster-lifecycle] "+text, body)
 }
@@ -24,7 +24,7 @@ import (
 "strings"
 "time"

-. "github.com/onsi/ginkgo"
+"github.com/onsi/ginkgo"
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/common"
 "k8s.io/kubernetes/test/e2e/framework"
@@ -118,7 +118,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
 var additionalNodesZones []string
 var existingRCs []string

-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 framework.SkipUnlessProviderIs("gce")
 c = f.ClientSet
 ns = f.Namespace.Name
@@ -127,7 +127,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
 existingRCs = make([]string, 0)
 })

-AfterEach(func() {
+ginkgo.AfterEach(func() {
 // Clean-up additional worker nodes if the test execution was broken.
 for _, zone := range additionalNodesZones {
 removeWorkerNodes(zone)
@@ -176,7 +176,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
 verifyRCs(c, ns, existingRCs)
 }

-It("survive addition/removal replicas same zone [Serial][Disruptive]", func() {
+ginkgo.It("survive addition/removal replicas same zone [Serial][Disruptive]", func() {
 zone := framework.TestContext.CloudConfig.Zone
 step(None, "")
 numAdditionalReplicas := 2
@@ -188,7 +188,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
 }
 })

-It("survive addition/removal replicas different zones [Serial][Disruptive]", func() {
+ginkgo.It("survive addition/removal replicas different zones [Serial][Disruptive]", func() {
 zone := framework.TestContext.CloudConfig.Zone
 region := findRegionForZone(zone)
 zones := findZonesForRegion(region)
@@ -206,7 +206,7 @@ var _ = SIGDescribe("HA-master [Feature:HAMaster]", func() {
 }
 })

-It("survive addition/removal replicas multizone workers [Serial][Disruptive]", func() {
+ginkgo.It("survive addition/removal replicas multizone workers [Serial][Disruptive]", func() {
 zone := framework.TestContext.CloudConfig.Zone
 region := findRegionForZone(zone)
 zones := findZonesForRegion(region)
@@ -26,8 +26,8 @@ import (
 "k8s.io/kubernetes/pkg/master/ports"
 "k8s.io/kubernetes/test/e2e/framework"

-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 )

 var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
@@ -36,35 +36,35 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
 var node *v1.Node
 var nodeName string

-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
-Expect(len(nodes.Items)).NotTo(BeZero())
+gomega.Expect(len(nodes.Items)).NotTo(gomega.BeZero())
 node = &nodes.Items[0]
 nodeName = node.Name
 })

 // make sure kubelet readonly (10255) and cadvisor (4194) ports are disabled via API server proxy
-It(fmt.Sprintf("should not be able to proxy to the readonly kubelet port %v using proxy subresource", ports.KubeletReadOnlyPort), func() {
+ginkgo.It(fmt.Sprintf("should not be able to proxy to the readonly kubelet port %v using proxy subresource", ports.KubeletReadOnlyPort), func() {
 result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "pods/", ports.KubeletReadOnlyPort)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())

 var statusCode int
 result.StatusCode(&statusCode)
-Expect(statusCode).NotTo(Equal(http.StatusOK))
+gomega.Expect(statusCode).NotTo(gomega.Equal(http.StatusOK))
 })
-It("should not be able to proxy to cadvisor port 4194 using proxy subresource", func() {
+ginkgo.It("should not be able to proxy to cadvisor port 4194 using proxy subresource", func() {
 result, err := framework.NodeProxyRequest(f.ClientSet, nodeName, "containers/", 4194)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())

 var statusCode int
 result.StatusCode(&statusCode)
-Expect(statusCode).NotTo(Equal(http.StatusOK))
+gomega.Expect(statusCode).NotTo(gomega.Equal(http.StatusOK))
 })

 // make sure kubelet readonly (10255) and cadvisor (4194) ports are closed on the public IP address
 disabledPorts := []int{ports.KubeletReadOnlyPort, 4194}
 for _, port := range disabledPorts {
-It(fmt.Sprintf("should not have port %d open on its all public IP addresses", port), func() {
+ginkgo.It(fmt.Sprintf("should not have port %d open on its all public IP addresses", port), func() {
 portClosedTest(f, node, port)
 })
 }
@@ -73,7 +73,7 @@ var _ = SIGDescribe("Ports Security Check [Feature:KubeletSecurity]", func() {
 // checks whether the target port is closed
 func portClosedTest(f *framework.Framework, pickNode *v1.Node, port int) {
 nodeAddrs := framework.GetNodeAddresses(pickNode, v1.NodeExternalIP)
-Expect(len(nodeAddrs)).NotTo(BeZero())
+gomega.Expect(len(nodeAddrs)).NotTo(gomega.BeZero())

 for _, addr := range nodeAddrs {
 conn, err := net.DialTimeout("tcp", fmt.Sprintf("%s:%d", addr, port), 1*time.Minute)
@@ -26,8 +26,8 @@ import (
 clientset "k8s.io/client-go/kubernetes"
 "k8s.io/kubernetes/test/e2e/framework"

-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 )

 var _ = SIGDescribe("[Disruptive]NodeLease", func() {
@@ -37,11 +37,11 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 var ns string
 var group string

-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 c = f.ClientSet
 ns = f.Namespace.Name
 systemPods, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
-Expect(err).To(BeNil())
+gomega.Expect(err).To(gomega.BeNil())
 systemPodsNo = int32(len(systemPods))
 if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
 framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
@@ -50,22 +50,22 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 }
 })

-Describe("NodeLease deletion", func() {
+ginkgo.Describe("NodeLease deletion", func() {
 var skipped bool

-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 skipped = true
 framework.SkipUnlessProviderIs("gce", "gke", "aws")
 framework.SkipUnlessNodeCountIsAtLeast(2)
 skipped = false
 })

-AfterEach(func() {
+ginkgo.AfterEach(func() {
 if skipped {
 return
 }

-By("restoring the original node instance group size")
+ginkgo.By("restoring the original node instance group size")
 if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
 framework.Failf("Couldn't restore the original node instance group size: %v", err)
 }
@@ -78,7 +78,7 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 //
 // TODO(cjcullen) reduce this sleep (#19314)
 if framework.ProviderIs("gke") {
-By("waiting 5 minutes for all dead tunnels to be dropped")
+ginkgo.By("waiting 5 minutes for all dead tunnels to be dropped")
 time.Sleep(5 * time.Minute)
 }
 if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
@@ -90,21 +90,21 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 }
 // Many e2e tests assume that the cluster is fully healthy before they start. Wait until
 // the cluster is restored to health.
-By("waiting for system pods to successfully restart")
+ginkgo.By("waiting for system pods to successfully restart")
 err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
-Expect(err).To(BeNil())
+gomega.Expect(err).To(gomega.BeNil())
 })

-It("node lease should be deleted when corresponding node is deleted", func() {
+ginkgo.It("node lease should be deleted when corresponding node is deleted", func() {
 leaseClient := c.CoordinationV1beta1().Leases(corev1.NamespaceNodeLease)
 err := framework.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes, 10*time.Minute)
-Expect(err).To(BeNil())
+gomega.Expect(err).To(gomega.BeNil())

-By("verify node lease exists for every nodes")
+ginkgo.By("verify node lease exists for every nodes")
 originalNodes := framework.GetReadySchedulableNodesOrDie(c)
-Expect(len(originalNodes.Items)).To(Equal(framework.TestContext.CloudConfig.NumNodes))
+gomega.Expect(len(originalNodes.Items)).To(gomega.Equal(framework.TestContext.CloudConfig.NumNodes))

-Eventually(func() error {
+gomega.Eventually(func() error {
 pass := true
 for _, node := range originalNodes.Items {
 if _, err := leaseClient.Get(node.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
@@ -116,20 +116,20 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 return nil
 }
 return fmt.Errorf("some node lease is not ready")
-}, 1*time.Minute, 5*time.Second).Should(BeNil())
+}, 1*time.Minute, 5*time.Second).Should(gomega.BeNil())

 targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes - 1)
-By(fmt.Sprintf("decreasing cluster size to %d", targetNumNodes))
+ginkgo.By(fmt.Sprintf("decreasing cluster size to %d", targetNumNodes))
 err = framework.ResizeGroup(group, targetNumNodes)
-Expect(err).To(BeNil())
+gomega.Expect(err).To(gomega.BeNil())
 err = framework.WaitForGroupSize(group, targetNumNodes)
-Expect(err).To(BeNil())
+gomega.Expect(err).To(gomega.BeNil())
 err = framework.WaitForReadyNodes(c, framework.TestContext.CloudConfig.NumNodes-1, 10*time.Minute)
-Expect(err).To(BeNil())
+gomega.Expect(err).To(gomega.BeNil())
 targetNodes := framework.GetReadySchedulableNodesOrDie(c)
-Expect(len(targetNodes.Items)).To(Equal(int(targetNumNodes)))
+gomega.Expect(len(targetNodes.Items)).To(gomega.Equal(int(targetNumNodes)))

-By("verify node lease is deleted for the deleted node")
+ginkgo.By("verify node lease is deleted for the deleted node")
 var deletedNodeName string
 for _, originalNode := range originalNodes.Items {
 originalNodeName := originalNode.ObjectMeta.Name
@@ -141,23 +141,23 @@ var _ = SIGDescribe("[Disruptive]NodeLease", func() {
 deletedNodeName = originalNodeName
 break
 }
-Expect(deletedNodeName).NotTo(Equal(""))
-Eventually(func() error {
+gomega.Expect(deletedNodeName).NotTo(gomega.Equal(""))
+gomega.Eventually(func() error {
 if _, err := leaseClient.Get(deletedNodeName, metav1.GetOptions{}); err == nil {
 return fmt.Errorf("node lease is not deleted yet for node %q", deletedNodeName)
 }
 return nil
-}, 1*time.Minute, 5*time.Second).Should(BeNil())
+}, 1*time.Minute, 5*time.Second).Should(gomega.BeNil())

-By("verify node leases still exist for remaining nodes")
-Eventually(func() error {
+ginkgo.By("verify node leases still exist for remaining nodes")
+gomega.Eventually(func() error {
 for _, node := range targetNodes.Items {
 if _, err := leaseClient.Get(node.ObjectMeta.Name, metav1.GetOptions{}); err != nil {
 return err
 }
 }
 return nil
-}, 1*time.Minute, 5*time.Second).Should(BeNil())
+}, 1*time.Minute, 5*time.Second).Should(gomega.BeNil())
 })
 })
 })
@@ -32,8 +32,8 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 testutils "k8s.io/kubernetes/test/utils"

-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 )

 const (
@@ -53,7 +53,7 @@ const (
 var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 var f *framework.Framework

-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 // These tests requires SSH to nodes, so the provider check should be identical to there
 // (the limiting factor is the implementation of util.go's framework.GetSigner(...)).

@@ -61,14 +61,14 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
 })

-AfterEach(func() {
-if CurrentGinkgoTestDescription().Failed {
+ginkgo.AfterEach(func() {
+if ginkgo.CurrentGinkgoTestDescription().Failed {
 // Most of the reboot tests just make sure that addon/system pods are running, so dump
 // events for the kube-system namespace on failures
 namespaceName := metav1.NamespaceSystem
-By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName))
+ginkgo.By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName))
 events, err := f.ClientSet.CoreV1().Events(namespaceName).List(metav1.ListOptions{})
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())

 for _, e := range events.Items {
 framework.Logf("event for %v: %v %v: %v", e.InvolvedObject.Name, e.Source, e.Reason, e.Message)
@@ -82,38 +82,38 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 //
 // TODO(cjcullen) reduce this sleep (#19314)
 if framework.ProviderIs("gke") {
-By("waiting 5 minutes for all dead tunnels to be dropped")
+ginkgo.By("waiting 5 minutes for all dead tunnels to be dropped")
 time.Sleep(5 * time.Minute)
 }
 })

 f = framework.NewDefaultFramework("reboot")

-It("each node by ordering clean reboot and ensure they function upon restart", func() {
+ginkgo.It("each node by ordering clean reboot and ensure they function upon restart", func() {
 // clean shutdown and restart
 // We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is rebooted.
 testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && sudo reboot' >/dev/null 2>&1 &", nil)
 })

-It("each node by ordering unclean reboot and ensure they function upon restart", func() {
+ginkgo.It("each node by ordering unclean reboot and ensure they function upon restart", func() {
 // unclean shutdown and restart
 // We sleep 10 seconds to give some time for ssh command to cleanly finish before the node is shutdown.
 testReboot(f.ClientSet, "nohup sh -c 'echo 1 | sudo tee /proc/sys/kernel/sysrq && sleep 10 && echo b | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil)
 })

-It("each node by triggering kernel panic and ensure they function upon restart", func() {
+ginkgo.It("each node by triggering kernel panic and ensure they function upon restart", func() {
 // kernel panic
 // We sleep 10 seconds to give some time for ssh command to cleanly finish before kernel panic is triggered.
 testReboot(f.ClientSet, "nohup sh -c 'echo 1 | sudo tee /proc/sys/kernel/sysrq && sleep 10 && echo c | sudo tee /proc/sysrq-trigger' >/dev/null 2>&1 &", nil)
 })

-It("each node by switching off the network interface and ensure they function upon switch on", func() {
+ginkgo.It("each node by switching off the network interface and ensure they function upon switch on", func() {
 // switch the network interface off for a while to simulate a network outage
 // We sleep 10 seconds to give some time for ssh command to cleanly finish before network is down.
 testReboot(f.ClientSet, "nohup sh -c 'sleep 10 && sudo ip link set eth0 down && sleep 120 && sudo ip link set eth0 up && (sudo dhclient || true)' >/dev/null 2>&1 &", nil)
 })

-It("each node by dropping all inbound packets for a while and ensure they function afterwards", func() {
+ginkgo.It("each node by dropping all inbound packets for a while and ensure they function afterwards", func() {
 // tell the firewall to drop all inbound packets for a while
 // We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping inbound packets.
 // We still accept packages send from localhost to prevent monit from restarting kubelet.
@@ -121,7 +121,7 @@ var _ = SIGDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
 testReboot(f.ClientSet, dropPacketsScript("INPUT", tmpLogPath), catLogHook(tmpLogPath))
 })

-It("each node by dropping all outbound packets for a while and ensure they function afterwards", func() {
+ginkgo.It("each node by dropping all outbound packets for a while and ensure they function afterwards", func() {
 // tell the firewall to drop all outbound packets for a while
 // We sleep 10 seconds to give some time for ssh command to cleanly finish before starting dropping outbound packets.
 // We still accept packages send to localhost to prevent monit from restarting kubelet.
@ -26,8 +26,8 @@ import (
|
|||||||
"k8s.io/kubernetes/test/e2e/common"
|
"k8s.io/kubernetes/test/e2e/common"
|
||||||
"k8s.io/kubernetes/test/e2e/framework"
|
"k8s.io/kubernetes/test/e2e/framework"
|
||||||
|
|
||||||
. "github.com/onsi/ginkgo"
|
"github.com/onsi/ginkgo"
|
||||||
. "github.com/onsi/gomega"
|
"github.com/onsi/gomega"
|
||||||
)
|
)
|
||||||
|
|
||||||
func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
|
func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
|
||||||
@ -47,11 +47,11 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
|
|||||||
var ns string
|
var ns string
|
||||||
var group string
|
var group string
|
||||||
|
|
||||||
BeforeEach(func() {
|
ginkgo.BeforeEach(func() {
|
||||||
c = f.ClientSet
|
c = f.ClientSet
|
||||||
ns = f.Namespace.Name
|
ns = f.Namespace.Name
|
||||||
systemPods, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
|
systemPods, err := framework.GetPodsInNamespace(c, ns, map[string]string{})
|
||||||
Expect(err).NotTo(HaveOccurred())
|
gomega.Expect(err).NotTo(gomega.HaveOccurred())
|
||||||
systemPodsNo = int32(len(systemPods))
|
systemPodsNo = int32(len(systemPods))
|
||||||
if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
|
if strings.Index(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") >= 0 {
|
||||||
framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
|
framework.Failf("Test dose not support cluster setup with more than one MIG: %s", framework.TestContext.CloudConfig.NodeInstanceGroup)
|
||||||
@@ -61,23 +61,23 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 })

 // Slow issue #13323 (8 min)
-Describe("Resize [Slow]", func() {
+ginkgo.Describe("Resize [Slow]", func() {
 var originalNodeCount int32
 var skipped bool

-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 skipped = true
 framework.SkipUnlessProviderIs("gce", "gke", "aws")
 framework.SkipUnlessNodeCountIsAtLeast(2)
 skipped = false
 })

-AfterEach(func() {
+ginkgo.AfterEach(func() {
 if skipped {
 return
 }

-By("restoring the original node instance group size")
+ginkgo.By("restoring the original node instance group size")
 if err := framework.ResizeGroup(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
 framework.Failf("Couldn't restore the original node instance group size: %v", err)
 }
@@ -90,7 +90,7 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 //
 // TODO(cjcullen) reduce this sleep (#19314)
 if framework.ProviderIs("gke") {
-By("waiting 5 minutes for all dead tunnels to be dropped")
+ginkgo.By("waiting 5 minutes for all dead tunnels to be dropped")
 time.Sleep(5 * time.Minute)
 }
 if err := framework.WaitForGroupSize(group, int32(framework.TestContext.CloudConfig.NumNodes)); err != nil {
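framework.WaitForGroupSize and framework.WaitForReadyNodes, used here and in the next hunk, are blocking waits provided by the e2e framework. As a rough illustration of the pattern they follow, here is a generic polling loop built on apimachinery's wait package; the interval, timeout handling, readiness predicate, and the context-free List signature are assumptions for illustration, not the framework's actual implementation.

package lifecycle

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForNodeCount polls the API server until the number of registered nodes
// matches the expected count or the timeout expires.
func waitForNodeCount(c clientset.Interface, want int, timeout time.Duration) error {
	return wait.Poll(20*time.Second, timeout, func() (bool, error) {
		nodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
		if err != nil {
			// Treat transient list errors as "not done yet" rather than failing the wait.
			fmt.Printf("listing nodes failed, retrying: %v\n", err)
			return false, nil
		}
		return len(nodes.Items) == want, nil
	})
}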
@@ -102,67 +102,67 @@ var _ = SIGDescribe("Nodes [Disruptive]", func() {
 }
 // Many e2e tests assume that the cluster is fully healthy before they start. Wait until
 // the cluster is restored to health.
-By("waiting for system pods to successfully restart")
+ginkgo.By("waiting for system pods to successfully restart")
 err := framework.WaitForPodsRunningReady(c, metav1.NamespaceSystem, systemPodsNo, 0, framework.PodReadyBeforeTimeout, map[string]string{})
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 })

-It("should be able to delete nodes", func() {
+ginkgo.It("should be able to delete nodes", func() {
 // Create a replication controller for a service that serves its hostname.
 // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
 name := "my-hostname-delete-node"
 numNodes, err := framework.NumberOfRegisteredNodes(c)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 originalNodeCount = int32(numNodes)
 common.NewRCByName(c, ns, name, originalNodeCount, nil)
 err = framework.VerifyPods(c, ns, name, true, originalNodeCount)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())

 targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes - 1)
-By(fmt.Sprintf("decreasing cluster size to %d", targetNumNodes))
+ginkgo.By(fmt.Sprintf("decreasing cluster size to %d", targetNumNodes))
 err = framework.ResizeGroup(group, targetNumNodes)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 err = framework.WaitForGroupSize(group, targetNumNodes)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 err = framework.WaitForReadyNodes(c, int(originalNodeCount-1), 10*time.Minute)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())

-By("waiting 1 minute for the watch in the podGC to catch up, remove any pods scheduled on " +
+ginkgo.By("waiting 1 minute for the watch in the podGC to catch up, remove any pods scheduled on " +
 "the now non-existent node and the RC to recreate it")
 time.Sleep(time.Minute)

-By("verifying whether the pods from the removed node are recreated")
+ginkgo.By("verifying whether the pods from the removed node are recreated")
 err = framework.VerifyPods(c, ns, name, true, originalNodeCount)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 })

 // TODO: Bug here - testName is not correct
-It("should be able to add nodes", func() {
+ginkgo.It("should be able to add nodes", func() {
 // Create a replication controller for a service that serves its hostname.
 // The source for the Docker container kubernetes/serve_hostname is in contrib/for-demos/serve_hostname
 name := "my-hostname-add-node"
 common.NewSVCByName(c, ns, name)
 numNodes, err := framework.NumberOfRegisteredNodes(c)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 originalNodeCount = int32(numNodes)
 common.NewRCByName(c, ns, name, originalNodeCount, nil)
 err = framework.VerifyPods(c, ns, name, true, originalNodeCount)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())

 targetNumNodes := int32(framework.TestContext.CloudConfig.NumNodes + 1)
-By(fmt.Sprintf("increasing cluster size to %d", targetNumNodes))
+ginkgo.By(fmt.Sprintf("increasing cluster size to %d", targetNumNodes))
 err = framework.ResizeGroup(group, targetNumNodes)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 err = framework.WaitForGroupSize(group, targetNumNodes)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 err = framework.WaitForReadyNodes(c, int(originalNodeCount+1), 10*time.Minute)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())

-By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", originalNodeCount+1))
+ginkgo.By(fmt.Sprintf("increasing size of the replication controller to %d and verifying all pods are running", originalNodeCount+1))
 err = resizeRC(c, ns, name, originalNodeCount+1)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 err = framework.VerifyPods(c, ns, name, true, originalNodeCount+1)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 })
 })
 })
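Both resize tests above rely on common.NewRCByName to stand up a ReplicationController running the serve-hostname image, one replica per node, and on VerifyPods to check that the expected pods are running. The helper's body is not part of this diff; the sketch below shows roughly what such a helper creates, again assuming the pre-context-aware client-go API. The container name, label key, and port are illustrative assumptions rather than the helper's actual values.

package lifecycle

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	imageutils "k8s.io/kubernetes/test/utils/image"
)

// newServeHostnameRC sketches the kind of ReplicationController these tests create:
// `replicas` pods running the serve-hostname image, selected by a simple name label
// so that a VerifyPods-style check can find them.
func newServeHostnameRC(c clientset.Interface, ns, name string, replicas int32) error {
	rc := &v1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &replicas,
			Selector: map[string]string{"name": name},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"name": name}},
				Spec: v1.PodSpec{
					Containers: []v1.Container{{
						Name:  name,
						Image: imageutils.GetE2EImage(imageutils.ServeHostname),
						Ports: []v1.ContainerPort{{ContainerPort: 9376}},
					}},
				},
			},
		},
	}
	_, err := c.CoreV1().ReplicationControllers(ns).Create(rc)
	return err
}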
@@ -27,8 +27,8 @@ import (
 "k8s.io/kubernetes/test/e2e/framework"
 testutils "k8s.io/kubernetes/test/utils"

-. "github.com/onsi/ginkgo"
-. "github.com/onsi/gomega"
+"github.com/onsi/ginkgo"
+"github.com/onsi/gomega"
 )

 func nodeNames(nodes []v1.Node) []string {
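The hunk above stops at the signature of nodeNames; its body is not shown in this diff. A minimal implementation consistent with how it is used below (logging node names before and after the restart) would simply collect each node's metadata name, along these lines:

package lifecycle

import v1 "k8s.io/api/core/v1"

// nodeNames returns the names of the given nodes, in order, for logging.
func nodeNames(nodes []v1.Node) []string {
	result := make([]string, 0, len(nodes))
	for i := range nodes {
		result = append(result, nodes[i].ObjectMeta.Name)
	}
	return result
}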
@@ -47,23 +47,23 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 var numNodes int
 var systemNamespace string

-BeforeEach(func() {
+ginkgo.BeforeEach(func() {
 // This test requires the ability to restart all nodes, so the provider
 // check must be identical to that call.
 framework.SkipUnlessProviderIs("gce", "gke")
 var err error
 ps, err = testutils.NewPodStore(f.ClientSet, metav1.NamespaceSystem, labels.Everything(), fields.Everything())
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 numNodes, err = framework.NumberOfRegisteredNodes(f.ClientSet)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 systemNamespace = metav1.NamespaceSystem

-By("ensuring all nodes are ready")
+ginkgo.By("ensuring all nodes are ready")
 originalNodes, err = framework.CheckNodesReady(f.ClientSet, numNodes, framework.NodeReadyInitialTimeout)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 framework.Logf("Got the following nodes before restart: %v", nodeNames(originalNodes))

-By("ensuring all pods are running and ready")
+ginkgo.By("ensuring all pods are running and ready")
 allPods := ps.List()
 pods := framework.FilterNonRestartablePods(allPods)

@@ -77,25 +77,25 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 }
 })

-AfterEach(func() {
+ginkgo.AfterEach(func() {
 if ps != nil {
 ps.Stop()
 }
 })

-It("should restart all nodes and ensure all nodes and pods recover", func() {
+ginkgo.It("should restart all nodes and ensure all nodes and pods recover", func() {
-By("restarting all of the nodes")
+ginkgo.By("restarting all of the nodes")
 err := common.RestartNodes(f.ClientSet, originalNodes)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())

-By("ensuring all nodes are ready after the restart")
+ginkgo.By("ensuring all nodes are ready after the restart")
 nodesAfter, err := framework.CheckNodesReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 framework.Logf("Got the following nodes after restart: %v", nodeNames(nodesAfter))

 // Make sure that we have the same number of nodes. We're not checking
 // that the names match because that's implementation specific.
-By("ensuring the same number of nodes exist after the restart")
+ginkgo.By("ensuring the same number of nodes exist after the restart")
 if len(originalNodes) != len(nodesAfter) {
 framework.Failf("Had %d nodes before nodes were restarted, but now only have %d",
 len(originalNodes), len(nodesAfter))
@@ -104,10 +104,10 @@ var _ = SIGDescribe("Restart [Disruptive]", func() {
 // Make sure that we have the same number of pods. We're not checking
 // that the names match because they are recreated with different names
 // across node restarts.
-By("ensuring the same number of pods are running and ready after restart")
+ginkgo.By("ensuring the same number of pods are running and ready after restart")
 podCheckStart := time.Now()
 podNamesAfter, err := framework.WaitForNRestartablePods(ps, len(originalPodNames), framework.RestartPodReadyAgainTimeout)
-Expect(err).NotTo(HaveOccurred())
+gomega.Expect(err).NotTo(gomega.HaveOccurred())
 remaining := framework.RestartPodReadyAgainTimeout - time.Since(podCheckStart)
 if !framework.CheckPodsRunningReadyOrSucceeded(f.ClientSet, systemNamespace, podNamesAfter, remaining) {
 pods := ps.List()