Merge pull request #89787 from oomichi/NodeUpgrade

Move NodeUpgrade() into GCP e2e tests
Kubernetes Prow Robot, 2020-04-06 11:20:03 -07:00 (committed by GitHub)
commit 64d75b3bad
2 changed files with 131 additions and 128 deletions


@@ -131,7 +131,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
start := time.Now()
defer finalizeUpgradeTest(start, nodeUpgradeTest)
target := upgCtx.Versions[1].Version.String()
- framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
+ framework.ExpectNoError(nodeUpgrade(f, target, *upgradeImage))
framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
}
runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgrades.NodeUpgrade, upgradeFunc)
@@ -152,7 +152,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.MasterUpgrade(f, target))
framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
- framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
+ framework.ExpectNoError(nodeUpgrade(f, target, *upgradeImage))
framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
}
runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
@@ -181,7 +181,7 @@ var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() {
defer finalizeUpgradeTest(start, clusterDowngradeTest)
// Yes this really is a downgrade. And nodes must downgrade first.
target := upgCtx.Versions[1].Version.String()
- framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
+ framework.ExpectNoError(nodeUpgrade(f, target, *upgradeImage))
framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
framework.ExpectNoError(framework.MasterUpgrade(f, target))
framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
@@ -251,7 +251,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.MasterUpgrade(f, target))
framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
- framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
+ framework.ExpectNoError(nodeUpgrade(f, target, *upgradeImage))
framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
}
runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
@@ -269,7 +269,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
start := time.Now()
defer finalizeUpgradeTest(start, gpuDowngradeTest)
target := upgCtx.Versions[1].Version.String()
- framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
+ framework.ExpectNoError(nodeUpgrade(f, target, *upgradeImage))
framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
framework.ExpectNoError(framework.MasterUpgrade(f, target))
framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
@@ -299,7 +299,7 @@ var _ = ginkgo.Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]",
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.MasterUpgrade(f, target))
framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
- framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
+ framework.ExpectNoError(nodeUpgrade(f, target, *upgradeImage))
framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
}
runUpgradeSuite(f, statefulsetUpgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
@@ -334,7 +334,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
target := upgCtx.Versions[1].Version.String()
framework.ExpectNoError(framework.MasterUpgradeGCEWithKubeProxyDaemonSet(target, true))
framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
- framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, *upgradeImage, true))
+ framework.ExpectNoError(nodeUpgradeGCEWithKubeProxyDaemonSet(f, target, *upgradeImage, true))
framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
}
runUpgradeSuite(f, kubeProxyUpgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
@@ -360,7 +360,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
defer finalizeUpgradeTest(start, kubeProxyDowngradeTest)
// Yes this really is a downgrade. And nodes must downgrade first.
target := upgCtx.Versions[1].Version.String()
- framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, *upgradeImage, false))
+ framework.ExpectNoError(nodeUpgradeGCEWithKubeProxyDaemonSet(f, target, *upgradeImage, false))
framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
framework.ExpectNoError(framework.MasterUpgradeGCEWithKubeProxyDaemonSet(target, false))
framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
@@ -605,3 +605,113 @@ func checkNodesVersions(cs clientset.Interface, want string) error {
}
return nil
}
// nodeUpgrade upgrades nodes on GCE/GKE.
func nodeUpgrade(f *framework.Framework, v string, img string) error {
// Perform the upgrade.
var err error
switch framework.TestContext.Provider {
case "gce":
err = nodeUpgradeGCE(v, img, false)
case "gke":
err = nodeUpgradeGKE(f.Namespace.Name, v, img)
default:
err = fmt.Errorf("nodeUpgrade() is not implemented for provider %s", framework.TestContext.Provider)
}
if err != nil {
return err
}
return waitForNodesReadyAfterUpgrade(f)
}
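As a side note, the provider switch above fails fast for unsupported providers rather than silently skipping the upgrade. A minimal, self-contained sketch of that dispatch pattern (hypothetical standalone program, not part of this commit):

package main

import "fmt"

// upgradeNodesFor mirrors the provider switch in nodeUpgrade above;
// "gce" and "gke" are the only providers with an implementation.
func upgradeNodesFor(provider string) error {
	switch provider {
	case "gce", "gke":
		return nil // the provider-specific upgrade would run here
	default:
		return fmt.Errorf("nodeUpgrade() is not implemented for provider %s", provider)
	}
}

func main() {
	fmt.Println(upgradeNodesFor("aws"))
	// Output: nodeUpgrade() is not implemented for provider aws
}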
// nodeUpgradeGCEWithKubeProxyDaemonSet upgrades nodes on GCE, enabling or disabling the kube-proxy DaemonSet as requested.
// TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default.
func nodeUpgradeGCEWithKubeProxyDaemonSet(f *framework.Framework, v string, img string, enableKubeProxyDaemonSet bool) error {
// Perform the upgrade.
if err := nodeUpgradeGCE(v, img, enableKubeProxyDaemonSet); err != nil {
return err
}
return waitForNodesReadyAfterUpgrade(f)
}
// TODO(mrhohn): Remove 'enableKubeProxyDaemonSet' when kube-proxy is run as a DaemonSet by default.
func nodeUpgradeGCE(rawV, img string, enableKubeProxyDaemonSet bool) error {
v := "v" + rawV
env := append(os.Environ(), fmt.Sprintf("KUBE_PROXY_DAEMONSET=%v", enableKubeProxyDaemonSet))
if img != "" {
env = append(env, "KUBE_NODE_OS_DISTRIBUTION="+img)
_, _, err := framework.RunCmdEnv(env, framework.GCEUpgradeScript(), "-N", "-o", v)
return err
}
_, _, err := framework.RunCmdEnv(env, framework.GCEUpgradeScript(), "-N", v)
return err
}
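As a rough illustration of the two branches above (hypothetical version and image; the real script path comes from GCEUpgradeScript()), the resulting invocations look like this sketch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	script := "cluster/gce/upgrade.sh" // hypothetical GCEUpgradeScript() result
	v := "v1.18.0"                     // hypothetical "v" + rawV

	// No image override: plain node upgrade.
	fmt.Println("KUBE_PROXY_DAEMONSET=false " + strings.Join([]string{script, "-N", v}, " "))

	// Image override: KUBE_NODE_OS_DISTRIBUTION is exported and -o is added.
	fmt.Println("KUBE_PROXY_DAEMONSET=false KUBE_NODE_OS_DISTRIBUTION=gci " +
		strings.Join([]string{script, "-N", "-o", v}, " "))
}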
func nodeUpgradeGKE(namespace string, v string, img string) error {
framework.Logf("Upgrading nodes to version %q and image %q", v, img)
nps, err := nodePoolsGKE()
if err != nil {
return err
}
framework.Logf("Found node pools %v", nps)
for _, np := range nps {
args := []string{
"container",
"clusters",
fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
framework.LocationParamGKE(),
"upgrade",
framework.TestContext.CloudConfig.Cluster,
fmt.Sprintf("--node-pool=%s", np),
fmt.Sprintf("--cluster-version=%s", v),
"--quiet",
}
if len(img) > 0 {
args = append(args, fmt.Sprintf("--image-type=%s", img))
}
_, _, err = framework.RunCmd("gcloud", framework.AppendContainerCommandGroupIfNeeded(args)...)
if err != nil {
return err
}
framework.WaitForSSHTunnels(namespace)
}
return nil
}
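Concretely, each loop iteration above shells out to gcloud with arguments like the following (a sketch with hypothetical project, zone, cluster, and pool names; AppendContainerCommandGroupIfNeeded may additionally prepend "beta" for regional clusters):

package main

import (
	"fmt"
	"strings"
)

func main() {
	args := []string{
		"container", "clusters",
		"--project=my-project", // TestContext.CloudConfig.ProjectID (hypothetical)
		"--zone=us-central1-a", // LocationParamGKE() for a zonal cluster (hypothetical)
		"upgrade", "my-cluster", // TestContext.CloudConfig.Cluster (hypothetical)
		"--node-pool=default-pool",
		"--cluster-version=1.18.0",
		"--quiet",
	}
	fmt.Println("gcloud " + strings.Join(args, " "))
}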
func nodePoolsGKE() ([]string, error) {
args := []string{
"container",
"node-pools",
fmt.Sprintf("--project=%s", framework.TestContext.CloudConfig.ProjectID),
framework.LocationParamGKE(),
"list",
fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster),
"--format=get(name)",
}
stdout, _, err := framework.RunCmd("gcloud", framework.AppendContainerCommandGroupIfNeeded(args)...)
if err != nil {
return nil, err
}
if len(strings.TrimSpace(stdout)) == 0 {
return []string{}, nil
}
return strings.Fields(stdout), nil
}
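The --format=get(name) flag makes gcloud print one pool name per line, so the whitespace split above yields the pool names directly; a minimal illustration (hypothetical output):

package main

import (
	"fmt"
	"strings"
)

func main() {
	stdout := "default-pool\ngpu-pool\n" // hypothetical gcloud output
	fmt.Println(strings.Fields(stdout))  // [default-pool gpu-pool]
}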
func waitForNodesReadyAfterUpgrade(f *framework.Framework) error {
// Wait for it to complete and validate nodes are healthy.
//
// TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in
// GKE; the operation shouldn't return until they all are.
numNodes, err := e2enode.TotalRegistered(f.ClientSet)
if err != nil {
return fmt.Errorf("couldn't detect number of nodes")
}
framework.Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", framework.RestartNodeReadyAgainTimeout, numNodes)
if _, err := e2enode.CheckReady(f.ClientSet, numNodes, framework.RestartNodeReadyAgainTimeout); err != nil {
return err
}
return nil
}
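e2enode.CheckReady blocks until the expected number of nodes reports Ready or the timeout expires. Conceptually it is a poll loop; a sketch of that idea (hypothetical countReady callback, not the framework's actual implementation):

package main

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitReady polls countReady until at least want nodes are Ready or timeout elapses.
func waitReady(countReady func() (int, error), want int, timeout time.Duration) error {
	return wait.PollImmediate(30*time.Second, timeout, func() (bool, error) {
		n, err := countReady()
		if err != nil {
			return false, nil // treat errors as transient and keep polling
		}
		return n >= want, nil
	})
}

func main() {
	fake := func() (int, error) { return 3, nil } // pretend 3 nodes are Ready
	_ = waitReady(fake, 3, time.Minute)
}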


@@ -21,7 +21,6 @@ import (
"os"
"path"
"path/filepath"
"strings"
"sync"
"time"
@@ -67,7 +66,7 @@ func etcdUpgradeGCE(targetStorage, targetVersion string) error {
"STORAGE_BACKEND="+targetStorage,
"TEST_ETCD_IMAGE="+etcdImage)
- _, _, err := RunCmdEnv(env, gceUpgradeScript(), "-l", "-M")
+ _, _, err := RunCmdEnv(env, GCEUpgradeScript(), "-l", "-M")
return err
}
@@ -93,11 +92,12 @@ func masterUpgradeGCE(rawV string, enableKubeProxyDaemonSet bool) error {
}
v := "v" + rawV
- _, _, err := RunCmdEnv(env, gceUpgradeScript(), "-M", v)
+ _, _, err := RunCmdEnv(env, GCEUpgradeScript(), "-M", v)
return err
}
- func locationParamGKE() string {
+ // LocationParamGKE returns the gcloud location flag (--region or --zone) for the cluster under test.
+ func LocationParamGKE() string {
if TestContext.CloudConfig.MultiMaster {
// GKE Regional Clusters are being tested.
return fmt.Sprintf("--region=%s", TestContext.CloudConfig.Region)
@@ -105,7 +105,8 @@ func locationParamGKE() string {
return fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone)
}
- func appendContainerCommandGroupIfNeeded(args []string) []string {
+ // AppendContainerCommandGroupIfNeeded prepends the "beta" command group to the gcloud args when needed.
+ func AppendContainerCommandGroupIfNeeded(args []string) []string {
if TestContext.CloudConfig.Region != "" {
// TODO(wojtek-t): Get rid of it once Regional Clusters go to GA.
return append([]string{"beta"}, args...)
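The effect of the prepend above, sketched with hypothetical args: regional clusters route through gcloud's beta command group, zonal clusters do not.

package main

import (
	"fmt"
	"strings"
)

func main() {
	args := []string{"container", "clusters", "list"}
	regional := true // hypothetical: TestContext.CloudConfig.Region != ""
	if regional {
		args = append([]string{"beta"}, args...)
	}
	fmt.Println("gcloud " + strings.Join(args, " ")) // gcloud beta container clusters list
}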
@@ -120,19 +121,19 @@ func MasterUpgradeGKE(namespace string, v string) error {
"container",
"clusters",
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
- locationParamGKE(),
+ LocationParamGKE(),
"upgrade",
TestContext.CloudConfig.Cluster,
"--master",
fmt.Sprintf("--cluster-version=%s", v),
"--quiet",
}
_, _, err := RunCmd("gcloud", appendContainerCommandGroupIfNeeded(args)...)
_, _, err := RunCmd("gcloud", AppendContainerCommandGroupIfNeeded(args)...)
if err != nil {
return err
}
- waitForSSHTunnels(namespace)
+ WaitForSSHTunnels(namespace)
return nil
}
@@ -174,124 +175,16 @@ func masterUpgradeKubernetesAnywhere(v string) error {
return nil
}
// NodeUpgrade upgrades nodes on GCE/GKE.
func NodeUpgrade(f *Framework, v string, img string) error {
// Perform the upgrade.
var err error
switch TestContext.Provider {
case "gce":
err = nodeUpgradeGCE(v, img, false)
case "gke":
err = nodeUpgradeGKE(f.Namespace.Name, v, img)
default:
err = fmt.Errorf("NodeUpgrade() is not implemented for provider %s", TestContext.Provider)
}
if err != nil {
return err
}
return waitForNodesReadyAfterUpgrade(f)
}
// NodeUpgradeGCEWithKubeProxyDaemonSet upgrades nodes on GCE with enabling/disabling the daemon set of kube-proxy.
// TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default.
func NodeUpgradeGCEWithKubeProxyDaemonSet(f *Framework, v string, img string, enableKubeProxyDaemonSet bool) error {
// Perform the upgrade.
if err := nodeUpgradeGCE(v, img, enableKubeProxyDaemonSet); err != nil {
return err
}
return waitForNodesReadyAfterUpgrade(f)
}
func waitForNodesReadyAfterUpgrade(f *Framework) error {
// Wait for it to complete and validate nodes are healthy.
//
// TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in
// GKE; the operation shouldn't return until they all are.
numNodes, err := e2enode.TotalRegistered(f.ClientSet)
if err != nil {
return fmt.Errorf("couldn't detect number of nodes")
}
Logf("Waiting up to %v for all %d nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout, numNodes)
if _, err := e2enode.CheckReady(f.ClientSet, numNodes, RestartNodeReadyAgainTimeout); err != nil {
return err
}
return nil
}
// TODO(mrhohn): Remove 'enableKubeProxyDaemonSet' when kube-proxy is run as a DaemonSet by default.
func nodeUpgradeGCE(rawV, img string, enableKubeProxyDaemonSet bool) error {
v := "v" + rawV
env := append(os.Environ(), fmt.Sprintf("KUBE_PROXY_DAEMONSET=%v", enableKubeProxyDaemonSet))
if img != "" {
env = append(env, "KUBE_NODE_OS_DISTRIBUTION="+img)
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-N", "-o", v)
return err
}
_, _, err := RunCmdEnv(env, gceUpgradeScript(), "-N", v)
return err
}
func nodeUpgradeGKE(namespace string, v string, img string) error {
Logf("Upgrading nodes to version %q and image %q", v, img)
nps, err := nodePoolsGKE()
if err != nil {
return err
}
Logf("Found node pools %v", nps)
for _, np := range nps {
args := []string{
"container",
"clusters",
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
locationParamGKE(),
"upgrade",
TestContext.CloudConfig.Cluster,
fmt.Sprintf("--node-pool=%s", np),
fmt.Sprintf("--cluster-version=%s", v),
"--quiet",
}
if len(img) > 0 {
args = append(args, fmt.Sprintf("--image-type=%s", img))
}
_, _, err = RunCmd("gcloud", appendContainerCommandGroupIfNeeded(args)...)
if err != nil {
return err
}
waitForSSHTunnels(namespace)
}
return nil
}
func nodePoolsGKE() ([]string, error) {
args := []string{
"container",
"node-pools",
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
locationParamGKE(),
"list",
fmt.Sprintf("--cluster=%s", TestContext.CloudConfig.Cluster),
"--format=get(name)",
}
stdout, _, err := RunCmd("gcloud", appendContainerCommandGroupIfNeeded(args)...)
if err != nil {
return nil, err
}
if len(strings.TrimSpace(stdout)) == 0 {
return []string{}, nil
}
return strings.Fields(stdout), nil
}
- func gceUpgradeScript() string {
+ // GCEUpgradeScript returns the path of the upgrade script for GCE clusters.
+ func GCEUpgradeScript() string {
if len(TestContext.GCEUpgradeScript) == 0 {
return path.Join(TestContext.RepoRoot, "cluster/gce/upgrade.sh")
}
return TestContext.GCEUpgradeScript
}
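With TestContext.GCEUpgradeScript unset, the path is derived from the repository root; for example (hypothetical RepoRoot):

package main

import (
	"fmt"
	"path"
)

func main() {
	repoRoot := "/go/src/k8s.io/kubernetes" // hypothetical TestContext.RepoRoot
	fmt.Println(path.Join(repoRoot, "cluster/gce/upgrade.sh"))
	// /go/src/k8s.io/kubernetes/cluster/gce/upgrade.sh
}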
- func waitForSSHTunnels(namespace string) {
+ // WaitForSSHTunnels waits for SSH tunnels to be established, using a short-lived busybox pod in the given namespace.
+ func WaitForSSHTunnels(namespace string) {
Logf("Waiting for SSH tunnels to establish")
RunKubectl(namespace, "run", "ssh-tunnel-test",
"--image=busybox",