Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-25 04:33:26 +00:00
Merge pull request #28575 from fejta/check-resources

Automatic merge from submit-queue

Move the kubemark, skew, upgrade, and leak-check logic into hack/e2e.go, and out of the `e2e_runner.sh`. Also make `start-kubemark.sh` find `kubernetes-server-linux-amd64.tar.gz` automatically instead of depending on a `RUN_FROM_DISTRO` env var.

@gmarek can you review the kubemark-related changes? @zmerlynn are you the right person to review upgrade changes? @spxtr take a look at the overall `e2e.go` changes.

This is for https://github.com/kubernetes/kubernetes/issues/21843 and https://github.com/kubernetes/test-infra/pull/250
This commit is contained in: commit 012eb941d6
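With the consolidation below, a single hack/e2e.go invocation can drive build, cluster lifecycle, tests, kubemark, and leak checking. A sketch of such an invocation, using only flag names from this diff (values and paths are illustrative):

# Illustrative only; flag names come from the new var block in hack/e2e.go.
go run ./hack/e2e.go -v \
    --build --up --test --down \
    --kubemark \
    --check_leaked_resources \
    --dump=/tmp/e2e-logs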
Deleted file, 32 lines (evidently the hack/e2e-internal build-release helper, whose invocation e2e.go drops below):

@@ -1,32 +0,0 @@
-#!/bin/bash
-
-# Copyright 2015 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -o errexit
-set -o nounset
-set -o pipefail
-
-KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
-
-: ${KUBECTL:=${KUBE_ROOT}/cluster/kubectl.sh}
-: ${KUBE_CONFIG_FILE:="config-test.sh"}
-
-export KUBECTL KUBE_CONFIG_FILE
-
-source "${KUBE_ROOT}/cluster/kube-util.sh"
-
-prepare-e2e
-
-test-build-release
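Nothing replaces this script one-for-one; per the hack/e2e.go hunk below, the -build step now shells out directly to the repository Makefile:

# What e2e.go -build runs after this change:
make quick-release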
hack/e2e.go (356 lines changed)
@@ -19,10 +19,11 @@ package main

 import (
     "flag"
+    "fmt"
+    "io/ioutil"
     "log"
     "os"
     "os/exec"
-    "path"
     "path/filepath"
     "strconv"
     "strings"
@@ -30,52 +31,51 @@ import (
 )

 var (
-    isup     = flag.Bool("isup", false, "Check to see if the e2e cluster is up, then exit.")
     build    = flag.Bool("build", false, "If true, build a new release. Otherwise, use whatever is there.")
-    up       = flag.Bool("up", false, "If true, start the the e2e cluster. If cluster is already up, recreate it.")
-    push     = flag.Bool("push", false, "If true, push to e2e cluster. Has no effect if -up is true.")
-    pushup   = flag.Bool("pushup", false, "If true, push to e2e cluster if it's up, otherwise start the e2e cluster.")
-    down     = flag.Bool("down", false, "If true, tear down the cluster before exiting.")
-    test     = flag.Bool("test", false, "Run Ginkgo tests.")
-    testArgs = flag.String("test_args", "", "Space-separated list of arguments to pass to Ginkgo test runner.")
-    root     = flag.String("root", absOrDie(filepath.Clean(filepath.Join(path.Base(os.Args[0]), ".."))), "Root directory of kubernetes repository.")
-    verbose  = flag.Bool("v", false, "If true, print all command output.")
-    checkVersionSkew = flag.Bool("check_version_skew", true, ""+
-        "By default, verify that client and server have exact version match. "+
-        "You can explicitly set to false if you're, e.g., testing client changes "+
-        "for which the server version doesn't make a difference.")
     checkNodeCount = flag.Bool("check_node_count", true, ""+
         "By default, verify that the cluster has at least two nodes."+
         "You can explicitly set to false if you're, e.g., testing single-node clusters "+
         "for which the node count is supposed to be one.")
-    ctlCmd = flag.String("ctl", "", "If nonempty, pass this as an argument, and call kubectl. Implies -v. (-test, -cfg, -ctl are mutually exclusive)")
+    checkVersionSkew = flag.Bool("check_version_skew", true, ""+
+        "By default, verify that client and server have exact version match. "+
+        "You can explicitly set to false if you're, e.g., testing client changes "+
+        "for which the server version doesn't make a difference.")
+    checkLeakedResources = flag.Bool("check_leaked_resources", false, "Ensure project ends with the same resources")
+    ctlCmd      = flag.String("ctl", "", "If nonempty, pass this as an argument, and call kubectl. Implies -v.")
+    down        = flag.Bool("down", false, "If true, tear down the cluster before exiting.")
+    dump        = flag.String("dump", "", "If set, dump cluster logs to this location")
+    kubemark    = flag.Bool("kubemark", false, "If true, run kubemark tests.")
+    isup        = flag.Bool("isup", false, "Check to see if the e2e cluster is up, then exit.")
+    push        = flag.Bool("push", false, "If true, push to e2e cluster. Has no effect if -up is true.")
+    pushup      = flag.Bool("pushup", false, "If true, push to e2e cluster if it's up, otherwise start the e2e cluster.")
+    skewTests   = flag.Bool("skew", false, "If true, run tests in another version at ../kubernetes/hack/e2e.go")
+    testArgs    = flag.String("test_args", "", "Space-separated list of arguments to pass to Ginkgo test runner.")
+    test        = flag.Bool("test", false, "Run Ginkgo tests.")
+    up          = flag.Bool("up", false, "If true, start the the e2e cluster. If cluster is already up, recreate it.")
+    upgradeArgs = flag.String("upgrade_args", "", "If set, run upgrade tests before other tests")
+    verbose     = flag.Bool("v", false, "If true, print all command output.")
 )

 const (
     minNodeCount = 2
 )

-func absOrDie(path string) string {
-    out, err := filepath.Abs(path)
-    if err != nil {
-        panic(err)
-    }
-    return out
-}
-
-type TestResult struct {
-    Pass int
-    Fail int
-}
-
-type ResultsByTest map[string]TestResult
-
 func main() {
     log.SetFlags(log.LstdFlags | log.Lshortfile)
     flag.Parse()

-    os.Setenv("KUBECTL", *root+`/cluster/kubectl.sh`+kubectlArgs())
+    cwd, err := os.Getwd()
+    if err != nil {
+        log.Fatalf("Could not get pwd: %v", err)
+    }
+    acwd, err := filepath.Abs(cwd)
+    if err != nil {
+        log.Fatalf("Failed to convert to an absolute path: %v", err)
+    }
+    if !strings.Contains(filepath.Base(acwd), "kubernetes") {
+        // TODO(fejta): cd up into the kubernetes directory
+        log.Fatalf("Must run from kubernetes directory: %v", cwd)
+    }

     if *isup {
         status := 1
@@ -91,13 +91,29 @@ func main() {
     if *build {
         // The build-release script needs stdin to ask the user whether
        // it's OK to download the docker image.
-        cmd := exec.Command(path.Join(*root, "hack/e2e-internal/build-release.sh"))
+        cmd := exec.Command("make", "quick-release")
         cmd.Stdin = os.Stdin
         if !finishRunning("build-release", cmd) {
             log.Fatal("Error building. Aborting.")
         }
     }

+    if *up && !TearDown() {
+        log.Fatal("Could not tear down previous cluster")
+    }
+
+    beforeResources := ""
+    if *checkLeakedResources {
+        beforeResources = ListResources()
+    }
+
+    os.Setenv("KUBECTL", strings.Join(append([]string{"./cluster/kubectl.sh"}, kubectlArgs()...), " "))
+
+    if *upgradeArgs != "" { // Start the cluster using a previous version.
+        if !UpgradeUp() {
+            log.Fatal("Failed to start cluster to upgrade. Aborting.")
+        }
+    } else { // Start the cluster using this version.
     if *pushup {
         if IsUp() {
             log.Printf("e2e cluster is up, pushing.")
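The exported KUBECTL value is easiest to see by example; an illustration derived from the kubectlArgs change further down, not part of the diff itself:

# With the default -check_version_skew=true:
#   KUBECTL="./cluster/kubectl.sh --match-server-version"
# With -check_version_skew=false:
#   KUBECTL="./cluster/kubectl.sh"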
@@ -114,23 +130,58 @@ func main() {
             log.Fatal("Error starting e2e cluster. Aborting.")
         }
     } else if *push {
-        if !finishRunning("push", exec.Command(path.Join(*root, "hack/e2e-internal/e2e-push.sh"))) {
+        if !finishRunning("push", exec.Command("./hack/e2e-internal/e2e-push.sh")) {
             log.Fatal("Error pushing e2e cluster. Aborting.")
         }
     }
+    }
+
+    upResources := ""
+    if *checkLeakedResources {
+        upResources = ListResources()
+    }

     success := true
-    switch {
-    case *ctlCmd != "":
+    if *ctlCmd != "" {
         ctlArgs := strings.Fields(*ctlCmd)
         os.Setenv("KUBE_CONFIG_FILE", "config-test.sh")
-        success = finishRunning("'kubectl "+*ctlCmd+"'", exec.Command(path.Join(*root, "cluster/kubectl.sh"), ctlArgs...))
-    case *test:
-        success = Test()
+        ctlSuccess := finishRunning("'kubectl "+*ctlCmd+"'", exec.Command("./cluster/kubectl.sh", ctlArgs...))
+        success = success && ctlSuccess
+    }
+
+    if *upgradeArgs != "" {
+        upgradeSuccess := UpgradeTest(*upgradeArgs)
+        success = success && upgradeSuccess
+    }
+
+    if *test {
+        if *skewTests {
+            skewSuccess := SkewTest()
+            success = success && skewSuccess
+        } else {
+            testSuccess := Test()
+            success = success && testSuccess
+        }
+    }
+
+    if *kubemark {
+        kubeSuccess := KubemarkTest()
+        success = success && kubeSuccess
     }

     if *down {
-        TearDown()
+        if !success && *dump != "" {
+            DumpClusterLogs(*dump)
+        }
+        tearSuccess := TearDown()
+        success = success && tearSuccess
+    }
+
+    if *checkLeakedResources {
+        log.Print("Sleeping for 30 seconds...") // Wait for eventually consistent listing
+        time.Sleep(30 * time.Second)
+        DiffResources(beforeResources, upResources, ListResources(), *dump)
     }

     if !success {
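Where the old switch ran at most one of the ctl/test phases, the new if blocks run every requested phase in order, so invocations can combine them. An illustrative combination (flag values are examples, not from the diff):

# Upgrade via the skewed checkout, run the skewed tests, then tear down:
go run ./hack/e2e.go -v --up \
    --upgrade_args='--ginkgo.focus=\[Feature:Upgrade\]' \
    --test --skew \
    --down --dump=/tmp/e2e-logs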
@@ -138,29 +189,86 @@ func main() {
     }
 }

+func writeOrDie(dir, name, data string) string {
+    f, err := os.Create(filepath.Join(dir, name))
+    if err != nil {
+        log.Fatal(err)
+    }
+    if _, err := f.WriteString(data); err != nil {
+        log.Fatal(err)
+    }
+    if err := f.Close(); err != nil {
+        log.Fatal(err)
+    }
+    log.Printf("Created file: %s", f.Name())
+    return f.Name()
+}
+
+func DiffResources(before, clusterUp, after, location string) {
+    if location == "" {
+        var err error
+        location, err = ioutil.TempDir("", "e2e-check-resources")
+        if err != nil {
+            log.Fatal(err)
+        }
+    }
+    bp := writeOrDie(location, "gcp-resources-before.txt", before)
+    writeOrDie(location, "gcp-resources-cluster-up.txt", clusterUp)
+    ap := writeOrDie(location, "gcp-resources-after.txt", after)
+
+    cmd := exec.Command("diff", "-sw", "-U0", "-F^\\[.*\\]$", bp, ap)
+    if *verbose {
+        cmd.Stderr = os.Stderr
+    }
+    o, err := cmd.Output()
+    stdout := string(o)
+    writeOrDie(location, "gcp-resources-diff.txt", stdout)
+    if err == nil {
+        return
+    }
+    lines := strings.Split(stdout, "\n")
+    if len(lines) < 3 { // Ignore the +++ and --- header lines
+        return
+    }
+
+    var added []string
+    for _, l := range lines {
+        if strings.HasPrefix(l, "+") {
+            added = append(added, l)
+        }
+    }
+    if len(added) > 0 {
+        log.Printf("Error: %d leaked resources", len(added))
+        log.Fatal(strings.Join(added, "\n"))
+    }
+}
+
+func ListResources() string {
+    log.Printf("Listing resources...")
+    cmd := exec.Command("./cluster/gce/list-resources.sh")
+    if *verbose {
+        cmd.Stderr = os.Stderr
+    }
+    stdout, err := cmd.Output()
+    if err != nil {
+        log.Fatalf("Failed to list resources (%s):\n%s", err, stdout)
+    }
+    return string(stdout)
+}
+
 func TearDown() bool {
-    return finishRunning("teardown", exec.Command(path.Join(*root, "hack/e2e-internal/e2e-down.sh")))
+    return finishRunning("teardown", exec.Command("./hack/e2e-internal/e2e-down.sh"))
 }

 // Up brings an e2e cluster up, recreating it if one is already running.
 func Up() bool {
-    if IsUp() {
-        log.Printf("e2e cluster already running; will teardown")
-        if res := TearDown(); !res {
-            return false
-        }
-    }
-    return finishRunning("up", exec.Command(path.Join(*root, "hack/e2e-internal/e2e-up.sh")))
+    return finishRunning("up", exec.Command("./hack/e2e-internal/e2e-up.sh"))
 }

 // Ensure that the cluster is large engough to run the e2e tests.
 func ValidateClusterSize() {
-    if os.Getenv("FEDERATION") == "true" {
-        //TODO(colhom): federated equivalent of ValidateClusterSize
-        return
-    }
     // Check that there are at least minNodeCount nodes running
-    cmd := exec.Command(path.Join(*root, "hack/e2e-internal/e2e-cluster-size.sh"))
+    cmd := exec.Command("./hack/e2e-internal/e2e-cluster-size.sh")
     if *verbose {
         cmd.Stderr = os.Stderr
     }
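DiffResources reproduces the comparison the Jenkins runner previously did in shell; the equivalent command, shown with the file names writeOrDie uses:

# Shell equivalent of the comparison DiffResources performs:
diff -sw -U0 -F'^\[.*\]$' gcp-resources-before.txt gcp-resources-after.txt
# Any '+' lines left after the two header lines are reported as leaks.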
@@ -181,7 +289,128 @@ func ValidateClusterSize() {

 // Is the e2e cluster up?
 func IsUp() bool {
-    return finishRunning("get status", exec.Command(path.Join(*root, "hack/e2e-internal/e2e-status.sh")))
+    return finishRunning("get status", exec.Command("./hack/e2e-internal/e2e-status.sh"))
+}
+
+func DumpClusterLogs(location string) {
+    log.Printf("Dumping cluster logs to: %v", location)
+    finishRunning("dump cluster logs", exec.Command("./cluster/log-dump.sh", location))
+}
+
+func KubemarkTest() bool {
+    // Stop previous run
+    if !finishRunning("Stop kubemark", exec.Command("./test/kubemark/stop-kubemark.sh")) {
+        log.Print("stop kubemark failed")
+        return false
+    }
+
+    // Start new run
+    backups := []string{"NUM_NODES", "MASTER_SIZE"}
+    for _, item := range backups {
+        old, present := os.LookupEnv(item)
+        if present {
+            defer os.Setenv(item, old)
+        } else {
+            defer os.Unsetenv(item)
+        }
+    }
+    os.Setenv("NUM_NODES", os.Getenv("KUBEMARK_NUM_NODES"))
+    os.Setenv("MASTER_SIZE", os.Getenv("KUBEMARK_MASTER_SIZE"))
+    if !finishRunning("Start Kubemark", exec.Command("./test/kubemark/start-kubemark.sh")) {
+        log.Print("Error: start kubemark failed")
+        return false
+    }
+
+    // Run kubemark tests
+    focus, present := os.LookupEnv("KUBEMARK_TESTS")
+    if !present {
+        focus = "starting\\s30\\pods"
+    }
+    test_args := os.Getenv("KUBEMARK_TEST_ARGS")
+
+    if !finishRunning("Run kubemark tests", exec.Command("./test/kubemark/run-e2e-tests.sh", "--ginkgo.focus="+focus, test_args)) {
+        log.Print("Error: run kubemark tests failed")
+        return false
+    }
+
+    // Stop kubemark
+    if !finishRunning("Stop kubemark", exec.Command("./test/kubemark/stop-kubemark.sh")) {
+        log.Print("Error: stop kubemark failed")
+        return false
+    }
+    return true
+}
+
+func UpgradeUp() bool {
+    old, err := os.Getwd()
+    if err != nil {
+        log.Printf("Failed to os.Getwd(): %v", err)
+        return false
+    }
+    defer os.Chdir(old)
+    err = os.Chdir("../kubernetes_skew")
+    if err != nil {
+        log.Printf("Failed to cd ../kubernetes_skew: %v", err)
+        return false
+    }
+    return finishRunning("UpgradeUp",
+        exec.Command(
+            "go", "run", "./hack/e2e.go",
+            fmt.Sprintf("--check_version_skew=%t", *checkVersionSkew),
+            fmt.Sprintf("--push=%t", *push),
+            fmt.Sprintf("--pushup=%t", *pushup),
+            fmt.Sprintf("--up=%t", *up),
+            fmt.Sprintf("--v=%t", *verbose),
+        ))
+}
+
+func UpgradeTest(args string) bool {
+    old, err := os.Getwd()
+    if err != nil {
+        log.Printf("Failed to os.Getwd(): %v", err)
+        return false
+    }
+    defer os.Chdir(old)
+    err = os.Chdir("../kubernetes_skew")
+    if err != nil {
+        log.Printf("Failed to cd ../kubernetes_skew: %v", err)
+        return false
+    }
+    previous, present := os.LookupEnv("E2E_REPORT_PREFIX")
+    if present {
+        defer os.Setenv("E2E_REPORT_PREFIX", previous)
+    } else {
+        defer os.Unsetenv("E2E_REPORT_PREFIX")
+    }
+    os.Setenv("E2E_REPORT_PREFIX", "upgrade")
+    return finishRunning("Upgrade Ginkgo tests",
+        exec.Command(
+            "go", "run", "./hack/e2e.go",
+            "--test",
+            "--test_args="+args,
+            fmt.Sprintf("--v=%t", *verbose),
+            fmt.Sprintf("--check_version_skew=%t", *checkVersionSkew)))
+}
+
+func SkewTest() bool {
+    old, err := os.Getwd()
+    if err != nil {
+        log.Printf("Failed to Getwd: %v", err)
+        return false
+    }
+    defer os.Chdir(old)
+    err = os.Chdir("../kubernetes_skew")
+    if err != nil {
+        log.Printf("Failed to cd ../kubernetes_skew: %v", err)
+        return false
+    }
+    return finishRunning("Skewed Ginkgo tests",
+        exec.Command(
+            "go", "run", "./hack/e2e.go",
+            "--test",
+            "--test_args="+*testArgs,
+            fmt.Sprintf("--v=%t", *verbose),
+            fmt.Sprintf("--check_version_skew=%t", *checkVersionSkew)))
 }

 func Test() bool {
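KubemarkTest reads its sizing and test focus from the environment and restores NUM_NODES and MASTER_SIZE on return via the deferred calls above. An illustrative driver (values are examples, not from the diff):

export KUBEMARK_NUM_NODES=100               # example value
export KUBEMARK_MASTER_SIZE=n1-standard-4   # example value
export KUBEMARK_TESTS='starting\s30\pods'   # also the default focus if unset
go run ./hack/e2e.go -v --kubemark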
@@ -189,18 +418,18 @@ func Test() bool {
         log.Fatal("Testing requested, but e2e cluster not up!")
     }

+    // TODO(fejta): add a --federated or something similar
+    if os.Getenv("FEDERATION") == "" {
         if *checkNodeCount {
             ValidateClusterSize()
         }
+        return finishRunning("Ginkgo tests", exec.Command("./hack/ginkgo-e2e.sh", strings.Fields(*testArgs)...))
+    }

-    if os.Getenv("FEDERATION") == "true" {
     if *testArgs == "" {
         *testArgs = "--ginkgo.focus=\\[Feature:Federation\\]"
     }
-        return finishRunning("Federated Ginkgo tests", exec.Command(filepath.Join(*root, "hack/federated-ginkgo-e2e.sh"), strings.Fields(*testArgs)...))
-    } else {
-        return finishRunning("Ginkgo tests", exec.Command(filepath.Join(*root, "hack/ginkgo-e2e.sh"), strings.Fields(*testArgs)...))
-    }
+    return finishRunning("Federated Ginkgo tests", exec.Command("./hack/federated-ginkgo-e2e.sh", strings.Fields(*testArgs)...))
 }

 func finishRunning(stepName string, cmd *exec.Cmd) bool {
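Test() now branches on the FEDERATION environment variable rather than a flag; for example:

# Default: node-count check plus the plain Ginkgo suite.
go run ./hack/e2e.go -v --test
# Federated: focus defaults to \[Feature:Federation\] when -test_args is empty.
FEDERATION=true go run ./hack/e2e.go -v --test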
@@ -222,10 +451,9 @@ func finishRunning(stepName string, cmd *exec.Cmd) bool {

 // returns either "", or a list of args intended for appending with the
 // kubectl command (beginning with a space).
-func kubectlArgs() string {
-    args := []string{""}
-    if *checkVersionSkew {
-        args = append(args, "--match-server-version")
-    }
-    return strings.Join(args, " ")
+func kubectlArgs() []string {
+    if !*checkVersionSkew {
+        return []string{}
+    }
+    return []string{"--match-server-version"}
 }
e2e-runner.sh (the Jenkins e2e runner named in the PR description)

@@ -161,35 +161,6 @@ function install_google_cloud_sdk_tarball() {
     export PATH=${install_dir}/google-cloud-sdk/bin:${PATH}
 }

-# Only call after attempting to bring the cluster up. Don't call after
-# bringing the cluster down.
-function dump_cluster_logs_and_exit() {
-    local -r exit_status=$?
-    dump_cluster_logs
-    if [[ "${USE_KUBEMARK:-}" == "true" ]]; then
-        # If we tried to bring the Kubemark cluster up, make a courtesy
-        # attempt to bring it down so we're not leaving resources around.
-        ./test/kubemark/stop-kubemark.sh || true
-    fi
-    if [[ "${E2E_DOWN,,}" == "true" ]]; then
-        # If we tried to bring the cluster up, make a courtesy attempt
-        # to bring the cluster down so we're not leaving resources
-        # around. Unlike later, don't sleep beforehand, though. (We're
-        # just trying to tear down as many resources as we can as fast
-        # as possible and don't even know if we brought the master up.)
-        go run ./hack/e2e.go ${E2E_OPT:-} -v --down || true
-    fi
-    exit ${exit_status}
-}
-
-# Only call after attempting to bring the cluster up. Don't call after
-# bringing the cluster down.
-function dump_cluster_logs() {
-    if [[ -x "cluster/log-dump.sh" ]]; then
-        ./cluster/log-dump.sh "${ARTIFACTS}"
-    fi
-}
-
 # Figures out the builtin k8s version of a GCI image.
 function get_gci_k8s_version() {
     local -r image_description=$(gcloud compute images describe ${KUBE_GCE_MASTER_IMAGE} --project=${KUBE_GCE_MASTER_PROJECT})
@@ -265,24 +236,6 @@ elif [[ -n "${KUBEKINS_SERVICE_ACCOUNT_FILE:-}" ]]; then
     echo "ERROR: cannot access service account file at: ${KUBEKINS_SERVICE_ACCOUNT_FILE}"
 fi

-function e2e_test() {
-    local -r ginkgo_test_args="${1}"
-    # Check to make sure the cluster is up before running tests, and fail if it's not.
-    go run ./hack/e2e.go ${E2E_OPT:-} -v --isup
-    # Jenkins will look at the junit*.xml files for test failures, so don't exit with a nonzero
-    # error code if it was only tests that failed.
-    go run ./hack/e2e.go ${E2E_OPT:-} -v --test \
-        ${ginkgo_test_args:+--test_args="${ginkgo_test_args}"} \
-        && exitcode=0 || exitcode=$?
-    if [[ "${E2E_PUBLISH_GREEN_VERSION:-}" == "true" && ${exitcode} == 0 ]]; then
-        # Use plaintext version file packaged with kubernetes.tar.gz
-        echo "Publish version to ci/latest-green.txt: $(cat version)"
-        gsutil cp ./version "gs://${KUBE_GCS_DEV_RELEASE_BUCKET}/ci/latest-green.txt"
-    fi
-    return ${exitcode}
-}
-
 echo "--------------------------------------------------------------------------------"
 echo "Test Environment:"
 printenv | sort
@@ -360,47 +313,6 @@ case "${KUBERNETES_PROVIDER}" in
         ;;
 esac

-cd kubernetes
-
-# Upload build start time and k8s version to GCS, but not on PR Jenkins.
-# On PR Jenkins this is done before the build.
-if [[ ! "${JOB_NAME}" =~ -pull- ]]; then
-    JENKINS_BUILD_STARTED=true bash <(curl -fsS --retry 3 --keepalive-time 2 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/upload-to-gcs.sh")
-fi
-
-# When run inside Docker, we need to make sure all files are world-readable
-# (since they will be owned by root on the host).
-trap "chmod -R o+r '${ARTIFACTS}'" EXIT SIGINT SIGTERM
-export E2E_REPORT_DIR=${ARTIFACTS}
-declare -r gcp_list_resources_script="./cluster/gce/list-resources.sh"
-declare -r gcp_resources_before="${ARTIFACTS}/gcp-resources-before.txt"
-declare -r gcp_resources_cluster_up="${ARTIFACTS}/gcp-resources-cluster-up.txt"
-declare -r gcp_resources_after="${ARTIFACTS}/gcp-resources-after.txt"
-if [[ ( ${KUBERNETES_PROVIDER} == "gce" || ${KUBERNETES_PROVIDER} == "gke" ) && -x "${gcp_list_resources_script}" ]]; then
-    gcp_list_resources="true"
-    # Always pull the script from HEAD, overwriting the local one if it exists.
-    # We do this to pick up fixes if we are running tests from a branch or tag.
-    curl -fsS --retry 3 --keepalive-time 2 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/cluster/gce/list-resources.sh" > "${gcp_list_resources_script}"
-else
-    gcp_list_resources="false"
-fi
-
-### Set up ###
-if [[ "${E2E_UP,,}" == "true" ]]; then
-    go run ./hack/e2e.go ${E2E_OPT:-} -v --down
-fi
-if [[ "${gcp_list_resources}" == "true" ]]; then
-    ${gcp_list_resources_script} > "${gcp_resources_before}"
-fi
-if [[ "${E2E_UP,,}" == "true" ]]; then
-    # We want to try to gather logs even if kube-up fails, so collect the
-    # result here and fail after dumping logs if it's nonzero.
-    go run ./hack/e2e.go ${E2E_OPT:-} -v --up || dump_cluster_logs_and_exit
-    go run ./hack/e2e.go -v --ctl="version --match-server-version=false"
-    if [[ "${gcp_list_resources}" == "true" ]]; then
-        ${gcp_list_resources_script} > "${gcp_resources_cluster_up}"
-    fi
-fi
-
 # Allow download & unpack of alternate version of tests, for cross-version & upgrade testing.
 #
@@ -414,87 +326,75 @@ fi
 # JENKINS_USE_SKEW_TESTS=true will run tests from the skewed version rather
 # than the original version.
 if [[ -n "${JENKINS_PUBLISHED_SKEW_VERSION:-}" ]]; then
-    cd ..
-    mv kubernetes kubernetes_old
+    mv kubernetes kubernetes_orig
     fetch_published_version_tars "${JENKINS_PUBLISHED_SKEW_VERSION}"
-    cd kubernetes
-    # Upgrade the cluster before running other tests
-    if [[ "${E2E_UPGRADE_TEST:-}" == "true" ]]; then
-        # Add a report prefix for the e2e tests so that the tests don't get overwritten when we run
-        # the rest of the e2es.
-        E2E_REPORT_PREFIX='upgrade' e2e_test "${GINKGO_UPGRADE_TEST_ARGS:-}" || EXIT_CODE=1
-    fi
+    mv kubernetes kubernetes_skew
+    mv kubernetes_orig kubernetes
     if [[ "${JENKINS_USE_SKEW_TESTS:-}" != "true" ]]; then
-        # Back out into the old tests now that we've downloaded & maybe upgraded.
-        cd ../kubernetes_old
         # Append kubectl-path of skewed kubectl to test args, since we always
         # want that to use the skewed kubectl version:
         #
-        # - for upgrade jobs, we want kubectl to be at the same version as master.
-        # - for client skew tests, we want to use the skewed kubectl (that's what we're testing).
-        GINKGO_TEST_ARGS="${GINKGO_TEST_ARGS:-} --kubectl-path=$(pwd)/../kubernetes/cluster/kubectl.sh"
+        # - for upgrade jobs, we want kubectl to be at the same version as
+        #   master.
+        # - for client skew tests, we want to use the skewed kubectl
+        #   (that's what we're testing).
+        GINKGO_TEST_ARGS="${GINKGO_TEST_ARGS:-} --kubectl-path=$(pwd)/../kubernetes_skew/cluster/kubectl.sh"
     fi
 fi

+cd kubernetes
+
+# Upload build start time and k8s version to GCS, but not on PR Jenkins.
+# On PR Jenkins this is done before the build.
+if [[ ! "${JOB_NAME}" =~ -pull- ]]; then
+    JENKINS_BUILD_STARTED=true bash <(curl -fsS --retry 3 --keepalive-time 2 "https://raw.githubusercontent.com/kubernetes/kubernetes/master/hack/jenkins/upload-to-gcs.sh")
+fi
+
+# When run inside Docker, we need to make sure all files are world-readable
+# (since they will be owned by root on the host).
+trap "chmod -R o+r '${ARTIFACTS}'" EXIT SIGINT SIGTERM
+export E2E_REPORT_DIR=${ARTIFACTS}
+
+e2e_go_args=( \
+    -v \
+    --dump="${ARTIFACTS}" \
+)
+
+case "${KUBERNETES_PROVIDER}" in
+    gce|gke)
+        e2e_go_args+=(--check_leaked_resources)
+        ;;
+esac
+
+if [[ "${E2E_UP,,}" == "true" ]]; then
+    e2e_go_args+=(--up --ctl="version --match-server-version=false")
+fi
+
+if [[ "${E2E_DOWN,,}" == "true" ]]; then
+    e2e_go_args+=(--down)
+fi
+
 if [[ "${E2E_TEST,,}" == "true" ]]; then
-    e2e_test "${GINKGO_TEST_ARGS:-}" || EXIT_CODE=1
+    e2e_go_args+=(--test --test_args="${GINKGO_TEST_ARGS}")
 fi

-### Start Kubemark ###
-if [[ "${USE_KUBEMARK:-}" == "true" ]]; then
-    export RUN_FROM_DISTRO=true
-    NUM_NODES_BKP=${NUM_NODES}
-    MASTER_SIZE_BKP=${MASTER_SIZE}
-    ./test/kubemark/stop-kubemark.sh
-    NUM_NODES=${KUBEMARK_NUM_NODES:-$NUM_NODES}
-    MASTER_SIZE=${KUBEMARK_MASTER_SIZE:-$MASTER_SIZE}
-    ./test/kubemark/start-kubemark.sh || dump_cluster_logs_and_exit
-    # Similarly, if tests fail, we trigger empty set of tests that would trigger storing logs from the base cluster.
-    # We intentionally overwrite the exit-code from `run-e2e-tests.sh` because we want jenkins to look at the
-    # junit.xml results for test failures and not process the exit code. This is needed by jenkins to more gracefully
-    # handle blocking the merge queue as a result of test failure flakes. Infrastructure failures should continue to
-    # exit non-0.
-    # TODO: The above comment is no longer accurate. Need to fix this before
-    # turning xunit off for the postsubmit tests. See: #28200
-    ./test/kubemark/run-e2e-tests.sh --ginkgo.focus="${KUBEMARK_TESTS:-starting\s30\spods}" "${KUBEMARK_TEST_ARGS:-}" || dump_cluster_logs_and_exit
-    ./test/kubemark/stop-kubemark.sh
-    NUM_NODES=${NUM_NODES_BKP}
-    MASTER_SIZE=${MASTER_SIZE_BKP}
-    unset RUN_FROM_DISTRO
-    unset NUM_NODES_BKP
-    unset MASTER_SIZE_BKP
+# Optionally run tests from the version in kubernetes_skew
+if [[ "${JENKINS_USE_SKEW_TESTS:-}" == "true" ]]; then
+    e2e_go_args+=(--skew)
 fi

-### Clean up ###
-if [[ "${E2E_DOWN,,}" == "true" ]]; then
-    # Sleep before deleting the cluster to give the controller manager time to
-    # delete any cloudprovider resources still around from the last test.
-    # This is calibrated to allow enough time for 3 attempts to delete the
-    # resources. Each attempt is allocated 5 seconds for requests to the
-    # cloudprovider plus the processingRetryInterval from servicecontroller.go
-    # for the wait between attempts.
-    sleep 30
-    go run ./hack/e2e.go ${E2E_OPT:-} -v --down
-fi
-if [[ "${gcp_list_resources}" == "true" ]]; then
-    ${gcp_list_resources_script} > "${gcp_resources_after}"
+# Optionally run upgrade tests before other tests.
+if [[ "${E2E_UPGRADE_TEST:-}" == "true" ]]; then
+    e2e_go_args+=(--upgrade_args="${GINKGO_UPGRADE_TEST_ARGS}")
 fi

-# Compare resources if either the cluster was
-# * started and destroyed (normal e2e)
-# * neither started nor destroyed (soak test)
-if [[ "${E2E_UP:-}" == "${E2E_DOWN:-}" && -f "${gcp_resources_before}" && -f "${gcp_resources_after}" ]]; then
-    difference=$(diff -sw -U0 -F'^\[.*\]$' "${gcp_resources_before}" "${gcp_resources_after}") || true
-    noleak=true
-    if [[ -n $(echo "${difference}" | tail -n +3 | grep -E "^\+") ]] && [[ "${FAIL_ON_GCP_RESOURCE_LEAK:-}" == "true" ]]; then
-        noleak=false
-    fi
-    if ! ${noleak} ; then
-        echo "${difference}"
-        echo "!!! FAIL: Google Cloud Platform resources leaked while running tests!"
-        EXIT_CODE=1
-    fi
-    record_command "${STAGE_CLEANUP}" "gcp_resource_leak_check" ${noleak}
-fi
+go run ./hack/e2e.go \
+    ${E2E_OPT:-} \
+    "${e2e_go_args[@]}"

-exit ${EXIT_CODE}
+if [[ "${E2E_PUBLISH_GREEN_VERSION:-}" == "true" ]]; then
+    # Use plaintext version file packaged with kubernetes.tar.gz
+    echo "Publish version to ci/latest-green.txt: $(cat version)"
+    gsutil cp ./version "gs://${KUBE_GCS_DEV_RELEASE_BUCKET}/ci/latest-green.txt"
+fi
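The runner therefore reduces to building one argument array and making a single call; for a gce job with E2E_UP, E2E_TEST, and E2E_DOWN all true, the assembled command is roughly:

# Expanded for readability; assembled from the e2e_go_args logic above.
go run ./hack/e2e.go ${E2E_OPT:-} \
    -v --dump="${ARTIFACTS}" \
    --check_leaked_resources \
    --up --ctl="version --match-server-version=false" \
    --test --test_args="${GINKGO_TEST_ARGS}" \
    --down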
A known-flags list gains the two new flag names:

@@ -1,5 +1,6 @@
-check_version_skew
+check_leaked_resources
 check_node_count
+check_version_skew
 concurrent_rc_syncs
 file_content
 file_mode
@@ -18,6 +19,7 @@ pods_per_node
 test_args
 up_to
 up_to
+upgrade_args
 valid_flag
 retry_time
 file_content_in_loop
test/kubemark/start-kubemark.sh

@@ -42,17 +42,23 @@ EOF
     sed -i'' -e "s/\"//g" "${RESOURCE_DIRECTORY}/controllers_flags"
 }

-RUN_FROM_DISTRO=${RUN_FROM_DISTRO:-false}
 MAKE_DIR="${KUBE_ROOT}/cluster/images/kubemark"

-if [ "${RUN_FROM_DISTRO}" == "false" ]; then
-    # Running from repository
+echo "Copying kubemark to ${MAKE_DIR}"
+if [[ -f "${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz" ]]; then
+    # Running from distro
+    SERVER_TARBALL="${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz"
     cp "${KUBE_ROOT}/_output/release-stage/server/linux-amd64/kubernetes/server/bin/kubemark" "${MAKE_DIR}"
+elif [[ -f "${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz" ]]; then
+    # Running from an extracted release tarball (kubernetes.tar.gz)
+    SERVER_TARBALL="${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz"
+    tar \
+        --strip-components=3 \
+        -xzf kubernetes-server-linux-amd64.tar.gz \
+        -C "${MAKE_DIR}" 'kubernetes/server/bin/kubemark' || exit 1
 else
-    cp "${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz" "."
-    tar -xzf kubernetes-server-linux-amd64.tar.gz
-    cp "kubernetes/server/bin/kubemark" "${MAKE_DIR}"
-    rm -rf "kubernetes-server-linux-amd64.tar.gz" "kubernetes"
+    echo 'Cannot find kubernetes/server/bin/kubemark binary'
+    exit 1
 fi

 CURR_DIR=`pwd`
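The practical effect on invocations, with RUN_FROM_DISTRO gone:

# Before: release runs needed an explicit switch.
RUN_FROM_DISTRO=true ./test/kubemark/start-kubemark.sh
# After: the server tarball is located automatically in either layout.
./test/kubemark/start-kubemark.sh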
@@ -167,25 +173,14 @@ gcloud compute ssh --zone="${ZONE}" --project="${PROJECT}" "${MASTER_NAME}" \

 writeEnvironmentFiles

-if [ "${RUN_FROM_DISTRO}" == "false" ]; then
 gcloud compute copy-files --zone="${ZONE}" --project="${PROJECT}" \
-    "${KUBE_ROOT}/_output/release-tars/kubernetes-server-linux-amd64.tar.gz" \
+    "${SERVER_TARBALL}" \
     "${KUBEMARK_DIRECTORY}/start-kubemark-master.sh" \
     "${KUBEMARK_DIRECTORY}/configure-kubectl.sh" \
     "${RESOURCE_DIRECTORY}/apiserver_flags" \
     "${RESOURCE_DIRECTORY}/scheduler_flags" \
     "${RESOURCE_DIRECTORY}/controllers_flags" \
     "${MASTER_NAME}":~
-else
-    gcloud compute copy-files --zone="${ZONE}" --project="${PROJECT}" \
-        "${KUBE_ROOT}/server/kubernetes-server-linux-amd64.tar.gz" \
-        "${KUBEMARK_DIRECTORY}/start-kubemark-master.sh" \
-        "${KUBEMARK_DIRECTORY}/configure-kubectl.sh" \
-        "${RESOURCE_DIRECTORY}/apiserver_flags" \
-        "${RESOURCE_DIRECTORY}/scheduler_flags" \
-        "${RESOURCE_DIRECTORY}/controllers_flags" \
-        "${MASTER_NAME}":~
-fi

 gcloud compute ssh "${MASTER_NAME}" --zone="${ZONE}" --project="${PROJECT}" \
     --command="chmod a+x configure-kubectl.sh && chmod a+x start-kubemark-master.sh && sudo ./start-kubemark-master.sh ${EVENT_STORE_IP:-127.0.0.1} ${NUM_NODES:-0}"