commit 3910a66bb5
parent 89651077b1

    Add run-services-mode option, and start e2e services in a separate process.
@@ -289,6 +289,7 @@ log-flush-frequency
 long-running-request-regexp
 low-diskspace-threshold-mb
 make-symlinks
+manifest-path
 manifest-url
 manifest-url-header
 masquerade-all
@@ -421,6 +422,7 @@ rkt-stage1-image
 root-ca-file
 root-dir
 run-proxy
+run-services-mode
 runtime-cgroups
 runtime-config
 runtime-request-timeout
@@ -67,14 +67,24 @@ type TestContextType struct {
 	CreateTestingNS CreateTestingNSFn
 	// If set to true test will dump data about the namespace in which test was running.
 	DumpLogsOnFailure bool
+	// If the garbage collector is enabled in the kube-apiserver and kube-controller-manager.
+	GarbageCollectorEnabled bool
+	// Node e2e specific test context
+	NodeTestContextType
+}
+
+// NodeTestContextType is part of TestContextType; it is shared by all node e2e tests.
+type NodeTestContextType struct {
 	// Name of the node to run tests on (node e2e suite only).
 	NodeName string
+	// DisableKubenet disables kubenet when starting kubelet.
+	DisableKubenet bool
 	// Whether to enable the QoS Cgroup Hierarchy or not
 	CgroupsPerQOS bool
 	// The hard eviction thresholds
 	EvictionHard string
-	// If the garbage collector is enabled in the kube-apiserver and kube-controller-manager.
-	GarbageCollectorEnabled bool
+	// ManifestPath is the static pod manifest path.
+	ManifestPath string
 }
 
 type CloudConfig struct {
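Why the node-specific fields move into an embedded struct: the suite needs to copy only the node settings between parallel test processes, and an embedded struct can be marshaled on its own. A minimal standalone sketch of that split (names shortened, field values hypothetical):

package main

import (
	"encoding/json"
	"fmt"
)

// NodeTestContextType mirrors the split introduced by this commit:
// node-specific settings live in an embedded struct so they can be
// (de)serialized independently of the rest of the context.
type NodeTestContextType struct {
	NodeName     string
	ManifestPath string
}

type TestContextType struct {
	Host string // cluster-level field, stays process-local
	NodeTestContextType
}

func main() {
	ctx := TestContextType{Host: "http://127.0.0.1:8080"}
	ctx.NodeName = "node-0" // hypothetical value
	// Only the embedded node part is serialized.
	data, _ := json.Marshal(&ctx.NodeTestContextType)
	fmt.Println(string(data)) // {"NodeName":"node-0","ManifestPath":""}
}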
@@ -112,7 +122,6 @@ func RegisterCommonFlags() {
 	flag.StringVar(&TestContext.Host, "host", "http://127.0.0.1:8080", "The host, or apiserver, to connect to")
 	flag.StringVar(&TestContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.")
 	flag.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
-	flag.BoolVar(&TestContext.GarbageCollectorEnabled, "garbage-collector-enabled", false, "Set to true if the garbage collector is enabled in the kube-apiserver and kube-controller-manager, then some tests will rely on the garbage collector to delete dependent resources.")
 }
 
 // Register flags specific to the cluster e2e test suite.
@@ -149,11 +158,16 @@ func RegisterClusterFlags() {
 	flag.StringVar(&TestContext.UpgradeTarget, "upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.")
 	flag.StringVar(&TestContext.PrometheusPushGateway, "prom-push-gateway", "", "The URL to prometheus gateway, so that metrics can be pushed during e2es and scraped by prometheus. Typically something like 127.0.0.1:9091.")
 	flag.BoolVar(&TestContext.CleanStart, "clean-start", false, "If true, purge all namespaces except default and system before running tests. This serves to clean up test namespaces from failed/interrupted e2e runs in a long-lived cluster.")
+	flag.BoolVar(&TestContext.GarbageCollectorEnabled, "garbage-collector-enabled", false, "Set to true if the garbage collector is enabled in the kube-apiserver and kube-controller-manager, then some tests will rely on the garbage collector to delete dependent resources.")
 }
 
 // Register flags specific to the node e2e test suite.
 func RegisterNodeFlags() {
 	flag.StringVar(&TestContext.NodeName, "node-name", "", "Name of the node to run tests on (node e2e suite only).")
+	// TODO(random-liu): Remove kubelet related flags when we move the kubelet start logic out of the test.
+	// TODO(random-liu): Find some way to get the kubelet configuration, and automatically configure and filter tests based on it.
+	flag.BoolVar(&TestContext.DisableKubenet, "disable-kubenet", false, "If true, start kubelet without kubenet. (default false)")
 	flag.BoolVar(&TestContext.CgroupsPerQOS, "cgroups-per-qos", false, "Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.")
 	flag.StringVar(&TestContext.EvictionHard, "eviction-hard", "memory.available<250Mi", "The hard eviction thresholds. If set, pods get evicted when the specified resources drop below the thresholds.")
+	flag.StringVar(&TestContext.ManifestPath, "manifest-path", "", "The path to the static pod manifest file.")
 }
@@ -45,41 +45,30 @@ import (
 	"github.com/onsi/ginkgo/config"
 	more_reporters "github.com/onsi/ginkgo/reporters"
 	. "github.com/onsi/gomega"
+	"github.com/spf13/pflag"
 )
 
-var e2es *e2eService
+var e2es *E2EServices
 
-// context is the test context shared by all parallel nodes.
-// Originally we set up the test environment and initialized global variables
-// in BeforeSuite, and then used the global variables in the test.
-// However, after we made the test parallel, ginkgo runs all tests
-// in several parallel test nodes, and for each test node the BeforeSuite
-// and AfterSuite are run.
-// We don't want to start services (kubelet, apiserver and etcd) for all
-// parallel nodes, but we do want to set some globally shared variables which
-// can be used in the test.
-// We have to use SynchronizedBeforeSuite to achieve that. The first
-// function of SynchronizedBeforeSuite is only called once, and the second
-// function is called in each parallel test node. The result returned by
-// the first function is the parameter of the second function.
-// So we start all services and initialize the shared context in the first
-// function, and propagate the context to all parallel test nodes in the
-// second function.
-// Notice no lock is needed for the shared context, because it should only be
-// initialized in the first function in SynchronizedBeforeSuite. After that
-// it should never be modified.
-var context SharedContext
-
 var prePullImages = flag.Bool("prepull-images", true, "If true, prepull images so image pull failures do not cause test failures.")
+var runServicesMode = flag.Bool("run-services-mode", false, "If true, only run services (etcd, apiserver) in the current process, and do not run tests.")
 
 func init() {
 	framework.RegisterCommonFlags()
 	framework.RegisterNodeFlags()
+	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
+	// Mark the run-services-mode flag as hidden to prevent users from using it.
+	pflag.CommandLine.MarkHidden("run-services-mode")
 }
 
 func TestE2eNode(t *testing.T) {
-	flag.Parse()
+	pflag.Parse()
+	if *runServicesMode {
+		// If run-services-mode is specified, only run services in the current process.
+		RunE2EServices()
+		return
+	}
+	// If run-services-mode is not specified, run tests.
 	rand.Seed(time.Now().UTC().UnixNano())
 	RegisterFailHandler(Fail)
 	reporters := []Reporter{}
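The deleted comment block above describes the SynchronizedBeforeSuite pattern that the new code still relies on. A minimal sketch of that pattern, with a hypothetical Shared struct standing in for the real node test context:

package e2enode_sketch

import (
	"encoding/json"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

// Shared is a hypothetical stand-in for the state the suite propagates.
type Shared struct{ NodeName string }

var _ = SynchronizedBeforeSuite(func() []byte {
	// First function: runs exactly once, on parallel node 1. Start shared
	// services here and return the state every other node needs.
	data, err := json.Marshal(&Shared{NodeName: "node-0"})
	Expect(err).NotTo(HaveOccurred())
	return data
}, func(data []byte) {
	// Second function: runs on every parallel node, receiving the first
	// function's result as its parameter.
	var shared Shared
	Expect(json.Unmarshal(data, &shared)).To(Succeed())
})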
@@ -103,6 +92,8 @@ var _ = SynchronizedBeforeSuite(func() []byte {
 	if *buildServices {
 		buildGo()
 	}
+
+	// Initialize the node name here, so that the following code can get the right node name.
 	if framework.TestContext.NodeName == "" {
 		hostname, err := os.Hostname()
 		if err != nil {
@@ -110,7 +101,6 @@ var _ = SynchronizedBeforeSuite(func() []byte {
 		}
 		framework.TestContext.NodeName = hostname
 	}
-
 	// Pre-pull the images tests depend on so we can fail immediately if there is an image pull issue
 	// This helps with debugging test flakes since it is hard to tell when a test failure is due to image pulling.
 	if *prePullImages {
@@ -124,11 +114,10 @@ var _ = SynchronizedBeforeSuite(func() []byte {
 	// We should mask locksmithd when provisioning the machine.
 	maskLocksmithdOnCoreos()
 
-	shared := &SharedContext{}
 	if *startServices {
-		e2es = newE2eService(framework.TestContext.NodeName, framework.TestContext.CgroupsPerQOS, framework.TestContext.EvictionHard, shared)
-		if err := e2es.start(); err != nil {
-			Fail(fmt.Sprintf("Unable to start node services.\n%v", err))
+		e2es = NewE2EServices()
+		if err := e2es.Start(); err != nil {
+			glog.Fatalf("Unable to start node services: %v", err)
 		}
 		glog.Infof("Node services started. Running tests...")
 	} else {
@@ -136,33 +125,31 @@ var _ = SynchronizedBeforeSuite(func() []byte {
 	}
 
 	glog.Infof("Starting namespace controller")
+	// TODO(random-liu): Move namespace controller into namespace services.
 	startNamespaceController()
 
 	// Reference common test to make the import valid.
 	commontest.CurrentSuite = commontest.NodeE2E
 
-	// Share the node name with the other test nodes.
-	shared.NodeName = framework.TestContext.NodeName
-	data, err := json.Marshal(shared)
-	Expect(err).NotTo(HaveOccurred())
+	data, err := json.Marshal(&framework.TestContext.NodeTestContextType)
+	if err != nil {
+		glog.Fatalf("Failed to serialize node test context: %v", err)
+	}
 
 	return data
 }, func(data []byte) {
-	// Set the shared context received from the synchronized initialize function
-	shared := &SharedContext{}
-	Expect(json.Unmarshal(data, shared)).To(Succeed())
-	context = *shared
-	framework.TestContext.NodeName = shared.NodeName
+	// The node test context is updated in the first function; update it on every test node.
+	err := json.Unmarshal(data, &framework.TestContext.NodeTestContextType)
+	if err != nil {
+		glog.Fatalf("Failed to deserialize node test context: %v", err)
+	}
 })
 
 // Tear down the kubelet on the node
 var _ = SynchronizedAfterSuite(func() {}, func() {
 	if e2es != nil {
-		e2es.getLogFiles()
 		if *startServices && *stopServices {
 			glog.Infof("Stopping node services...")
-			e2es.stop()
+			e2es.Stop()
 		}
 	}
 
@@ -24,6 +24,7 @@ import (
 	"net/http"
 	"os"
 	"os/exec"
+	"os/signal"
 	"path"
 	"path/filepath"
 	"reflect"
@@ -33,22 +34,122 @@ import (
 	"time"
 
 	"github.com/golang/glog"
+	"github.com/kardianos/osext"
 
 	"k8s.io/kubernetes/test/e2e/framework"
 )
 
+// TODO(random-liu): Move this file to a separate package.
 var serverStartTimeout = flag.Duration("server-start-timeout", time.Second*120, "Time to wait for each server to become healthy.")
 
-type e2eService struct {
-	killCmds []*killCmd
-	rmDirs   []string
-
-	context       *SharedContext
-	etcdDataDir   string
-	nodeName      string
-	logFiles      map[string]logFileData
-	cgroupsPerQOS bool
-	evictionHard  string
+// E2EServices starts and stops e2e services in a separate process. The test uses it to start and
+// stop all e2e services.
+type E2EServices struct {
+	services *server
+}
+
+func NewE2EServices() *E2EServices {
+	return &E2EServices{}
+}
+
+// services.log is the combined log of all services.
+const servicesLogFile = "services.log"
+
+// Start starts the e2e services in another process; it returns when all e2e
+// services are ready.
+// We want to statically link e2e services into the test binary, but we don't
+// want their glog to pollute the test result. So we run the binary in run-
+// services-mode to start e2e services in another process.
+func (e *E2EServices) Start() error {
+	var err error
+	// Create the manifest path for kubelet.
+	// TODO(random-liu): Remove related logic when we move kubelet starting logic out of the test.
+	framework.TestContext.ManifestPath, err = ioutil.TempDir("", "node-e2e-pod")
+	if err != nil {
+		return fmt.Errorf("failed to create static pod manifest directory: %v", err)
+	}
+	testBin, err := osext.Executable()
+	if err != nil {
+		return fmt.Errorf("can't get current binary: %v", err)
+	}
+	// TODO(random-liu): Add sudo after we statically link apiserver and etcd, because apiserver needs
+	// sudo. We can't add sudo now, because etcd may not be in PATH of root.
+	startCmd := exec.Command(testBin,
+		"--run-services-mode",
+		"--server-start-timeout", serverStartTimeout.String(),
+		"--report-dir", framework.TestContext.ReportDir,
+		// TODO(random-liu): Remove the following flags after we move kubelet starting logic
+		// out of the test.
+		"--node-name", framework.TestContext.NodeName,
+		"--disable-kubenet="+strconv.FormatBool(framework.TestContext.DisableKubenet),
+		"--cgroups-per-qos="+strconv.FormatBool(framework.TestContext.CgroupsPerQOS),
+		"--manifest-path", framework.TestContext.ManifestPath,
+		"--eviction-hard", framework.TestContext.EvictionHard,
+	)
+	e.services = newServer("services", startCmd, nil, getHealthCheckURLs(), servicesLogFile)
+	return e.services.start()
+}
+
+// Stop stops the e2e services.
+func (e *E2EServices) Stop() error {
+	defer func() {
+		// Clean up the manifest path for kubelet.
+		manifestPath := framework.TestContext.ManifestPath
+		if manifestPath != "" {
+			err := os.RemoveAll(manifestPath)
+			if err != nil {
+				glog.Errorf("Failed to delete static pod manifest directory %s.\n%v", manifestPath, err)
+			}
+		}
+	}()
+	if e.services == nil {
+		glog.Errorf("can't stop e2e services, because `services` is nil")
+	}
+	return e.services.kill()
+}
+
+// RunE2EServices actually starts the e2e services. This function is used to
+// start e2e services in the current process, and is only used in run-services-mode.
+func RunE2EServices() {
+	e := newE2EService()
+	if err := e.run(); err != nil {
+		glog.Fatalf("Failed to run e2e services: %v", err)
+	}
+}
+
+// Ports of different e2e services.
+const (
+	etcdPort            = "4001"
+	apiserverPort       = "8080"
+	kubeletPort         = "10250"
+	kubeletReadOnlyPort = "10255"
+)
+
+// Health check URLs of different e2e services.
+var (
+	etcdHealthCheckURL      = getEndpoint(etcdPort) + "/v2/keys/" // Trailing slash is required.
+	apiserverHealthCheckURL = getEndpoint(apiserverPort) + "/healthz"
+	kubeletHealthCheckURL   = getEndpoint(kubeletReadOnlyPort) + "/healthz"
+)
+
+// getEndpoint generates the endpoint URL from a service port.
+func getEndpoint(port string) string {
+	return "http://127.0.0.1:" + port
+}
+
+func getHealthCheckURLs() []string {
+	return []string{
+		etcdHealthCheckURL,
+		apiserverHealthCheckURL,
+		kubeletHealthCheckURL,
+	}
+}
+
+// e2eService is used internally in this file to start e2e services in the current process.
+type e2eService struct {
+	services []*server
+	rmDirs   []string
+	logFiles map[string]logFileData
 }
 
 type logFileData struct {
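Start's trick of launching the test binary itself with a hidden flag is a standard re-exec pattern. A standalone sketch of just that mechanism (the flag name is kept from the diff, everything else is simplified; modern code can use os.Executable from the standard library instead of the osext package used here):

package main

import (
	"flag"
	"fmt"
	"os"
	"os/exec"
)

var runServicesMode = flag.Bool("run-services-mode", false, "internal: host services instead of running tests")

func main() {
	flag.Parse()
	if *runServicesMode {
		// Service-hosting branch: this is what the child process runs.
		fmt.Println("child: starting services...")
		return
	}
	self, err := os.Executable() // stdlib equivalent of osext.Executable()
	if err != nil {
		panic(err)
	}
	// Re-exec ourselves so the services' log output stays out of this process.
	child := exec.Command(self, "--run-services-mode")
	out, err := child.CombinedOutput()
	fmt.Printf("child said: %s (err: %v)\n", out, err)
}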
@@ -63,7 +164,7 @@ const (
 	defaultEtcdPath = "/tmp/etcd"
 )
 
-func newE2eService(nodeName string, cgroupsPerQOS bool, evictionHard string, context *SharedContext) *e2eService {
+func newE2EService() *e2eService {
 	// Special log files that need to be collected for additional debugging.
 	var logFiles = map[string]logFileData{
 		"kern.log": {[]string{"/var/log/kern.log"}, []string{"-k"}},
@@ -71,13 +172,25 @@ func newE2eService(nodeName string, cgroupsPerQOS bool, evictionHard string, context *SharedContext) *e2eService {
 		"cloud-init.log": {[]string{"/var/log/cloud-init.log"}, []string{"-u", "cloud*"}},
 	}
 
-	return &e2eService{
-		context:       context,
-		nodeName:      nodeName,
-		logFiles:      logFiles,
-		cgroupsPerQOS: cgroupsPerQOS,
-		evictionHard:  evictionHard,
+	return &e2eService{logFiles: logFiles}
+}
+
+// terminationSignals are signals that cause the program to exit in the
+// supported platforms (linux, darwin, windows).
+var terminationSignals = []os.Signal{syscall.SIGHUP, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT}
+
+// run starts all e2e services and waits for a termination signal. Once it
+// receives the signal, it stops the e2e services gracefully.
+func (es *e2eService) run() error {
+	defer es.stop()
+	if err := es.start(); err != nil {
+		return err
 	}
+	// Wait until receiving a termination signal.
+	sig := make(chan os.Signal, 1)
+	signal.Notify(sig, terminationSignals...)
+	<-sig
+	return nil
 }
 
 func (es *e2eService) start() error {
@@ -88,25 +201,23 @@ func (es *e2eService) start() error {
 		return err
 	}
 
-	cmd, err := es.startEtcd()
+	s, err := es.startEtcd()
 	if err != nil {
 		return err
 	}
-	es.killCmds = append(es.killCmds, cmd)
-	es.rmDirs = append(es.rmDirs, es.etcdDataDir)
+	es.services = append(es.services, s)
 
-	cmd, err = es.startApiServer()
+	s, err = es.startApiServer()
 	if err != nil {
 		return err
 	}
-	es.killCmds = append(es.killCmds, cmd)
+	es.services = append(es.services, s)
 
-	cmd, err = es.startKubeletServer()
+	s, err = es.startKubeletServer()
 	if err != nil {
 		return err
 	}
-	es.killCmds = append(es.killCmds, cmd)
-	es.rmDirs = append(es.rmDirs, es.context.PodConfigPath)
+	es.services = append(es.services, s)
 
 	return nil
 }
@@ -167,9 +278,10 @@ func isJournaldAvailable() bool {
 }
 
 func (es *e2eService) stop() {
-	for _, k := range es.killCmds {
-		if err := k.Kill(); err != nil {
-			glog.Errorf("Failed to stop %v: %v", k.name, err)
+	es.getLogFiles()
+	for _, s := range es.services {
+		if err := s.kill(); err != nil {
+			glog.Errorf("Failed to stop %v: %v", s.name, err)
 		}
 	}
 	for _, d := range es.rmDirs {
@@ -180,12 +292,13 @@ func (es *e2eService) stop() {
 	}
 }
 
-func (es *e2eService) startEtcd() (*killCmd, error) {
+func (es *e2eService) startEtcd() (*server, error) {
 	dataDir, err := ioutil.TempDir("", "node-e2e")
 	if err != nil {
 		return nil, err
 	}
-	es.etcdDataDir = dataDir
+	// Mark dataDir as a directory to remove.
+	es.rmDirs = append(es.rmDirs, dataDir)
 	var etcdPath string
 	// CoreOS ships a binary named 'etcd' which is really old, so prefer 'etcd2' if it exists
 	etcdPath, err = exec.LookPath("etcd2")
@@ -205,37 +318,36 @@ func (es *e2eService) startEtcd() (*killCmd, error) {
 		"--advertise-client-urls=http://0.0.0.0:2379,http://0.0.0.0:4001")
 	// Execute etcd in the data directory instead of using --data-dir because the flag sometimes requires additional
 	// configuration (e.g. --name in version 0.4.9)
-	cmd.Dir = es.etcdDataDir
-	hcc := newHealthCheckCommand(
-		"http://127.0.0.1:4001/v2/keys/", // Trailing slash is required,
+	cmd.Dir = dataDir
+	server := newServer(
+		"etcd",
 		cmd,
+		nil,
+		[]string{etcdHealthCheckURL},
 		"etcd.log")
-	return &killCmd{name: "etcd", cmd: cmd}, es.startServer(hcc)
+	return server, server.start()
 }
 
-func (es *e2eService) startApiServer() (*killCmd, error) {
+func (es *e2eService) startApiServer() (*server, error) {
 	cmd := exec.Command("sudo", getApiServerBin(),
-		"--etcd-servers", "http://127.0.0.1:4001",
+		"--etcd-servers", getEndpoint(etcdPort),
 		"--insecure-bind-address", "0.0.0.0",
 		"--service-cluster-ip-range", "10.0.0.1/24",
-		"--kubelet-port", "10250",
+		"--kubelet-port", kubeletPort,
 		"--allow-privileged", "true",
 		"--v", LOG_VERBOSITY_LEVEL, "--logtostderr",
 	)
-	hcc := newHealthCheckCommand(
-		"http://127.0.0.1:8080/healthz",
+	server := newServer(
+		"apiserver",
 		cmd,
+		nil,
+		[]string{apiserverHealthCheckURL},
 		"kube-apiserver.log")
-	return &killCmd{name: "kube-apiserver", cmd: cmd}, es.startServer(hcc)
+	return server, server.start()
 }
 
-func (es *e2eService) startKubeletServer() (*killCmd, error) {
-	dataDir, err := ioutil.TempDir("", "node-e2e-pod")
-	if err != nil {
-		return nil, err
-	}
-	es.context.PodConfigPath = dataDir
-	var killOverride *exec.Cmd
+func (es *e2eService) startKubeletServer() (*server, error) {
+	var killCommand *exec.Cmd
 	cmdArgs := []string{}
 	if systemdRun, err := exec.LookPath("systemd-run"); err == nil {
 		// On systemd services, detection of a service / unit works reliably while
@@ -244,7 +356,7 @@ func (es *e2eService) startKubeletServer() (*killCmd, error) {
 		// sense to test it that way
 		unitName := fmt.Sprintf("kubelet-%d.service", rand.Int31())
 		cmdArgs = append(cmdArgs, systemdRun, "--unit="+unitName, getKubeletServerBin())
-		killOverride = exec.Command("sudo", "systemctl", "kill", unitName)
+		killCommand = exec.Command("sudo", "systemctl", "kill", unitName)
 		es.logFiles["kubelet.log"] = logFileData{
 			journalctlCommand: []string{"-u", unitName},
 		}
|
|||||||
)
|
)
|
||||||
}
|
}
|
||||||
cmdArgs = append(cmdArgs,
|
cmdArgs = append(cmdArgs,
|
||||||
"--api-servers", "http://127.0.0.1:8080",
|
"--api-servers", getEndpoint(apiserverPort),
|
||||||
"--address", "0.0.0.0",
|
"--address", "0.0.0.0",
|
||||||
"--port", "10250",
|
"--port", kubeletPort,
|
||||||
"--hostname-override", es.nodeName, // Required because hostname is inconsistent across hosts
|
"--read-only-port", kubeletReadOnlyPort,
|
||||||
|
"--hostname-override", framework.TestContext.NodeName, // Required because hostname is inconsistent across hosts
|
||||||
"--volume-stats-agg-period", "10s", // Aggregate volumes frequently so tests don't need to wait as long
|
"--volume-stats-agg-period", "10s", // Aggregate volumes frequently so tests don't need to wait as long
|
||||||
"--allow-privileged", "true",
|
"--allow-privileged", "true",
|
||||||
"--serialize-image-pulls", "false",
|
"--serialize-image-pulls", "false",
|
||||||
"--config", es.context.PodConfigPath,
|
"--config", framework.TestContext.ManifestPath,
|
||||||
"--file-check-frequency", "10s", // Check file frequently so tests won't wait too long
|
"--file-check-frequency", "10s", // Check file frequently so tests won't wait too long
|
||||||
"--v", LOG_VERBOSITY_LEVEL, "--logtostderr",
|
"--v", LOG_VERBOSITY_LEVEL, "--logtostderr",
|
||||||
"--pod-cidr=10.180.0.0/24", // Assign a fixed CIDR to the node because there is no node controller.
|
"--pod-cidr=10.180.0.0/24", // Assign a fixed CIDR to the node because there is no node controller.
|
||||||
"--eviction-hard", es.evictionHard,
|
"--eviction-hard", framework.TestContext.EvictionHard,
|
||||||
"--eviction-pressure-transition-period", "30s",
|
"--eviction-pressure-transition-period", "30s",
|
||||||
)
|
)
|
||||||
if es.cgroupsPerQOS {
|
if framework.TestContext.CgroupsPerQOS {
|
||||||
cmdArgs = append(cmdArgs,
|
cmdArgs = append(cmdArgs,
|
||||||
"--cgroups-per-qos", "true",
|
"--cgroups-per-qos", "true",
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
if !*disableKubenet {
|
if !framework.TestContext.DisableKubenet {
|
||||||
cwd, err := os.Getwd()
|
cwd, err := os.Getwd()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -288,31 +401,99 @@ func (es *e2eService) startKubeletServer() (*killCmd, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
cmd := exec.Command("sudo", cmdArgs...)
|
cmd := exec.Command("sudo", cmdArgs...)
|
||||||
hcc := newHealthCheckCommand(
|
server := newServer(
|
||||||
"http://127.0.0.1:10255/healthz",
|
"kubelet",
|
||||||
cmd,
|
cmd,
|
||||||
|
killCommand,
|
||||||
|
[]string{kubeletHealthCheckURL},
|
||||||
"kubelet.log")
|
"kubelet.log")
|
||||||
return &killCmd{name: "kubelet", cmd: cmd, override: killOverride}, es.startServer(hcc)
|
return server, server.start()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (es *e2eService) startServer(cmd *healthCheckCommand) error {
|
// server manages a server started and killed with commands.
|
||||||
cmdErrorChan := make(chan error)
|
type server struct {
|
||||||
|
// name is the name of the server, it is only used for logging.
|
||||||
|
name string
|
||||||
|
// startCommand is the command used to start the server
|
||||||
|
startCommand *exec.Cmd
|
||||||
|
// killCommand is the command used to stop the server. It is not required. If it
|
||||||
|
// is not specified, `sudo kill` will be used to stop the server.
|
||||||
|
killCommand *exec.Cmd
|
||||||
|
// healthCheckUrls is the urls used to check whether the server is ready.
|
||||||
|
healthCheckUrls []string
|
||||||
|
// outFilename is the name of the log file. The stdout and stderr of the server
|
||||||
|
// will be redirected to this file.
|
||||||
|
outFilename string
|
||||||
|
}
|
||||||
|
|
||||||
|
func newServer(name string, start, kill *exec.Cmd, urls []string, filename string) *server {
|
||||||
|
return &server{
|
||||||
|
name: name,
|
||||||
|
startCommand: start,
|
||||||
|
killCommand: kill,
|
||||||
|
healthCheckUrls: urls,
|
||||||
|
outFilename: filename,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// commandToString format command to string.
|
||||||
|
func commandToString(c *exec.Cmd) string {
|
||||||
|
if c == nil {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return strings.Join(append([]string{c.Path}, c.Args[1:]...), " ")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *server) String() string {
|
||||||
|
return fmt.Sprintf("server %q start-command: `%s`, kill-command: `%s`, health-check: %v, output-file: %q", s.name,
|
||||||
|
commandToString(s.startCommand), commandToString(s.killCommand), s.healthCheckUrls, s.outFilename)
|
||||||
|
}
|
||||||
|
|
||||||
|
// readinessCheck checks whether services are ready via the health check urls. Once there is
|
||||||
|
// an error in errCh, the function will stop waiting and return the error.
|
||||||
|
// TODO(random-liu): Move this to util
|
||||||
|
func readinessCheck(urls []string, errCh <-chan error) error {
|
||||||
|
endTime := time.Now().Add(*serverStartTimeout)
|
||||||
|
for endTime.After(time.Now()) {
|
||||||
|
select {
|
||||||
|
case err := <-errCh:
|
||||||
|
return err
|
||||||
|
case <-time.After(time.Second):
|
||||||
|
ready := true
|
||||||
|
for _, url := range urls {
|
||||||
|
resp, err := http.Get(url)
|
||||||
|
if err != nil || resp.StatusCode != http.StatusOK {
|
||||||
|
ready = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if ready {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return fmt.Errorf("e2e service readiness check timeout %v", *serverStartTimeout)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *server) start() error {
|
||||||
|
errCh := make(chan error)
|
||||||
go func() {
|
go func() {
|
||||||
defer close(cmdErrorChan)
|
defer close(errCh)
|
||||||
|
|
||||||
// Create the output filename
|
// Create the output filename
|
||||||
outPath := path.Join(framework.TestContext.ReportDir, cmd.outputFilename)
|
outPath := path.Join(framework.TestContext.ReportDir, s.outFilename)
|
||||||
outfile, err := os.Create(outPath)
|
outfile, err := os.Create(outPath)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
cmdErrorChan <- fmt.Errorf("Failed to create file %s for `%s` %v.", outPath, cmd, err)
|
errCh <- fmt.Errorf("failed to create file %q for `%s` %v.", outPath, s, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
defer outfile.Close()
|
defer outfile.Close()
|
||||||
defer outfile.Sync()
|
defer outfile.Sync()
|
||||||
|
|
||||||
|
cmd := s.startCommand
|
||||||
// Set the command to write the output file
|
// Set the command to write the output file
|
||||||
cmd.Cmd.Stdout = outfile
|
cmd.Stdout = outfile
|
||||||
cmd.Cmd.Stderr = outfile
|
cmd.Stderr = outfile
|
||||||
|
|
||||||
// Death of this test process should kill the server as well.
|
// Death of this test process should kill the server as well.
|
||||||
attrs := &syscall.SysProcAttr{}
|
attrs := &syscall.SysProcAttr{}
|
||||||
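To make the new abstraction concrete, here is a hypothetical caller wired the same way startEtcd wires its server. The daemon path, port, health URL, and log file name are illustrative only:

// Sketch: starting and stopping an arbitrary service through the server type.
cmd := exec.Command("/usr/local/bin/my-daemon", "--port=9999")
s := newServer("my-daemon", cmd, nil, []string{"http://127.0.0.1:9999/healthz"}, "my-daemon.log")
if err := s.start(); err != nil { // blocks until healthy or timed out
	glog.Fatalf("Failed to start %v: %v", s, err)
}
defer s.kill() // no killCommand given, so kill() falls back to signaling the PID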
@@ -321,63 +502,41 @@ func (es *e2eService) startServer(cmd *healthCheckCommand) error {
 		if deathSigField.IsValid() {
 			deathSigField.Set(reflect.ValueOf(syscall.SIGTERM))
 		} else {
-			cmdErrorChan <- fmt.Errorf("Failed to set Pdeathsig field (non-linux build)")
+			errCh <- fmt.Errorf("failed to set Pdeathsig field (non-linux build)")
 			return
 		}
-		cmd.Cmd.SysProcAttr = attrs
+		cmd.SysProcAttr = attrs
 
 		// Run the command
 		err = cmd.Run()
 		if err != nil {
-			cmdErrorChan <- fmt.Errorf("%s Failed with error \"%v\". Output written to: %s", cmd, err, outPath)
+			errCh <- fmt.Errorf("failed to run server start command %q: %v", commandToString(cmd), err)
 			return
 		}
 	}()
 
-	endTime := time.Now().Add(*serverStartTimeout)
-	for endTime.After(time.Now()) {
-		select {
-		case err := <-cmdErrorChan:
-			return err
-		case <-time.After(time.Second):
-			resp, err := http.Get(cmd.HealthCheckUrl)
-			if err == nil && resp.StatusCode == http.StatusOK {
-				return nil
-			}
-		}
-	}
-	return fmt.Errorf("Timeout waiting for service %s", cmd)
+	return readinessCheck(s.healthCheckUrls, errCh)
 }
 
-// killCmd is a struct to kill a given cmd. The cmd member specifies a command
-// to find the pid of and attempt to kill.
-// If the override field is set, that will be used instead to kill the command.
-// name is only used for logging
-type killCmd struct {
-	name     string
-	cmd      *exec.Cmd
-	override *exec.Cmd
-}
-
-func (k *killCmd) Kill() error {
-	name := k.name
-	cmd := k.cmd
-
-	if k.override != nil {
-		return k.override.Run()
+func (s *server) kill() error {
+	name := s.name
+	cmd := s.startCommand
+
+	if s.killCommand != nil {
+		return s.killCommand.Run()
 	}
 
 	if cmd == nil {
-		return fmt.Errorf("Could not kill %s because both `override` and `cmd` are nil", name)
+		return fmt.Errorf("could not kill %q because both `killCommand` and `startCommand` are nil", name)
 	}
 
 	if cmd.Process == nil {
-		glog.V(2).Infof("%s not running", name)
+		glog.V(2).Infof("%q not running", name)
 		return nil
 	}
 	pid := cmd.Process.Pid
 	if pid <= 1 {
-		return fmt.Errorf("invalid PID %d for %s", pid, name)
+		return fmt.Errorf("invalid PID %d for %q", pid, name)
 	}
 
 	// Attempt to shut down the process in a friendly manner before forcing it.
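The reflection dance around Pdeathsig above exists because syscall.SysProcAttr only has that field on Linux, and setting it directly would break compilation on other platforms. In a Linux-only file the equivalent is simply the following sketch (the build constraint shown is the modern //go:build form; code of this era used // +build linux):

//go:build linux

package services

import (
	"os/exec"
	"syscall"
)

// commandTiedToParent returns a command whose child process receives SIGTERM
// from the kernel if this process dies, so orphaned test services cannot
// outlive the test binary.
func commandTiedToParent(name string, arg ...string) *exec.Cmd {
	cmd := exec.Command(name, arg...)
	cmd.SysProcAttr = &syscall.SysProcAttr{Pdeathsig: syscall.SIGTERM}
	return cmd
}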
@@ -413,7 +572,7 @@ func (k *killCmd) Kill() error {
 	select {
 	case err := <-waitChan:
 		if err != nil {
-			return fmt.Errorf("error stopping %s: %v", name, err)
+			return fmt.Errorf("error stopping %q: %v", name, err)
 		}
 		// Success!
 		return nil
@@ -422,23 +581,5 @@ func (k *killCmd) Kill() error {
 		}
 	}
 
-	return fmt.Errorf("unable to stop %s", name)
-}
-
-type healthCheckCommand struct {
-	*exec.Cmd
-	HealthCheckUrl string
-	outputFilename string
-}
-
-func newHealthCheckCommand(healthCheckUrl string, cmd *exec.Cmd, filename string) *healthCheckCommand {
-	return &healthCheckCommand{
-		HealthCheckUrl: healthCheckUrl,
-		Cmd:            cmd,
-		outputFilename: filename,
-	}
-}
-
-func (hcc *healthCheckCommand) String() string {
-	return fmt.Sprintf("`%s` health-check: %s", strings.Join(append([]string{hcc.Path}, hcc.Args[1:]...), " "), hcc.HealthCheckUrl)
+	return fmt.Errorf("unable to stop %q", name)
 }
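The elided middle of kill() is summarized by its own comment: shut the process down in a friendly manner before forcing it. A generic sketch of that escalation, reusing the pid and waitChan names from the surrounding code (the signal list and timeout here are illustrative, not the exact values used):

// Try progressively harsher signals, giving the process time to exit after
// each one; waitChan is assumed to receive once cmd.Wait() returns.
for _, sig := range []syscall.Signal{syscall.SIGTERM, syscall.SIGKILL} {
	_ = syscall.Kill(pid, sig)
	select {
	case err := <-waitChan:
		return err // process exited
	case <-time.After(10 * time.Second):
		// No exit yet; escalate to the next signal.
	}
}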
@@ -44,7 +44,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
 		mirrorPodName = staticPodName + "-" + framework.TestContext.NodeName
 
 		By("create the static pod")
-		err := createStaticPod(context.PodConfigPath, staticPodName, ns, ImageRegistry[nginxImage], api.RestartPolicyAlways)
+		err := createStaticPod(framework.TestContext.ManifestPath, staticPodName, ns, ImageRegistry[nginxImage], api.RestartPolicyAlways)
 		Expect(err).ShouldNot(HaveOccurred())
 
 		By("wait for the mirror pod to be running")
@@ -60,7 +60,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
 
 		By("update the static pod container image")
 		image := ImageRegistry[pauseImage]
-		err = createStaticPod(context.PodConfigPath, staticPodName, ns, image, api.RestartPolicyAlways)
+		err = createStaticPod(framework.TestContext.ManifestPath, staticPodName, ns, image, api.RestartPolicyAlways)
 		Expect(err).ShouldNot(HaveOccurred())
 
 		By("wait for the mirror pod to be updated")
@@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("MirrorPod", func() {
 	})
 	AfterEach(func() {
 		By("delete the static pod")
-		err := deleteStaticPod(context.PodConfigPath, staticPodName, ns)
+		err := deleteStaticPod(framework.TestContext.ManifestPath, staticPodName, ns)
 		Expect(err).ShouldNot(HaveOccurred())
 
 		By("wait for the mirror pod to disappear")
@@ -22,12 +22,6 @@ import (
 
 var kubeletAddress = flag.String("kubelet-address", "http://127.0.0.1:10255", "Host and port of the kubelet")
 
-var disableKubenet = flag.Bool("disable-kubenet", false, "If true, start kubelet without kubenet")
 var buildServices = flag.Bool("build-services", true, "If true, build local executables")
 var startServices = flag.Bool("start-services", true, "If true, start local node services")
 var stopServices = flag.Bool("stop-services", true, "If true, stop local node services after running tests")
-
-type SharedContext struct {
-	NodeName      string
-	PodConfigPath string
-}