Remove kubelet-related flags from node e2e. Add a single flag, --kubelet-flags, to pass all kubelet flags together.

This commit is contained in:
Random-Liu 2016-11-16 13:40:23 -08:00
parent 090809d8ad
commit edf7608c51
17 changed files with 228 additions and 168 deletions

View File

@ -140,10 +140,9 @@ else
sudo -v || exit 1
fi
# If the flag --disable-kubenet is not set, set true by default.
if ! [[ $test_args =~ "--disable-kubenet" ]]; then
test_args="$test_args --disable-kubenet=true"
fi
# Do not use any network plugin by default. User could override the flags with
# test_args.
test_args='--kubelet-flags="--network-plugin= --network-plugin-dir=" '$test_args
# Test using the host the script was run on
# Provided for backwards compatibility

View File

@ -318,6 +318,7 @@ kubelet-client-certificate
kubelet-client-key
kubelet-docker-endpoint
kubelet-enable-debugging-handlers
kubelet-flags
kubelet-host-network-sources
kubelet-https
kubelet-kubeconfig

View File

@ -107,27 +107,9 @@ type NodeTestContextType struct {
NodeName string
// NodeConformance indicates whether the test is running in node conformance mode.
NodeConformance bool
// DisableKubenet disables kubenet when starting kubelet.
DisableKubenet bool
// Whether to enable the QoS Cgroup Hierarchy or not
CgroupsPerQOS bool
// How the kubelet should interface with the cgroup hierarchy (cgroupfs or systemd)
CgroupDriver string
// The hard eviction thresholds
EvictionHard string
// ManifestPath is the static pod manifest path.
ManifestPath string
// PrepullImages indicates whether node e2e framework should prepull images.
PrepullImages bool
// Enable CRI integration.
EnableCRI bool
// ContainerRuntimeEndpoint is the endpoint of remote container runtime grpc server. This is mainly used for Remote CRI
// validation test.
ContainerRuntimeEndpoint string
// MounterPath is the path to the program to run to perform a mount
MounterPath string
// KubeletConfig is the kubelet configuration the test is running against.
// TODO(random-liu): Remove kubelet configuration related fields above after removing kubelet start logic out.
KubeletConfig componentconfig.KubeletConfiguration
}
@ -224,17 +206,7 @@ func RegisterNodeFlags() {
// For different situation we need to mount different things into the container, run different commands.
// It is hard and unnecessary to deal with the complexity inside the test suite.
flag.BoolVar(&TestContext.NodeConformance, "conformance", false, "If true, the test suite will not start kubelet, and fetch system log (kernel, docker, kubelet log etc.) to the report directory.")
// TODO(random-liu): Remove kubelet related flags when we move the kubelet start logic out of the test.
// TODO(random-liu): Find someway to get kubelet configuration, and automatic config and filter test based on the configuration.
flag.BoolVar(&TestContext.DisableKubenet, "disable-kubenet", false, "If true, start kubelet without kubenet. (default false)")
flag.StringVar(&TestContext.EvictionHard, "eviction-hard", "memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%", "The hard eviction thresholds. If set, pods get evicted when the specified resources drop below the thresholds.")
flag.BoolVar(&TestContext.CgroupsPerQOS, "experimental-cgroups-per-qos", false, "Enable creation of QoS cgroup hierarchy, if true top level QoS and pod cgroups are created.")
flag.StringVar(&TestContext.CgroupDriver, "cgroup-driver", "", "Driver that the kubelet uses to manipulate cgroups on the host. Possible values: 'cgroupfs', 'systemd'")
flag.StringVar(&TestContext.ManifestPath, "manifest-path", "", "The path to the static pod manifest file.")
flag.BoolVar(&TestContext.PrepullImages, "prepull-images", true, "If true, prepull images so image pull failures do not cause test failures.")
flag.BoolVar(&TestContext.EnableCRI, "enable-cri", false, "Enable Container Runtime Interface (CRI) integration.")
flag.StringVar(&TestContext.ContainerRuntimeEndpoint, "container-runtime-endpoint", "", "The endpoint of remote container runtime grpc server, mainly used for Remote CRI validation.")
flag.StringVar(&TestContext.MounterPath, "experimental-mounter-path", "", "Path of mounter binary. Leave empty to use the default mount.")
}
// overwriteFlagsWithViperConfig finds and writes values to flags using viper as input.

View File

@ -5,5 +5,6 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
GINKGO_FLAGS='--skip="\[Flaky\]"'
SETUP_NODE=false
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --experimental-cgroups-per-qos=true'
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'
KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'
PARALLELISM=1

View File

@ -5,5 +5,6 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
GINKGO_FLAGS='--skip="\[Flaky\]"'
SETUP_NODE=false
TEST_ARGS='--enable-cri=true --feature-gates=DynamicKubeletConfig=true,StreamingProxyRedirects=true'
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true,StreamingProxyRedirects=true'
KUBELET_ARGS='--experimental-cri=true'
PARALLELISM=1

View File

@ -5,4 +5,5 @@ GCE_PROJECT=k8s-jkns-pr-node-e2e
CLEANUP=true
GINKGO_FLAGS='--skip="\[Flaky\]|\[Slow\]|\[Serial\]" --flakeAttempts=2'
SETUP_NODE=false
TEST_ARGS='--enable-cri=true --feature-gates="StreamingProxyRedirects=true"'
TEST_ARGS='--feature-gates=StreamingProxyRedirects=true'
KUBELET_ARGS='--experimental-cri=true'

View File

@ -5,6 +5,7 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]|\[Benchmark\]"'
SETUP_NODE=false
TEST_ARGS='--enable-cri=true --feature-gates=DynamicKubeletConfig=true,StreamingProxyRedirects=true'
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true,StreamingProxyRedirects=true'
KUBELET_ARGS='--experimental-cri=true'
PARALLELISM=1
TIMEOUT=3h

View File

@ -5,5 +5,6 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"'
SETUP_NODE=false
TEST_ARGS='--enable-cri=true --feature-gates="StreamingProxyRedirects=true"'
TEST_ARGS='--feature-gates=StreamingProxyRedirects=true'
KUBELET_ARGS='--experimental-cri=true'
TIMEOUT=1h

View File

@ -47,5 +47,5 @@ go run test/e2e_node/runner/remote/run_remote.go --logtostderr --vmodule=*=4 --
--images="$GCE_IMAGES" --image-project="$GCE_IMAGE_PROJECT" \
--image-config-file="$GCE_IMAGE_CONFIG_PATH" --cleanup="$CLEANUP" \
--results-dir="$ARTIFACTS" --ginkgo-flags="--nodes=$PARALLELISM $GINKGO_FLAGS" \
--test-timeout="$TIMEOUT" --setup-node="$SETUP_NODE" --test_args="$TEST_ARGS" \
--test-timeout="$TIMEOUT" --setup-node="$SETUP_NODE" --test_args="$TEST_ARGS --kubelet-flags=\"$KUBELET_ARGS\"" \
--instance-metadata="$GCE_INSTANCE_METADATA"

View File

@ -5,5 +5,5 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
GINKGO_FLAGS='--skip="\[Flaky\]|\[Serial\]"'
SETUP_NODE=false
TEST_ARGS=--experimental-cgroups-per-qos=true
KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'
TIMEOUT=1h

View File

@ -5,4 +5,4 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
GINKGO_FLAGS='--focus="\[Flaky\]"'
SETUP_NODE=false
TEST_ARGS=--experimental-cgroups-per-qos=true
KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'

View File

@ -5,5 +5,5 @@ GCE_PROJECT=k8s-jkns-pr-node-e2e
CLEANUP=true
GINKGO_FLAGS='--skip="\[Flaky\]|\[Slow\]|\[Serial\]" --flakeAttempts=2'
SETUP_NODE=false
TEST_ARGS=--experimental-cgroups-per-qos=true
KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'

View File

@ -5,6 +5,7 @@ GCE_PROJECT=k8s-jkns-ci-node-e2e
CLEANUP=true
GINKGO_FLAGS='--focus="\[Serial\]" --skip="\[Flaky\]|\[Benchmark\]"'
SETUP_NODE=false
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true --experimental-cgroups-per-qos=true'
TEST_ARGS='--feature-gates=DynamicKubeletConfig=true'
KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'
PARALLELISM=1
TIMEOUT=3h

View File

@ -17,5 +17,9 @@ GCE_IMAGE_PROJECT=
CLEANUP=true
# If true, current user will be added to the docker group on test node
SETUP_NODE=false
# KUBELET_ARGS are the arguments passed to kubelet. The args will override corresponding default kubelet
# setting in the test framework and --kubelet-flags in TEST_ARGS.
# If true QoS Cgroup Hierarchy is created and tests specific to the cgroup hierarchy run
TEST_ARGS=--experimental-cgroups-per-qos=true
KUBELET_ARGS='--experimental-cgroups-per-qos=true --cgroup-root=/'
# TEST_ARGS are args passed to node e2e test.
TEST_ARGS=''

View File

@ -275,7 +275,7 @@ func RunRemote(archive string, host string, cleanup bool, junitFilePrefix string
return "", false, err
}
// Insert args at beginning of testArgs, so any values from command line take precedence
testArgs = fmt.Sprintf("--experimental-mounter-path=%s ", mounterPath) + testArgs
testArgs = fmt.Sprintf("--kubelet-flags=--experimental-mounter-path=%s ", mounterPath) + testArgs
}
// Run the tests

View File

@ -0,0 +1,193 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package services
import (
"flag"
"fmt"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"path/filepath"
"strings"
"github.com/golang/glog"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e_node/builder"
)
// TODO(random-liu): Replace this with standard kubelet launcher.
// args is the type used to accumulate args from the flags with the same name.
type args []string
// String function of flag.Value
func (a *args) String() string {
return fmt.Sprint(*a)
}
// Set function of flag.Value
func (a *args) Set(value string) error {
// Someone else is calling flag.Parse after the flags are parsed in the
// test framework. Use this to avoid the flag being parsed twice.
// TODO(random-liu): Figure out who is parsing the flags.
if flag.Parsed() {
return nil
}
// Note that we assume all white space in flag string is separating fields
na := strings.Fields(value)
*a = append(*a, na...)
return nil
}
// kubeletArgs is the override kubelet args specified by the test runner via
// one or more --kubelet-flags occurrences; args.Set accumulates them all.
var kubeletArgs args

func init() {
	// Register --kubelet-flags. The accumulated values are appended after
	// the framework's default kubelet flags in startKubelet, so they take
	// precedence over the defaults.
	// Fix: help text grammar ("will be concatenate" -> "will be concatenated").
	flag.Var(&kubeletArgs, "kubelet-flags", "Kubelet flags passed to kubelet, this will override default kubelet flags in the test. Flags specified in multiple kubelet-flags will be concatenated.")
}
const (
	// Ports of different e2e services.
	kubeletPort         = "10250" // main kubelet port
	kubeletReadOnlyPort = "10255" // read-only kubelet port; serves the health check below
	// Health check url of kubelet, served on the read-only port.
	kubeletHealthCheckURL = "http://127.0.0.1:" + kubeletReadOnlyPort + "/healthz"
)
// startKubelet starts the Kubelet in a separate process or returns an error
// if the Kubelet fails to start.
//
// It assembles the kubelet command line in three layers: (1) a systemd-run or
// bare-binary prefix depending on the host, (2) the framework's default
// kubelet flags, and (3) the user-supplied --kubelet-flags overrides, which
// are appended last so they can override the defaults.
func (e *E2EServices) startKubelet() (*server, error) {
	glog.Info("Starting kubelet")
	// Create pod manifest path
	manifestPath, err := createPodManifestDirectory()
	if err != nil {
		return nil, err
	}
	// Register the manifest directory for removal when the services stop.
	e.rmDirs = append(e.rmDirs, manifestPath)
	var killCommand, restartCommand *exec.Cmd
	var isSystemd bool
	// Apply default kubelet flags.
	cmdArgs := []string{}
	if systemdRun, err := exec.LookPath("systemd-run"); err == nil {
		// On systemd services, detection of a service / unit works reliably while
		// detection of a process started from an ssh session does not work.
		// Since kubelet will typically be run as a service it also makes more
		// sense to test it that way
		isSystemd = true
		// Random unit name so repeated runs on the same host don't collide.
		unitName := fmt.Sprintf("kubelet-%d.service", rand.Int31())
		cmdArgs = append(cmdArgs, systemdRun, "--unit="+unitName, "--remain-after-exit", builder.GetKubeletServerBin())
		killCommand = exec.Command("systemctl", "kill", unitName)
		restartCommand = exec.Command("systemctl", "restart", unitName)
		// Kubelet logs go to the journal when run as a unit; fetch them via journalctl.
		e.logFiles["kubelet.log"] = logFileData{
			journalctlCommand: []string{"-u", unitName},
		}
	} else {
		cmdArgs = append(cmdArgs, builder.GetKubeletServerBin())
		// Without systemd, place kubelet and friends in explicit cgroups.
		cmdArgs = append(cmdArgs,
			"--runtime-cgroups=/docker-daemon",
			"--kubelet-cgroups=/kubelet",
			"--cgroup-root=/",
			"--system-cgroups=/system",
		)
	}
	cmdArgs = append(cmdArgs,
		"--api-servers", getAPIServerClientURL(),
		"--address", "0.0.0.0",
		"--port", kubeletPort,
		"--read-only-port", kubeletReadOnlyPort,
		"--volume-stats-agg-period", "10s", // Aggregate volumes frequently so tests don't need to wait as long
		"--allow-privileged", "true",
		"--serialize-image-pulls", "false",
		"--config", manifestPath,
		"--file-check-frequency", "10s", // Check file frequently so tests won't wait too long
		"--pod-cidr", "10.180.0.0/24", // Assign a fixed CIDR to the node because there is no node controller.
		"--eviction-pressure-transition-period", "30s",
		// Apply test framework feature gates by default. This could also be overridden
		// by kubelet-flags.
		"--feature-gates", framework.TestContext.FeatureGates,
		"--eviction-hard", "memory.available<250Mi,nodefs.available<10%,nodefs.inodesFree<5%", // The hard eviction thresholds.
		"--v", LOG_VERBOSITY_LEVEL, "--logtostderr",
	)
	// Enable kubenet by default.
	cniDir, err := getCNIDirectory()
	if err != nil {
		return nil, err
	}
	cmdArgs = append(cmdArgs,
		"--network-plugin=kubenet",
		"--network-plugin-dir", cniDir)
	// Keep hostname override for convenience.
	if framework.TestContext.NodeName != "" { // If node name is specified, set hostname override.
		cmdArgs = append(cmdArgs, "--hostname-override", framework.TestContext.NodeName)
	}
	// Override the default kubelet flags. kubeletArgs comes from the
	// --kubelet-flags test flag; appended last so the user-specified flags
	// follow the defaults (presumably the kubelet honors the last occurrence
	// of a repeated flag — NOTE(review): confirm against kubelet flag parsing).
	cmdArgs = append(cmdArgs, kubeletArgs...)
	// Adjust the args if we are running kubelet with systemd.
	if isSystemd {
		adjustArgsForSystemd(cmdArgs)
	}
	cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
	server := newServer(
		"kubelet",
		cmd,
		killCommand,
		restartCommand,
		[]string{kubeletHealthCheckURL},
		"kubelet.log",
		e.monitorParent,
		true /* restartOnExit */)
	return server, server.start()
}
// createPodManifestDirectory creates pod manifest directory.
func createPodManifestDirectory() (string, error) {
cwd, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("failed to get current working directory: %v", err)
}
path, err := ioutil.TempDir(cwd, "pod-manifest")
if err != nil {
return "", fmt.Errorf("failed to create static pod manifest directory: %v", err)
}
return path, nil
}
// getCNIDirectory returns CNI directory.
func getCNIDirectory() (string, error) {
cwd, err := os.Getwd()
if err != nil {
return "", err
}
// TODO(random-liu): Make sure the cni directory name is the same with that in remote/remote.go
return filepath.Join(cwd, "cni", "bin"), nil
}
// adjustArgsForSystemd escapes, in place, the characters systemd would
// otherwise try to expand in each argument: '%' (unit specifiers) becomes
// "%%" and '$' (environment references) becomes "$$".
func adjustArgsForSystemd(args []string) {
	escaper := strings.NewReplacer("%", "%%", "$", "$$")
	for i, arg := range args {
		args[i] = escaper.Replace(arg)
	}
}

View File

@ -19,19 +19,15 @@ package services
import (
"fmt"
"io/ioutil"
"math/rand"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"github.com/golang/glog"
"github.com/kardianos/osext"
utilconfig "k8s.io/kubernetes/pkg/util/config"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e_node/builder"
)
// E2EServices starts and stops e2e services in a separate process. The test
@ -39,6 +35,7 @@ import (
type E2EServices struct {
// monitorParent determines whether the sub-processes should watch and die with the current
// process.
rmDirs []string
monitorParent bool
services *server
kubelet *server
@ -80,16 +77,6 @@ func (e *E2EServices) Start() error {
var err error
if !framework.TestContext.NodeConformance {
// Start kubelet
// Create the manifest path for kubelet.
// TODO(random-liu): Remove related logic when we move kubelet starting logic out of the test.
cwd, err := os.Getwd()
if err != nil {
return fmt.Errorf("failed to get current working directory: %v", err)
}
framework.TestContext.ManifestPath, err = ioutil.TempDir(cwd, "pod-manifest")
if err != nil {
return fmt.Errorf("failed to create static pod manifest directory: %v", err)
}
e.kubelet, err = e.startKubelet()
if err != nil {
return fmt.Errorf("failed to start kubelet: %v", err)
@ -105,14 +92,6 @@ func (e *E2EServices) Stop() {
if !framework.TestContext.NodeConformance {
// Collect log files.
e.getLogFiles()
// Cleanup the manifest path for kubelet.
manifestPath := framework.TestContext.ManifestPath
if manifestPath != "" {
err := os.RemoveAll(manifestPath)
if err != nil {
glog.Errorf("Failed to delete static pod manifest directory %s: %v", manifestPath, err)
}
}
}
}()
if e.services != nil {
@ -125,6 +104,14 @@ func (e *E2EServices) Stop() {
glog.Errorf("Failed to stop kubelet: %v", err)
}
}
if e.rmDirs != nil {
for _, d := range e.rmDirs {
err := os.RemoveAll(d)
if err != nil {
glog.Errorf("Failed to delete directory %s: %v", d, err)
}
}
}
}
// RunE2EServices actually start the e2e services. This function is used to
@ -158,108 +145,6 @@ func (e *E2EServices) startInternalServices() (*server, error) {
return server, server.start()
}
const (
	// Ports of different e2e services.
	kubeletPort         = "10250" // main kubelet port
	kubeletReadOnlyPort = "10255" // read-only kubelet port; serves the health check below
	// Health check url of kubelet, served on the read-only port.
	kubeletHealthCheckURL = "http://127.0.0.1:" + kubeletReadOnlyPort + "/healthz"
)
// startKubelet starts the Kubelet in a separate process or returns an error
// if the Kubelet fails to start.
//
// The command line is assembled from individual framework.TestContext fields
// (EvictionHard, ManifestPath, EnableCRI, CgroupsPerQOS, ...), each populated
// by its own dedicated test flag.
func (e *E2EServices) startKubelet() (*server, error) {
	glog.Info("Starting kubelet")
	var killCommand, restartCommand *exec.Cmd
	cmdArgs := []string{}
	if systemdRun, err := exec.LookPath("systemd-run"); err == nil {
		// On systemd services, detection of a service / unit works reliably while
		// detection of a process started from an ssh session does not work.
		// Since kubelet will typically be run as a service it also makes more
		// sense to test it that way
		// Random unit name so repeated runs on the same host don't collide.
		unitName := fmt.Sprintf("kubelet-%d.service", rand.Int31())
		cmdArgs = append(cmdArgs, systemdRun, "--unit="+unitName, "--remain-after-exit", builder.GetKubeletServerBin())
		killCommand = exec.Command("systemctl", "kill", unitName)
		restartCommand = exec.Command("systemctl", "restart", unitName)
		// Kubelet logs go to the journal when run as a unit; fetch them via journalctl.
		e.logFiles["kubelet.log"] = logFileData{
			journalctlCommand: []string{"-u", unitName},
		}
		// Escape '%' in the eviction thresholds so systemd does not expand them.
		framework.TestContext.EvictionHard = adjustConfigForSystemd(framework.TestContext.EvictionHard)
	} else {
		cmdArgs = append(cmdArgs, builder.GetKubeletServerBin())
		// Without systemd, place kubelet and friends in explicit cgroups.
		cmdArgs = append(cmdArgs,
			"--runtime-cgroups=/docker-daemon",
			"--kubelet-cgroups=/kubelet",
			"--cgroup-root=/",
			"--system-cgroups=/system",
		)
	}
	cmdArgs = append(cmdArgs,
		"--api-servers", getAPIServerClientURL(),
		"--address", "0.0.0.0",
		"--port", kubeletPort,
		"--read-only-port", kubeletReadOnlyPort,
		"--volume-stats-agg-period", "10s", // Aggregate volumes frequently so tests don't need to wait as long
		"--allow-privileged", "true",
		"--serialize-image-pulls", "false",
		"--config", framework.TestContext.ManifestPath,
		"--file-check-frequency", "10s", // Check file frequently so tests won't wait too long
		"--pod-cidr=10.180.0.0/24", // Assign a fixed CIDR to the node because there is no node controller.
		"--eviction-hard", framework.TestContext.EvictionHard,
		"--eviction-pressure-transition-period", "30s",
		"--feature-gates", framework.TestContext.FeatureGates,
		"--v", LOG_VERBOSITY_LEVEL, "--logtostderr",
		"--experimental-mounter-path", framework.TestContext.MounterPath,
	)
	if framework.TestContext.NodeName != "" { // If node name is specified, set hostname override.
		cmdArgs = append(cmdArgs, "--hostname-override", framework.TestContext.NodeName)
	}
	if framework.TestContext.EnableCRI {
		cmdArgs = append(cmdArgs, "--experimental-cri", "true") // Whether to use experimental cri integration.
	}
	if framework.TestContext.ContainerRuntimeEndpoint != "" {
		cmdArgs = append(cmdArgs, "--container-runtime-endpoint", framework.TestContext.ContainerRuntimeEndpoint)
	}
	if framework.TestContext.CgroupsPerQOS {
		cmdArgs = append(cmdArgs,
			"--experimental-cgroups-per-qos", "true",
			"--cgroup-root", "/",
		)
	}
	if framework.TestContext.CgroupDriver != "" {
		cmdArgs = append(cmdArgs,
			"--cgroup-driver", framework.TestContext.CgroupDriver,
		)
	}
	if !framework.TestContext.DisableKubenet {
		cwd, err := os.Getwd()
		if err != nil {
			return nil, err
		}
		cmdArgs = append(cmdArgs,
			"--network-plugin=kubenet",
			// TODO(random-liu): Make sure the cni directory name is the same with that in remote/remote.go
			"--network-plugin-dir", filepath.Join(cwd, "cni", "bin")) // Enable kubenet
	}
	cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
	server := newServer(
		"kubelet",
		cmd,
		killCommand,
		restartCommand,
		[]string{kubeletHealthCheckURL},
		"kubelet.log",
		e.monitorParent,
		true /* restartOnExit */)
	return server, server.start()
}
// adjustConfigForSystemd escapes every '%' in config as "%%" so systemd does
// not interpret it as a unit specifier.
func adjustConfigForSystemd(config string) string {
	return strings.NewReplacer("%", "%%").Replace(config)
}
// getLogFiles gets logs of interest either via journalctl or by creating sym
// links. Since we scp files from the remote directory, symlinks will be
// treated as normal files and file contents will be copied over.