Merge pull request #105575 from endocrimes/dani/cleanup-launcher
Allow the e2e_node runner to receive a KubeletConfiguration rather than requiring flags
Commit: b489b03946
@@ -49,6 +49,7 @@ extra_envs=${EXTRA_ENVS:-}
 runtime_config=${RUNTIME_CONFIG:-}
 ssh_user=${SSH_USER:-"${USER}"}
 ssh_key=${SSH_KEY:-}
+kubelet_config_file=${KUBELET_CONFIG_FILE:-"test/e2e_node/jenkins/default-kubelet-config.yaml"}
 
 # Parse the flags to pass to ginkgo
 ginkgoflags=""
@@ -164,6 +165,8 @@ if [ "${remote}" = true ] ; then
   echo "Ginkgo Flags: ${ginkgoflags}"
   echo "Instance Metadata: ${metadata}"
   echo "Image Config File: ${image_config_file}"
+  echo "Kubelet Config File: ${kubelet_config_file}"
+
   # Invoke the runner
   go run test/e2e_node/runner/remote/run_remote.go --logtostderr --vmodule=*=4 --ssh-env="gce" \
     --zone="${zone}" --project="${project}" --gubernator="${gubernator}" \
@@ -174,7 +177,7 @@ if [ "${remote}" = true ] ; then
     --image-config-file="${image_config_file}" --system-spec-name="${system_spec_name}" \
     --runtime-config="${runtime_config}" --preemptible-instances="${preemptible_instances}" \
     --ssh-user="${ssh_user}" --ssh-key="${ssh_key}" --image-config-dir="${image_config_dir}" \
-    --extra-envs="${extra_envs}" --test-suite="${test_suite}" \
+    --extra-envs="${extra_envs}" --kubelet-config-file="${kubelet_config_file}" --test-suite="${test_suite}" \
     "${timeout_arg}" \
     2>&1 | tee -i "${artifacts}/build-log.txt"
   exit $?
@@ -210,6 +213,7 @@ else
   --ginkgo-flags="${ginkgoflags}" --test-flags="--container-runtime=${runtime} \
   --alsologtostderr --v 4 --report-dir=${artifacts} --node-name $(hostname) \
   ${test_args}" --runtime-config="${runtime_config}" \
+  --kubelet-config-file="${kubelet_config_file}" \
   --build-dependencies=true 2>&1 | tee -i "${artifacts}/build-log.txt"
   exit $?
 fi
@@ -168,12 +168,13 @@ func TestE2eNode(t *testing.T) {
 		}
 		return
 	}
-	// If run-services-mode is not specified, run test.
+
+	// We're not running in a special mode so lets run tests.
 	gomega.RegisterFailHandler(ginkgo.Fail)
 	reporters := []ginkgo.Reporter{}
 	reportDir := framework.TestContext.ReportDir
 	if reportDir != "" {
-		// Create the directory if it doesn't already exists
+		// Create the directory if it doesn't already exist
 		if err := os.MkdirAll(reportDir, 0755); err != nil {
 			klog.Errorf("Failed creating report directory: %v", err)
 		} else {
@@ -294,8 +295,6 @@ func waitForNodeReady() {
 }
 
 // updateTestContext updates the test context with the node name.
-// TODO(random-liu): Using dynamic kubelet configuration feature to
-// update test context with node configuration.
 func updateTestContext() error {
 	setExtraEnvs()
 	updateImageAllowList()
test/e2e_node/jenkins/default-kubelet-config.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@
+apiVersion: kubelet.config.k8s.io/v1beta1
+kind: KubeletConfiguration
+cgroupDriver: cgroupfs
+cgroupRoot: /
+
+# Assign a fixed CIDR to the node because we do not run a node controller
+# This MUST be in sync with IPs in:
+# - cluster/gce/config-test.sh and
+# - test/e2e_node/conformance/run_test.sh
+podCIDR: "10.100.0.0/24"
+
+# Aggregate volumes frequently to reduce test wait times
+volumeStatsAggPeriod: 10s
+# Check files frequently to reduce test wait times
+fileCheckFrequency: 10s
+
+evictionPressureTransitionPeriod: 30s
+evictionHard:
+  memory.available: 250Mi
+  nodefs.available: 10%
+  nodefs.inodesFree: 5%
+evictionMinimumReclaim:
+  nodefs.available: 5%
+  nodefs.inodesFree: 5%
+
+serializeImagePulls: false
+
@@ -19,6 +19,7 @@ package remote
 import (
 	"flag"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"os"
 	"os/exec"
@@ -36,8 +37,39 @@ var resultsDir = flag.String("results-dir", "/tmp/", "Directory to scp test resu
 
 const archiveName = "e2e_node_test.tar.gz"
 
+func copyKubeletConfigIfExists(kubeletConfigFile, dstDir string) error {
+	srcStat, err := os.Stat(kubeletConfigFile)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		} else {
+			return err
+		}
+	}
+
+	if !srcStat.Mode().IsRegular() {
+		return fmt.Errorf("%s is not a regular file", kubeletConfigFile)
+	}
+
+	source, err := os.Open(kubeletConfigFile)
+	if err != nil {
+		return err
+	}
+	defer source.Close()
+
+	dst := filepath.Join(dstDir, "kubeletconfig.yaml")
+	destination, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer destination.Close()
+
+	_, err = io.Copy(destination, source)
+	return err
+}
+
 // CreateTestArchive creates the archive package for the node e2e test.
-func CreateTestArchive(suite TestSuite, systemSpecName string) (string, error) {
+func CreateTestArchive(suite TestSuite, systemSpecName, kubeletConfigFile string) (string, error) {
 	klog.V(2).Infof("Building archive...")
 	tardir, err := ioutil.TempDir("", "node-e2e-archive")
 	if err != nil {
@@ -45,6 +77,11 @@ func CreateTestArchive(suite TestSuite, systemSpecName string) (string, error) {
 	}
 	defer os.RemoveAll(tardir)
 
+	err = copyKubeletConfigIfExists(kubeletConfigFile, tardir)
+	if err != nil {
+		return "", fmt.Errorf("failed to copy kubelet config: %v", err)
+	}
+
 	// Call the suite function to setup the test package.
 	err = suite.SetupTestPackage(tardir, systemSpecName)
 	if err != nil {
@@ -65,8 +102,7 @@ func CreateTestArchive(suite TestSuite, systemSpecName string) (string, error) {
 }
 
 // RunRemote returns the command output, whether the exit was ok, and any errors
-// TODO(random-liu): junitFilePrefix is not prefix actually, the file name is junit-junitFilePrefix.xml. Change the variable name.
-func RunRemote(suite TestSuite, archive string, host string, cleanup bool, imageDesc, junitFilePrefix, testArgs, ginkgoArgs, systemSpecName, extraEnvs, runtimeConfig string) (string, bool, error) {
+func RunRemote(suite TestSuite, archive string, host string, cleanup bool, imageDesc, junitFileName, testArgs, ginkgoArgs, systemSpecName, extraEnvs, runtimeConfig string) (string, bool, error) {
 	// Create the temp staging directory
 	klog.V(2).Infof("Staging test binaries on %q", host)
 	workspace := newWorkspaceDir()
@@ -111,7 +147,7 @@ func RunRemote(suite TestSuite, archive string, host string, cleanup bool, image
 	}
 
 	klog.V(2).Infof("Running test on %q", host)
-	output, err := suite.RunTest(host, workspace, resultDir, imageDesc, junitFilePrefix, testArgs, ginkgoArgs, systemSpecName, extraEnvs, runtimeConfig, *testTimeout)
+	output, err := suite.RunTest(host, workspace, resultDir, imageDesc, junitFileName, testArgs, ginkgoArgs, systemSpecName, extraEnvs, runtimeConfig, *testTimeout)
 
 	aggErrs := []error{}
 	// Do not log the output here, let the caller deal with the test output.
@@ -37,6 +37,7 @@ var testFlags = flag.String("test-flags", "", "Space-separated list of arguments
 var systemSpecName = flag.String("system-spec-name", "", fmt.Sprintf("The name of the system spec used for validating the image in the node conformance test. The specs are at %s. If unspecified, the default built-in spec (system.DefaultSpec) will be used.", system.SystemSpecPath))
 var extraEnvs = flag.String("extra-envs", "", "The extra environment variables needed for node e2e tests. Format: a list of key=value pairs, e.g., env1=val1,env2=val2")
 var runtimeConfig = flag.String("runtime-config", "", "The runtime configuration for the API server on the node e2e tests. Format: a list of key=value pairs, e.g., env1=val1,env2=val2")
+var kubeletConfigFile = flag.String("kubelet-config-file", "", "The KubeletConfiguration file that should be applied to the kubelet")
 
 func main() {
 	klog.InitFlags(nil)
@@ -67,6 +68,9 @@ func main() {
 		systemSpecFile := filepath.Join(rootDir, system.SystemSpecPath, *systemSpecName+".yaml")
 		args = append(args, fmt.Sprintf("--system-spec-name=%s --system-spec-file=%s --extra-envs=%s", *systemSpecName, systemSpecFile, *extraEnvs))
 	}
+	if *kubeletConfigFile != "" {
+		args = append(args, fmt.Sprintf("--kubelet-config-file=\"%s\"", *kubeletConfigFile))
+	}
 	if err := runCommand(ginkgo, args...); err != nil {
 		klog.Exitf("Test failed: %v", err)
 	}
@@ -69,6 +69,7 @@ var ginkgoFlags = flag.String("ginkgo-flags", "", "Passed to ginkgo to specify a
 var systemSpecName = flag.String("system-spec-name", "", fmt.Sprintf("The name of the system spec used for validating the image in the node conformance test. The specs are at %s. If unspecified, the default built-in spec (system.DefaultSpec) will be used.", system.SystemSpecPath))
 var extraEnvs = flag.String("extra-envs", "", "The extra environment variables needed for node e2e tests. Format: a list of key=value pairs, e.g., env1=val1,env2=val2")
 var runtimeConfig = flag.String("runtime-config", "", "The runtime configuration for the API server on the node e2e tests.. Format: a list of key=value pairs, e.g., env1=val1,env2=val2")
+var kubeletConfigFile = flag.String("kubelet-config-file", "", "The KubeletConfiguration file that should be applied to the kubelet")
 
 // envs is the type used to collect all node envs. The key is the env name,
 // and the value is the env value
@@ -218,7 +219,7 @@ func main() {
 	rand.Seed(time.Now().UnixNano())
 	if *buildOnly {
 		// Build the archive and exit
-		remote.CreateTestArchive(suite, *systemSpecName)
+		remote.CreateTestArchive(suite, *systemSpecName, *kubeletConfigFile)
 		return
 	}
 
@@ -340,16 +341,16 @@ func main() {
 		imageConfig := gceImages.images[shortName]
 		fmt.Printf("Initializing e2e tests using image %s/%s/%s.\n", shortName, imageConfig.project, imageConfig.image)
 		running++
-		go func(image *internalGCEImage, junitFilePrefix string) {
-			results <- testImage(image, junitFilePrefix)
+		go func(image *internalGCEImage, junitFileName string) {
+			results <- testImage(image, junitFileName)
 		}(&imageConfig, shortName)
 	}
 	if *hosts != "" {
 		for _, host := range strings.Split(*hosts, ",") {
 			fmt.Printf("Initializing e2e tests using host %s.\n", host)
 			running++
-			go func(host string, junitFilePrefix string) {
-				results <- testHost(host, *cleanup, "", junitFilePrefix, *ginkgoFlags)
+			go func(host string, junitFileName string) {
+				results <- testHost(host, *cleanup, "", junitFileName, *ginkgoFlags)
			}(host, host)
 		}
 	}
@@ -404,7 +405,7 @@ func callGubernator(gubernator bool) {
 }
 
 func (a *Archive) getArchive() (string, error) {
-	a.Do(func() { a.path, a.err = remote.CreateTestArchive(suite, *systemSpecName) })
+	a.Do(func() { a.path, a.err = remote.CreateTestArchive(suite, *systemSpecName, *kubeletConfigFile) })
 	return a.path, a.err
 }
 
@@ -436,7 +437,7 @@ func getImageMetadata(input string) *compute.Metadata {
 }
 
 // Run tests in archive against host
-func testHost(host string, deleteFiles bool, imageDesc, junitFilePrefix, ginkgoFlagsStr string) *TestResult {
+func testHost(host string, deleteFiles bool, imageDesc, junitFileName, ginkgoFlagsStr string) *TestResult {
 	instance, err := computeService.Instances.Get(*project, *zone, host).Do()
 	if err != nil {
 		return &TestResult{
@@ -466,7 +467,7 @@ func testHost(host string, deleteFiles bool, imageDesc, junitFilePrefix, ginkgoF
 		}
 	}
 
-	output, exitOk, err := remote.RunRemote(suite, path, host, deleteFiles, imageDesc, junitFilePrefix, *testArgs, ginkgoFlagsStr, *systemSpecName, *extraEnvs, *runtimeConfig)
+	output, exitOk, err := remote.RunRemote(suite, path, host, deleteFiles, imageDesc, junitFileName, *testArgs, ginkgoFlagsStr, *systemSpecName, *extraEnvs, *runtimeConfig)
 	return &TestResult{
 		output: output,
 		err: err,
@@ -526,7 +527,7 @@ func getGCEImage(imageRegex, imageFamily string, project string) (string, error)
 
 // Provision a gce instance using image and run the tests in archive against the instance.
 // Delete the instance afterward.
-func testImage(imageConfig *internalGCEImage, junitFilePrefix string) *TestResult {
+func testImage(imageConfig *internalGCEImage, junitFileName string) *TestResult {
 	ginkgoFlagsStr := *ginkgoFlags
 	// Check whether the test is for benchmark.
 	if len(imageConfig.tests) > 0 {
@@ -552,7 +553,7 @@ func testImage(imageConfig *internalGCEImage, junitFilePrefix string) *TestResul
 	// If we are going to delete the instance, don't bother with cleaning up the files
 	deleteFiles := !*deleteInstances && *cleanup
 
-	result := testHost(host, deleteFiles, imageConfig.imageDesc, junitFilePrefix, ginkgoFlagsStr)
+	result := testHost(host, deleteFiles, imageConfig.imageDesc, junitFileName, ginkgoFlagsStr)
 	// This is a temporary solution to collect serial node serial log. Only port 1 contains useful information.
 	// TODO(random-liu): Extract out and unify log collection logic with cluste e2e.
 	serialPortOutput, err := computeService.Instances.GetSerialPortOutput(*project, *zone, host).Port(1).Do()
@@ -26,17 +26,19 @@ import (
 	"strings"
 	"time"
 
-	"github.com/spf13/pflag"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	cliflag "k8s.io/component-base/cli/flag"
 	"k8s.io/klog/v2"
 	kubeletconfigv1beta1 "k8s.io/kubelet/config/v1beta1"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/kubernetes/cmd/kubelet/app/options"
 	"k8s.io/kubernetes/pkg/cluster/ports"
 	"k8s.io/kubernetes/pkg/features"
 	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
+	"k8s.io/kubernetes/pkg/kubelet/kubeletconfig/configfiles"
 	kubeletconfigcodec "k8s.io/kubernetes/pkg/kubelet/kubeletconfig/util/codec"
+	utilfs "k8s.io/kubernetes/pkg/util/filesystem"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e_node/builder"
 	"k8s.io/kubernetes/test/e2e_node/remote"
@@ -62,11 +64,11 @@ func (a *args) Set(value string) error {
 
 // kubeletArgs is the override kubelet args specified by the test runner.
 var kubeletArgs args
-var genKubeletConfigFile bool
+var kubeletConfigFile string
 
 func init() {
-	flag.Var(&kubeletArgs, "kubelet-flags", "Kubelet flags passed to kubelet, this will override default kubelet flags in the test. Flags specified in multiple kubelet-flags will be concatenate.")
-	flag.BoolVar(&genKubeletConfigFile, "generate-kubelet-config-file", true, "The test runner will generate a Kubelet config file containing test defaults instead of passing default flags to the Kubelet.")
+	flag.Var(&kubeletArgs, "kubelet-flags", "Kubelet flags passed to kubelet, this will override default kubelet flags in the test. Flags specified in multiple kubelet-flags will be concatenate. Deprecated, see: --kubelet-config-file.")
+	flag.StringVar(&kubeletConfigFile, "kubelet-config-file", "./kubeletconfig.yaml", "The base KubeletConfiguration to use when setting up the kubelet. This configuration will then be minimially modified to support requirements from the test suite.")
 }
 
 // RunKubelet starts kubelet and waits for termination signal. Once receives the
@@ -94,6 +96,60 @@ const (
 	kubeletHealthCheckURL = "http://127.0.0.1:" + kubeletReadOnlyPort + "/healthz"
 )
 
+func baseKubeConfiguration(cfgPath string) (*kubeletconfig.KubeletConfiguration, error) {
+	cfgPath, err := filepath.Abs(cfgPath)
+	if err != nil {
+		return nil, err
+	}
+
+	_, err = os.Stat(cfgPath)
+	if err != nil {
+		// If the kubeletconfig exists, but for some reason we can't read it, then
+		// return an error to avoid silently skipping it.
+		if !os.IsNotExist(err) {
+			return nil, err
+		}
+
+		// If the kubeletconfig file doesn't exist, then use a default configuration
+		// as the base.
+		kc, err := options.NewKubeletConfiguration()
+		if err != nil {
+			return nil, err
+		}
+
+		// The following values should match the contents of
+		// test/e2e_node/jenkins/default-kubelet-config.yaml. We can't use go embed
+		// here to fallback as default config lives in a parallel directory.
+		// TODO(endocrimes): Remove fallback for lack of kubelet config when all
+		// uses of e2e_node switch to providing one (or move to
+		// kubetest2 and pick up the default).
+		kc.CgroupRoot = "/"
+		kc.VolumeStatsAggPeriod = metav1.Duration{Duration: 10 * time.Second}
+		kc.SerializeImagePulls = false
+		kc.FileCheckFrequency = metav1.Duration{Duration: 10 * time.Second}
+		kc.PodCIDR = "10.100.0.0/24"
+		kc.EvictionPressureTransitionPeriod = metav1.Duration{Duration: 30 * time.Second}
+		kc.EvictionHard = map[string]string{
+			"memory.available": "250Mi",
+			"nodefs.available": "10%",
+			"nodefs.inodesFree": "5%",
+		}
+		kc.EvictionMinimumReclaim = map[string]string{
+			"nodefs.available": "5%",
+			"nodefs.inodesFree": "5%",
+		}
+
+		return kc, nil
+	}
+
+	loader, err := configfiles.NewFsLoader(&utilfs.DefaultFs{}, cfgPath)
+	if err != nil {
+		return nil, err
+	}
+
+	return loader.Load()
+}
+
 // startKubelet starts the Kubelet in a separate process or returns an error
 // if the Kubelet fails to start.
 func (e *E2EServices) startKubelet() (*server, error) {
@@ -127,53 +183,25 @@ func (e *E2EServices) startKubelet() (*server, error) {
 		return nil, err
 	}
 
-	// PLEASE NOTE: If you set new KubeletConfiguration values or stop setting values here,
-	// you must also update the flag names in kubeletConfigFlags!
-	kubeletConfigFlags := []string{}
-
-	// set up the default kubeletconfiguration
-	kc, err := options.NewKubeletConfiguration()
+	kc, err := baseKubeConfiguration(kubeletConfigFile)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to load base kubelet configuration: %v", err)
 	}
 
-	kc.CgroupRoot = "/"
-	kubeletConfigFlags = append(kubeletConfigFlags, "cgroup-root")
-
-	kc.VolumeStatsAggPeriod = metav1.Duration{Duration: 10 * time.Second} // Aggregate volumes frequently so tests don't need to wait as long
-	kubeletConfigFlags = append(kubeletConfigFlags, "volume-stats-agg-period")
-
-	kc.SerializeImagePulls = false
-	kubeletConfigFlags = append(kubeletConfigFlags, "serialize-image-pulls")
+	// Apply overrides to allow access to the Kubelet API from the test suite.
+	// These are insecure and should generally not be used outside of test infra.
 
+	// --anonymous-auth
+	kc.Authentication.Anonymous.Enabled = true
+	// --authentication-token-webhook
+	kc.Authentication.Webhook.Enabled = false
+	// --authorization-mode
+	kc.Authorization.Mode = kubeletconfig.KubeletAuthorizationModeAlwaysAllow
+	// --read-only-port
+	kc.ReadOnlyPort = ports.KubeletReadOnlyPort
+
+	// Static Pods are in a per-test location, so we override them for tests.
 	kc.StaticPodPath = podPath
-	kubeletConfigFlags = append(kubeletConfigFlags, "pod-manifest-path")
-
-	kc.FileCheckFrequency = metav1.Duration{Duration: 10 * time.Second} // Check file frequently so tests won't wait too long
-	kubeletConfigFlags = append(kubeletConfigFlags, "file-check-frequency")
-
-	// Assign a fixed CIDR to the node because there is no node controller.
-	// Note: this MUST be in sync with the IP in
-	// - cluster/gce/config-test.sh and
-	// - test/e2e_node/conformance/run_test.sh.
-	kc.PodCIDR = "10.100.0.0/24"
-	kubeletConfigFlags = append(kubeletConfigFlags, "pod-cidr")
-
-	kc.EvictionPressureTransitionPeriod = metav1.Duration{Duration: 30 * time.Second}
-	kubeletConfigFlags = append(kubeletConfigFlags, "eviction-pressure-transition-period")
-
-	kc.EvictionHard = map[string]string{
-		"memory.available": "250Mi",
-		"nodefs.available": "10%",
-		"nodefs.inodesFree": "5%",
-	}
-	kubeletConfigFlags = append(kubeletConfigFlags, "eviction-hard")
-
-	kc.EvictionMinimumReclaim = map[string]string{
-		"nodefs.available": "5%",
-		"nodefs.inodesFree": "5%",
-	}
-	kubeletConfigFlags = append(kubeletConfigFlags, "eviction-minimum-reclaim")
 
 	var killCommand, restartCommand *exec.Cmd
 	var isSystemd bool
@@ -204,17 +232,14 @@ func (e *E2EServices) startKubelet() (*server, error) {
 		restartCommand = exec.Command("systemctl", "restart", unitName)
 
 		kc.KubeletCgroups = "/kubelet.slice"
-		kubeletConfigFlags = append(kubeletConfigFlags, "kubelet-cgroups")
 	} else {
 		cmdArgs = append(cmdArgs, builder.GetKubeletServerBin())
 		// TODO(random-liu): Get rid of this docker specific thing.
 		cmdArgs = append(cmdArgs, "--runtime-cgroups=/docker-daemon")
 
 		kc.KubeletCgroups = "/kubelet"
-		kubeletConfigFlags = append(kubeletConfigFlags, "kubelet-cgroups")
 
 		kc.SystemCgroups = "/system"
-		kubeletConfigFlags = append(kubeletConfigFlags, "system-cgroups")
 	}
 	cmdArgs = append(cmdArgs,
 		"--kubeconfig", kubeconfigPath,
@@ -277,17 +302,11 @@ func (e *E2EServices) startKubelet() (*server, error) {
 		cmdArgs = append(cmdArgs, "--image-service-endpoint", framework.TestContext.ImageServiceEndpoint)
 	}
 
-	// Write config file or flags, depending on whether --generate-kubelet-config-file was provided
-	if genKubeletConfigFile {
-		if err := writeKubeletConfigFile(kc, kubeletConfigPath); err != nil {
-			return nil, err
-		}
-		// add the flag to load config from a file
-		cmdArgs = append(cmdArgs, "--config", kubeletConfigPath)
-	} else {
-		// generate command line flags from the default config, since --generate-kubelet-config-file was not provided
-		addKubeletConfigFlags(&cmdArgs, kc, kubeletConfigFlags)
+	if err := writeKubeletConfigFile(kc, kubeletConfigPath); err != nil {
+		return nil, err
 	}
+	// add the flag to load config from a file
+	cmdArgs = append(cmdArgs, "--config", kubeletConfigPath)
 
 	// Override the default kubelet flags.
 	cmdArgs = append(cmdArgs, kubeletArgs...)
@@ -311,15 +330,6 @@ func (e *E2EServices) startKubelet() (*server, error) {
 	return server, server.start()
 }
 
-// addKubeletConfigFlags adds the flags we care about from the provided kubelet configuration object
-func addKubeletConfigFlags(cmdArgs *[]string, kc *kubeletconfig.KubeletConfiguration, flags []string) {
-	fs := pflag.NewFlagSet("kubelet", pflag.ExitOnError)
-	options.AddKubeletConfigFlags(fs, kc)
-	for _, name := range flags {
-		*cmdArgs = append(*cmdArgs, fmt.Sprintf("--%s=%s", name, fs.Lookup(name).Value.String()))
-	}
-}
-
 // writeKubeletConfigFile writes the kubelet config file based on the args and returns the filename
 func writeKubeletConfigFile(internal *kubeletconfig.KubeletConfiguration, path string) error {
 	data, err := kubeletconfigcodec.EncodeKubeletConfig(internal, kubeletconfigv1beta1.SchemeGroupVersion)
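For orientation: after this change the services harness always serializes the merged KubeletConfiguration and starts the kubelet from that file rather than from generated flags. The resulting command line looks roughly like the sketch below; the paths are placeholders and the many other flags appended by the launcher are omitted:

    # Sketch of the kubelet invocation assembled by startKubelet() above:
    # the launcher writes the merged config to a file and passes it via --config.
    kubelet --kubeconfig "${KUBECONFIG_PATH}" --config "${KUBELET_CONFIG_PATH}"  # ...plus the remaining launcher-added flags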