Fix review feedback, bazel files, tests and the dnsmasq-metrics spec. Set --kubelet-preferred-address-types on v1.5 and higher clusters
parent 810e9e107f
commit b0603046b4
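The last part of the commit title refers to the version gate added in the manifests.go hunks further down: kubeadm only passes --kubelet-preferred-address-types to kube-apiserver when the configured Kubernetes version is at least v1.5.0-beta.2. Below is a minimal standalone sketch of that gating logic; the package layout and the extraAPIServerArgs helper are illustrative only (not the actual kubeadm code), but the semver comparison mirrors the getAPIServerCommand change in this diff.

package main

import (
	"fmt"

	"github.com/blang/semver"
)

// Minimum kube-apiserver version that understands --kubelet-preferred-address-types,
// taken from the manifests.go hunk in this diff.
var preferredAddressMinimumVersion = semver.MustParse("1.5.0-beta.2")

// extraAPIServerArgs is a hypothetical helper: it returns the flag only for
// versions that support it, and silently skips it when the version string is
// empty or does not parse as semver.
func extraAPIServerArgs(kubernetesVersion string) []string {
	args := []string{}
	if len(kubernetesVersion) != 0 {
		// Strip the leading "v" (e.g. "v1.5.3"), otherwise semver.Parse fails.
		k8sVersion, err := semver.Parse(kubernetesVersion[1:])
		if err == nil && k8sVersion.GTE(preferredAddressMinimumVersion) {
			args = append(args, "--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname")
		}
	}
	return args
}

func main() {
	fmt.Println(extraAPIServerArgs("v1.5.3")) // prints the flag
	fmt.Println(extraAPIServerArgs("v1.4.7")) // prints an empty slice
}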
@@ -14,8 +14,5 @@ go_binary(
name = "kubeadm",
srcs = ["kubeadm.go"],
tags = ["automanaged"],
deps = [
"//cmd/kubeadm/app:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
],
deps = ["//cmd/kubeadm/app:go_default_library"],
)

@@ -18,7 +18,6 @@ go_library(
"//cmd/kubeadm/app/apis/kubeadm/install:go_default_library",
"//cmd/kubeadm/app/cmd:go_default_library",
"//pkg/kubectl/cmd/util:go_default_library",
"//pkg/util/logs:go_default_library",
"//vendor:github.com/spf13/pflag",
],
)

@@ -22,7 +22,6 @@ import (
"html/template"
"io"
"io/ioutil"
"os"

"github.com/renstrom/dedent"
"github.com/spf13/cobra"
@@ -54,8 +53,8 @@ var (
initDoneMsgf = dedent.Dedent(`
Your Kubernetes master has initialized successfully!

But you still need to deploy a pod network to the cluster.
You should "kubectl apply -f" some pod network yaml file that's listed at:
You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
http://kubernetes.io/docs/admin/addons/

You can now join any number of machines by running the following on each node:
@@ -168,7 +167,7 @@ type Init struct {

func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight bool) (*Init, error) {

fmt.Println("[kubeadm] Bear in mind that kubeadm is in alpha, do not use it in production clusters.")
fmt.Println("[kubeadm] WARNING: kubeadm is in alpha, please do not use it for production clusters.")

if cfgPath != "" {
b, err := ioutil.ReadFile(cfgPath)
@@ -190,25 +189,23 @@ func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight
}

if !skipPreFlight {
fmt.Println("[preflight] Running pre-flight checks...")
fmt.Println("[preflight] Running pre-flight checks")

// First, check if we're root separately from the other preflight checks and fail fast
if err := preflight.RunChecks([]preflight.PreFlightCheck{preflight.IsRootCheck{}}, os.Stderr); err != nil {
return nil, &preflight.PreFlightError{Msg: err.Error()}
if err := preflight.RunRootCheckOnly(); err != nil {
return nil, err
}

// Then continue with the others...
if err := preflight.RunInitMasterChecks(cfg); err != nil {
return nil, &preflight.PreFlightError{Msg: err.Error()}
return nil, err
}
} else {
fmt.Println("[preflight] Skipping pre-flight checks...")
fmt.Println("[preflight] Skipping pre-flight checks")
}

// Try to start the kubelet service in case it's inactive
if err := preflight.TryStartKubelet(); err != nil {
return nil, &preflight.PreFlightError{Msg: err.Error()}
}
preflight.TryStartKubelet()

// validate version argument
ver, err := kubeadmutil.KubernetesReleaseVersion(cfg.KubernetesVersion)
@@ -220,7 +217,7 @@ func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight
}
}
cfg.KubernetesVersion = ver
fmt.Println("Using Kubernetes version:", ver)
fmt.Println("[init] Using Kubernetes version:", ver)

// Warn about the limitations with the current cloudprovider solution.
if cfg.CloudProvider != "" {

@@ -20,7 +20,6 @@ import (
"fmt"
"io"
"io/ioutil"
"os"

"github.com/renstrom/dedent"
"github.com/spf13/cobra"
@@ -96,7 +95,7 @@ type Join struct {

func NewJoin(cfgPath string, args []string, cfg *kubeadmapi.NodeConfiguration, skipPreFlight bool) (*Join, error) {

fmt.Println("[kubeadm] Bear in mind that kubeadm is in alpha, do not use it in production clusters.")
fmt.Println("[kubeadm] WARNING: kubeadm is in alpha, please do not use it for production clusters.")

if cfgPath != "" {
b, err := ioutil.ReadFile(cfgPath)
@@ -113,29 +112,27 @@ func NewJoin(cfgPath string, args []string, cfg *kubeadmapi.NodeConfiguration, s
}
cfg.MasterAddresses = append(cfg.MasterAddresses, args...)
if len(cfg.MasterAddresses) > 1 {
return nil, fmt.Errorf("Must not specify more than one master address (see --help)")
return nil, fmt.Errorf("must not specify more than one master address (see --help)")
}

if !skipPreFlight {
fmt.Println("[preflight] Running pre-flight checks...")
fmt.Println("[preflight] Running pre-flight checks")

// First, check if we're root separately from the other preflight checks and fail fast
if err := preflight.RunChecks([]preflight.PreFlightCheck{preflight.IsRootCheck{}}, os.Stderr); err != nil {
return nil, &preflight.PreFlightError{Msg: err.Error()}
if err := preflight.RunRootCheckOnly(); err != nil {
return nil, err
}

// Then continue with the others...
if err := preflight.RunJoinNodeChecks(cfg); err != nil {
return nil, &preflight.PreFlightError{Msg: err.Error()}
return nil, err
}
} else {
fmt.Println("[preflight] Skipping pre-flight checks...")
fmt.Println("[preflight] Skipping pre-flight checks")
}

// Try to start the kubelet service in case it's inactive
if err := preflight.TryStartKubelet(); err != nil {
return nil, &preflight.PreFlightError{Msg: err.Error()}
}
preflight.TryStartKubelet()

ok, err := kubeadmutil.UseGivenTokenIfValid(&cfg.Secrets)
if !ok {

@@ -64,13 +64,13 @@ type Reset struct {

func NewReset(skipPreFlight, removeNode bool) (*Reset, error) {
if !skipPreFlight {
fmt.Println("[preflight] Running pre-flight checks...")
fmt.Println("[preflight] Running pre-flight checks")

if err := preflight.RunChecks([]preflight.PreFlightCheck{preflight.IsRootCheck{}}, os.Stderr); err != nil {
return nil, &preflight.PreFlightError{Msg: err.Error()}
if err := preflight.RunRootCheckOnly(); err != nil {
return nil, err
}
} else {
fmt.Println("[preflight] Skipping pre-flight checks...")
fmt.Println("[preflight] Skipping pre-flight checks")
}

return &Reset{
@@ -81,51 +81,36 @@ func NewReset(skipPreFlight, removeNode bool) (*Reset, error) {
// Run reverts any changes made to this host by "kubeadm init" or "kubeadm join".
func (r *Reset) Run(out io.Writer) error {

// Drain and maybe remove the node from the cluster
// Try to drain and remove the node from the cluster
err := drainAndRemoveNode(r.removeNode)
if err != nil {
fmt.Printf("[reset] Failed to cleanup node: [%v]\n", err)
}

serviceToStop := "kubelet"
// Try to stop the kubelet service
initSystem, err := initsystem.GetInitSystem()
if err != nil {
fmt.Printf("[reset] Failed to detect init system and stop the kubelet service: %v\n", err)
fmt.Println("[reset] WARNING: The kubelet service couldn't be stopped by kubeadm because no supported init system was detected.")
fmt.Println("[reset] WARNING: Please ensure kubelet is stopped manually.")
} else {
fmt.Printf("[reset] Stopping the %s service...\n", serviceToStop)
if err := initSystem.ServiceStop(serviceToStop); err != nil {
fmt.Printf("[reset] Failed to stop the %s service\n", serviceToStop)
fmt.Println("[reset] Stopping the kubelet service")
if err := initSystem.ServiceStop("kubelet"); err != nil {
fmt.Printf("[reset] WARNING: The kubelet service couldn't be stopped by kubeadm: [%v]\n", err)
fmt.Println("[reset] WARNING: Please ensure kubelet is stopped manually.")
}
}

fmt.Println("[reset] Unmounting directories in /var/lib/kubelet...")
// Try to unmount mounted directories under /var/lib/kubelet in order to be able to remove the /var/lib/kubelet directory later
fmt.Printf("[reset] Unmounting mounted directories in %q\n", "/var/lib/kubelet")
umountDirsCmd := "cat /proc/mounts | awk '{print $2}' | grep '/var/lib/kubelet' | xargs -r umount"
umountOutputBytes, err := exec.Command("sh", "-c", umountDirsCmd).Output()
if err != nil {
fmt.Printf("[reset] Failed to unmount directories in /var/lib/kubelet: %s\n", string(umountOutputBytes))
}

// Remove contents from the config and pki directories
resetConfigDir(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeadmapi.GlobalEnvParams.HostPKIPath)

dirsToClean := []string{"/var/lib/kubelet", "/etc/cni/net.d"}

// Only clear etcd data when the etcd manifest is found. In case it is not found, we must assume that the user
// provided external etcd endpoints. In that case, it is his own responsibility to reset etcd
if _, err := os.Stat("/etc/kubernetes/manifests/etcd.json"); os.IsNotExist(err) {
dirsToClean = append(dirsToClean, "/var/lib/etcd")
} else {
fmt.Printf("[reset] No etcd manifest found in %q, assuming external etcd.\n", "/etc/kubernetes/manifests/etcd.json")
}

fmt.Printf("[reset] Deleting contents of stateful directories: %v\n", dirsToClean)
for _, dir := range dirsToClean {
cleanDir(dir)
fmt.Printf("[reset] Failed to unmount mounted directories in /var/lib/kubelet: %s\n", string(umountOutputBytes))
}

dockerCheck := preflight.ServiceCheck{Service: "docker"}
if warnings, errors := dockerCheck.Check(); len(warnings) == 0 && len(errors) == 0 {
fmt.Println("[reset] Stopping all running docker containers...")
fmt.Println("[reset] Removing kubernetes-managed containers")
if err := exec.Command("sh", "-c", "docker ps | grep 'k8s_' | awk '{print $1}' | xargs -r docker rm --force --volumes").Run(); err != nil {
fmt.Println("[reset] Failed to stop the running containers")
}
@@ -133,6 +118,26 @@ func (r *Reset) Run(out io.Writer) error {
fmt.Println("[reset] docker doesn't seem to be running, skipping the removal of running kubernetes containers")
}

dirsToClean := []string{"/var/lib/kubelet", "/etc/cni/net.d"}

// Only clear etcd data when the etcd manifest is found. In case it is not found, we must assume that the user
// provided external etcd endpoints. In that case, it is his own responsibility to reset etcd
etcdManifestPath := path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "manifests/etcd.json")
if _, err := os.Stat(etcdManifestPath); err == nil {
dirsToClean = append(dirsToClean, "/var/lib/etcd")
} else {
fmt.Printf("[reset] No etcd manifest found in %q, assuming external etcd.\n", etcdManifestPath)
}

// Then clean contents from the stateful kubelet, etcd and cni directories
fmt.Printf("[reset] Deleting contents of stateful directories: %v\n", dirsToClean)
for _, dir := range dirsToClean {
cleanDir(dir)
}

// Remove contents from the config and pki directories
resetConfigDir(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeadmapi.GlobalEnvParams.HostPKIPath)

return nil
}

@@ -19,7 +19,7 @@ package cmd
import (
"io/ioutil"
"os"
"path"
"path/filepath"
"testing"

"k8s.io/kubernetes/cmd/kubeadm/app/preflight"
@@ -147,14 +147,14 @@ func TestConfigDirCleaner(t *testing.T) {
defer os.RemoveAll(tmpDir)

for _, createDir := range test.setupDirs {
err := os.Mkdir(path.Join(tmpDir, createDir), 0700)
err := os.Mkdir(filepath.Join(tmpDir, createDir), 0700)
if err != nil {
t.Errorf("Unable to setup test config directory: %s", err)
}
}

for _, createFile := range test.setupFiles {
fullPath := path.Join(tmpDir, createFile)
fullPath := filepath.Join(tmpDir, createFile)
f, err := os.Create(fullPath)
defer f.Close()
if err != nil {
@@ -166,17 +166,17 @@ func TestConfigDirCleaner(t *testing.T) {

// Verify the files we cleanup implicitly in every test:
assertExists(t, tmpDir)
assertNotExists(t, path.Join(tmpDir, "admin.conf"))
assertNotExists(t, path.Join(tmpDir, "kubelet.conf"))
assertDirEmpty(t, path.Join(tmpDir, "manifests"))
assertDirEmpty(t, path.Join(tmpDir, "pki"))
assertNotExists(t, filepath.Join(tmpDir, "admin.conf"))
assertNotExists(t, filepath.Join(tmpDir, "kubelet.conf"))
assertDirEmpty(t, filepath.Join(tmpDir, "manifests"))
assertDirEmpty(t, filepath.Join(tmpDir, "pki"))

// Verify the files as requested by the test:
for _, path := range test.verifyExists {
assertExists(t, path.Join(tmpDir, path))
assertExists(t, filepath.Join(tmpDir, path))
}
for _, path := range test.verifyNotExists {
assertNotExists(t, path.Join(tmpDir, path))
assertNotExists(t, filepath.Join(tmpDir, path))
}
}
}

@@ -42,6 +42,7 @@ go_library(
"//pkg/util/intstr:go_default_library",
"//pkg/util/uuid:go_default_library",
"//pkg/util/wait:go_default_library",
"//vendor:github.com/blang/semver",
],
)

@@ -85,6 +85,7 @@ func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.PodSpec {

kubeDNSPort := int32(10053)
dnsmasqPort := int32(53)
dnsMasqMetricsUser := int64(0)

return v1.PodSpec{
Containers: []v1.Container{
@@ -131,6 +132,7 @@ func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.PodSpec {
fmt.Sprintf("--domain=%s", cfg.Networking.DNSDomain),
fmt.Sprintf("--dns-port=%d", kubeDNSPort),
"--config-map=kube-dns",
"--v=2",
},
Env: []v1.EnvVar{
{
@@ -214,6 +216,13 @@ func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.PodSpec {
SuccessThreshold: 1,
FailureThreshold: 5,
},
// The code below is a workaround for https://github.com/kubernetes/contrib/blob/master/dnsmasq-metrics/Dockerfile.in#L21
// This is just the normal mode (to run with user 0), all other containers do it except for this one, which may lead to
// that the DNS pod fails if the "nobody" _group_ doesn't exist. I think it's a typo in the Dockerfile manifest and
// that it should be "USER nobody:nogroup" instead of "USER nobody:nobody". However, this fixes the problem.
SecurityContext: &v1.SecurityContext{
RunAsUser: &dnsMasqMetricsUser,
},
Args: []string{
"--v=2",
"--logtostderr",
@@ -269,7 +278,7 @@ func createKubeDNSServiceSpec(cfg *kubeadmapi.MasterConfiguration) (*v1.ServiceS
}
ip, err := ipallocator.GetIndexedIP(n, 10)
if err != nil {
return nil, fmt.Errorf("unable to allocate IP address for kube-dns addon from the given CIDR (%q) [%v]", cfg.Networking.ServiceSubnet, err)
return nil, fmt.Errorf("unable to allocate IP address for kube-dns addon from the given CIDR %q: [%v]", cfg.Networking.ServiceSubnet, err)
}

return &v1.ServiceSpec{

@@ -271,6 +271,7 @@ func createDummyDeployment(client *clientset.Clientset) {

fmt.Println("[apiclient] Test deployment succeeded")

// TODO: In the future, make sure the ReplicaSet and Pod are garbage collected
if err := client.Extensions().Deployments(api.NamespaceSystem).Delete("dummy", &v1.DeleteOptions{}); err != nil {
fmt.Printf("[apiclient] Failed to delete test deployment [%v] (will ignore)\n", err)
}

@@ -31,6 +31,8 @@ import (
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/util/intstr"

"github.com/blang/semver"
)

// Static pod definitions in golang form are included below so that `kubeadm init` can get going.
@@ -49,6 +51,11 @@ const (
kubeProxy = "kube-proxy"
)

var (
// Minimum version of kube-apiserver that supports --kubelet-preferred-address-types
preferredAddressMinimumVersion = semver.MustParse("1.5.0-beta.2")
)

// WriteStaticPodManifests builds manifest objects based on user provided configuration and then dumps it to disk
// where kubelet will pick and schedule them.
func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error {
@@ -190,7 +197,7 @@ func isPkiVolumeMountNeeded() bool {

func pkiVolume(cfg *kubeadmapi.MasterConfiguration) api.Volume {
return api.Volume{
Name: "k8s",
Name: "pki",
VolumeSource: api.VolumeSource{
// TODO(phase1+) make path configurable
HostPath: &api.HostPathVolumeSource{Path: "/etc/pki"},
@@ -291,6 +298,16 @@ func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
command = append(command, fmt.Sprintf("--advertise-address=%s", cfg.API.AdvertiseAddresses[0]))
}

if len(cfg.KubernetesVersion) != 0 {
// If the k8s version is v1.5-something, this argument is set and makes `kubectl logs` and `kubectl exec`
// work on bare-metal where hostnames aren't usually resolvable
// Omit the "v" in the beginning, otherwise semver will fail
k8sVersion, err := semver.Parse(cfg.KubernetesVersion[1:])
if err == nil && k8sVersion.GTE(preferredAddressMinimumVersion) {
command = append(command, "--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname")
}
}

// Check if the user decided to use an external etcd cluster
if len(cfg.Etcd.Endpoints) > 0 {
command = append(command, fmt.Sprintf("--etcd-servers=%s", strings.Join(cfg.Etcd.Endpoints, ",")))

@@ -369,15 +369,14 @@ func TestGetAPIServerCommand(t *testing.T) {
},
expected: []string{
"kube-apiserver",
"--v=2",
"--insecure-bind-address=127.0.0.1",
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + pkiDir + "/apiserver-key.pem",
"--client-ca-file=" + pkiDir + "/ca.pem",
"--tls-cert-file=" + pkiDir + "/apiserver.pem",
"--tls-private-key-file=" + pkiDir + "/apiserver-key.pem",
"--token-auth-file=" + pkiDir + "/tokens.csv",
"--service-account-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
"--client-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
"--tls-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.pem",
"--tls-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
"--token-auth-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/tokens.csv",
fmt.Sprintf("--secure-port=%d", 123),
"--allow-privileged",
"--etcd-servers=http://127.0.0.1:2379",
@@ -390,15 +389,14 @@ func TestGetAPIServerCommand(t *testing.T) {
},
expected: []string{
"kube-apiserver",
"--v=2",
"--insecure-bind-address=127.0.0.1",
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + pkiDir + "/apiserver-key.pem",
"--client-ca-file=" + pkiDir + "/ca.pem",
"--tls-cert-file=" + pkiDir + "/apiserver.pem",
"--tls-private-key-file=" + pkiDir + "/apiserver-key.pem",
"--token-auth-file=" + pkiDir + "/tokens.csv",
"--service-account-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
"--client-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
"--tls-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.pem",
"--tls-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
"--token-auth-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/tokens.csv",
fmt.Sprintf("--secure-port=%d", 123),
"--allow-privileged",
"--advertise-address=foo",
@@ -413,15 +411,14 @@ func TestGetAPIServerCommand(t *testing.T) {
},
expected: []string{
"kube-apiserver",
"--v=2",
"--insecure-bind-address=127.0.0.1",
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + pkiDir + "/apiserver-key.pem",
"--client-ca-file=" + pkiDir + "/ca.pem",
"--tls-cert-file=" + pkiDir + "/apiserver.pem",
"--tls-private-key-file=" + pkiDir + "/apiserver-key.pem",
"--token-auth-file=" + pkiDir + "/tokens.csv",
"--service-account-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
"--client-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
"--tls-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.pem",
"--tls-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
"--token-auth-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/tokens.csv",
fmt.Sprintf("--secure-port=%d", 123),
"--allow-privileged",
"--etcd-servers=http://127.0.0.1:2379",
@@ -429,6 +426,30 @@ func TestGetAPIServerCommand(t *testing.T) {
"--etcd-keyfile=faz",
},
},
// Make sure --kubelet-preferred-address-types
{
cfg: &kubeadmapi.MasterConfiguration{
API: kubeadm.API{BindPort: 123, AdvertiseAddresses: []string{"foo"}},
Networking: kubeadm.Networking{ServiceSubnet: "bar"},
KubernetesVersion: "v1.5.3",
},
expected: []string{
"kube-apiserver",
"--insecure-bind-address=127.0.0.1",
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
"--service-cluster-ip-range=bar",
"--service-account-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
"--client-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
"--tls-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.pem",
"--tls-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
"--token-auth-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/tokens.csv",
fmt.Sprintf("--secure-port=%d", 123),
"--allow-privileged",
"--advertise-address=foo",
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
"--etcd-servers=http://127.0.0.1:2379",
},
},
}

for _, rt := range tests {
@@ -454,15 +475,14 @@ func TestGetControllerManagerCommand(t *testing.T) {
cfg: &kubeadmapi.MasterConfiguration{},
expected: []string{
"kube-controller-manager",
"--v=2",
"--address=127.0.0.1",
"--leader-elect",
"--master=127.0.0.1:8080",
"--cluster-name=" + DefaultClusterName,
"--root-ca-file=" + pkiDir + "/ca.pem",
"--service-account-private-key-file=" + pkiDir + "/apiserver-key.pem",
"--cluster-signing-cert-file=" + pkiDir + "/ca.pem",
"--cluster-signing-key-file=" + pkiDir + "/ca-key.pem",
"--root-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
"--service-account-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
"--cluster-signing-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
"--cluster-signing-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca-key.pem",
"--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap",
},
},
@@ -470,15 +490,14 @@ func TestGetControllerManagerCommand(t *testing.T) {
cfg: &kubeadmapi.MasterConfiguration{CloudProvider: "foo"},
expected: []string{
"kube-controller-manager",
"--v=2",
"--address=127.0.0.1",
"--leader-elect",
"--master=127.0.0.1:8080",
"--cluster-name=" + DefaultClusterName,
"--root-ca-file=" + pkiDir + "/ca.pem",
"--service-account-private-key-file=" + pkiDir + "/apiserver-key.pem",
"--cluster-signing-cert-file=" + pkiDir + "/ca.pem",
"--cluster-signing-key-file=" + pkiDir + "/ca-key.pem",
"--root-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
"--service-account-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
"--cluster-signing-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
"--cluster-signing-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca-key.pem",
"--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap",
"--cloud-provider=foo",
},
@@ -487,15 +506,14 @@ func TestGetControllerManagerCommand(t *testing.T) {
cfg: &kubeadmapi.MasterConfiguration{Networking: kubeadm.Networking{PodSubnet: "bar"}},
expected: []string{
"kube-controller-manager",
"--v=2",
"--address=127.0.0.1",
"--leader-elect",
"--master=127.0.0.1:8080",
"--cluster-name=" + DefaultClusterName,
"--root-ca-file=" + pkiDir + "/ca.pem",
"--service-account-private-key-file=" + pkiDir + "/apiserver-key.pem",
"--cluster-signing-cert-file=" + pkiDir + "/ca.pem",
"--cluster-signing-key-file=" + pkiDir + "/ca-key.pem",
"--root-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
"--service-account-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
"--cluster-signing-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
"--cluster-signing-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca-key.pem",
"--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap",
"--allocate-node-cidrs=true",
"--cluster-cidr=bar",
@@ -526,7 +544,6 @@ func TestGetSchedulerCommand(t *testing.T) {
cfg: &kubeadmapi.MasterConfiguration{},
expected: []string{
"kube-scheduler",
"--v=2",
"--address=127.0.0.1",
"--leader-elect",
"--master=127.0.0.1:8080",
@@ -557,7 +574,6 @@ func TestGetProxyCommand(t *testing.T) {
cfg: &kubeadmapi.MasterConfiguration{},
expected: []string{
"kube-proxy",
"--v=2",
},
},
}

@@ -45,7 +45,7 @@ func PerformTLSBootstrap(connection *ConnectionDetails) (*clientcmdapi.Config, e
return nil, fmt.Errorf("failed to format certificate [%v]", err)
}
fmt.Printf("[csr] Received signed certificate from the API server:\n%s\n", fmtCert)
fmt.Println("[csr] Generating kubelet configuration...")
fmt.Println("[csr] Generating kubelet configuration")

bareClientConfig := kubeadmutil.CreateBasicClientConfig("kubernetes", connection.Endpoint, connection.CACert)
finalConfig := kubeadmutil.MakeClientConfigWithCerts(

@@ -17,6 +17,7 @@ go_library(
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//pkg/api/validation:go_default_library",
"//pkg/util/errors:go_default_library",
"//pkg/util/initsystem:go_default_library",
"//pkg/util/node:go_default_library",
"//test/e2e_node/system:go_default_library",

@@ -342,6 +342,14 @@ func RunJoinNodeChecks(cfg *kubeadmapi.NodeConfiguration) error {
return RunChecks(checks, os.Stderr)
}

func RunRootCheckOnly() error {
checks := []PreFlightCheck{
IsRootCheck{},
}

return RunChecks(checks, os.Stderr)
}

// RunChecks runs each check, displays it's warnings/errors, and once all
// are processed will exit if any errors occurred.
func RunChecks(checks []PreFlightCheck, ww io.Writer) error {
@@ -349,7 +357,7 @@ func RunChecks(checks []PreFlightCheck, ww io.Writer) error {
for _, c := range checks {
warnings, errs := c.Check()
for _, w := range warnings {
io.WriteString(ww, fmt.Sprintf("[preflight] Warning: %s\n", w))
io.WriteString(ww, fmt.Sprintf("[preflight] WARNING: %s\n", w))
}
for _, e := range errs {
found = append(found, e)
@@ -360,21 +368,22 @@ func RunChecks(checks []PreFlightCheck, ww io.Writer) error {
for _, i := range found {
errs += "\t" + i.Error() + "\n"
}
return errors.New(errs)
return &PreFlightError{Msg: errors.New(errs).Error()}
}
return nil
}

func TryStartKubelet() error {
func TryStartKubelet() {
// If we notice that the kubelet service is inactive, try to start it
initSystem, err := initsystem.GetInitSystem()
if err != nil {
fmt.Println("[preflight] No supported init system detected, won't check if kubelet is running")
} else if !initSystem.ServiceIsActive("kubelet") {
fmt.Printf("[preflight] Starting the kubelet service by running %q\n", "systemctl start kubelet")
fmt.Println("[preflight] No supported init system detected, won't ensure kubelet is running.")
} else if initSystem.ServiceExists("kubelet") && !initSystem.ServiceIsActive("kubelet") {

fmt.Println("[preflight] Starting the kubelet service")
if err := initSystem.ServiceStart("kubelet"); err != nil {
return fmt.Errorf("Couldn't start the kubelet service. Please start the kubelet service manually and try again.")
fmt.Printf("[preflight] WARNING: Unable to start the kubelet service: [%v]\n", err)
fmt.Println("[preflight] WARNING: Please ensure kubelet is running manually.")
}
}
return nil
}

@@ -43,23 +43,23 @@ func TestRunChecks(t *testing.T) {
output string
}{
{[]PreFlightCheck{}, true, ""},
{[]PreFlightCheck{preflightCheckTest{"warning"}}, true, "[preflight] Warning: warning\n"}, // should just print warning
{[]PreFlightCheck{preflightCheckTest{"warning"}}, true, "[preflight] WARNING: warning\n"}, // should just print warning
{[]PreFlightCheck{preflightCheckTest{"error"}}, false, ""},
{[]PreFlightCheck{preflightCheckTest{"test"}}, false, ""},
}
for _, rt := range tokenTest {
buf := new(bytes.Buffer)
actual := runChecks(rt.p, buf)
actual := RunChecks(rt.p, buf)
if (actual == nil) != rt.expected {
t.Errorf(
"failed runChecks:\n\texpected: %t\n\t actual: %t",
"failed RunChecks:\n\texpected: %t\n\t actual: %t",
rt.expected,
(actual == nil),
)
}
if buf.String() != rt.output {
t.Errorf(
"failed runChecks:\n\texpected: %s\n\t actual: %s",
"failed RunChecks:\n\texpected: %s\n\t actual: %s",
rt.output,
buf.String(),
)

@@ -24,8 +24,6 @@ go_library(
"//cmd/kubeadm/app/preflight:go_default_library",
"//pkg/client/unversioned/clientcmd:go_default_library",
"//pkg/client/unversioned/clientcmd/api:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:github.com/renstrom/dedent",
],
)

@@ -35,7 +35,7 @@ func TestCheckErr(t *testing.T) {
}{
{nil, 0},
{fmt.Errorf(""), DefaultErrorExitCode},
{&preflight.PreFlightError{}, PreFlight},
{&preflight.PreFlightError{}, PreFlightExitCode},
}

for _, rt := range tokenTest {

@@ -64,7 +64,7 @@ func UseGivenTokenIfValid(s *kubeadmapi.Secrets) (bool, error) {
if s.GivenToken == "" {
return false, nil // not given
}
fmt.Println("[tokens] Validating provided token...")
fmt.Println("[tokens] Validating provided token")
givenToken := strings.Split(strings.ToLower(s.GivenToken), ".")
// TODO(phase1+) could also print more specific messages in each case
invalidErr := "[tokens] Provided token does not match expected <6 characters>.<16 characters> format - %s"

@@ -54,18 +54,18 @@ func KubernetesReleaseVersion(version string) (string, error) {
url := fmt.Sprintf("%s/%s.txt", kubeReleaseBucketURL, version)
resp, err := http.Get(url)
if err != nil {
return "", fmt.Errorf("Error: unable to get URL %q: %s", url, err.Error())
return "", fmt.Errorf("unable to get URL %q: %s", url, err.Error())
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("Error: unable to fetch release information. URL: %q Status: %v", url, resp.Status)
return "", fmt.Errorf("unable to fetch release information. URL: %q Status: %v", url, resp.Status)
}
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", fmt.Errorf("Error: unable to read content of URL %q: %s", url, err.Error())
return "", fmt.Errorf("unable to read content of URL %q: %s", url, err.Error())
}
// Re-validate received version and return.
return KubernetesReleaseVersion(strings.Trim(string(body), " \t\n"))
}
return "", fmt.Errorf("Error: version %q doesn't match patterns for neither semantic version nor labels (stable, latest, ...)", version)
return "", fmt.Errorf("version %q doesn't match patterns for neither semantic version nor labels (stable, latest, ...)", version)
}