Mirror of https://github.com/k3s-io/kubernetes.git, synced 2025-07-24 20:24:09 +00:00
Merge pull request #37568 from luxas/various_kubeadm_improvements
Automatic merge from submit-queue (batch tested with PRs 37270, 38309, 37568, 34554).

kubeadm UX improvements for the v1.5 stable release

This PR targets the next stable kubeadm release. It's work in progress, but please comment on it and review, since there are many changes. I tried to group the commits logically, so you can review them separately.

Q: Why this large PR? Why not many small ones?
A: Because of the Submit Queue and the time it takes.

PTAL @kubernetes/sig-cluster-lifecycle

_Edit:_ This work was split up into three PRs in total.
commit ac05e7135a
@ -14,8 +14,5 @@ go_binary(
name = "kubeadm",
srcs = ["kubeadm.go"],
tags = ["automanaged"],
deps = [
"//cmd/kubeadm/app:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
],
deps = ["//cmd/kubeadm/app:go_default_library"],
)

@ -18,7 +18,6 @@ go_library(
"//cmd/kubeadm/app/apis/kubeadm/install:go_default_library",
"//cmd/kubeadm/app/cmd:go_default_library",
"//pkg/kubectl/cmd/util:go_default_library",
"//pkg/util/logs:go_default_library",
"//vendor:github.com/spf13/pflag",
],
)

@ -25,21 +25,18 @@ import (

var GlobalEnvParams = SetEnvParams()

// TODO(phase2) use componentconfig
// TODO(phase1+) Move these paramaters to the API group
// we need some params for testing etc, let's keep these hidden for now
func SetEnvParams() *EnvParams {

envParams := map[string]string{
// TODO(phase1+): Mode prefix and host_pki_path to another place as constants, and use them everywhere
// Right now they're used here and there, but not consequently
"kubernetes_dir": "/etc/kubernetes",
"host_pki_path": "/etc/kubernetes/pki",
"host_etcd_path": "/var/lib/etcd",
"hyperkube_image": "",
"repo_prefix": "gcr.io/google_containers",
"discovery_image": fmt.Sprintf("gcr.io/google_containers/kube-discovery-%s:%s", runtime.GOARCH, "1.0"),
"etcd_image": "",
"component_loglevel": "--v=2",
"kubernetes_dir": "/etc/kubernetes",
"host_pki_path": "/etc/kubernetes/pki",
"host_etcd_path": "/var/lib/etcd",
"hyperkube_image": "",
"repo_prefix": "gcr.io/google_containers",
"discovery_image": fmt.Sprintf("gcr.io/google_containers/kube-discovery-%s:%s", runtime.GOARCH, "1.0"),
"etcd_image": "",
}

for k := range envParams {

@ -49,13 +46,12 @@ func SetEnvParams() *EnvParams {
}

return &EnvParams{
KubernetesDir: envParams["kubernetes_dir"],
HostPKIPath: envParams["host_pki_path"],
HostEtcdPath: envParams["host_etcd_path"],
HyperkubeImage: envParams["hyperkube_image"],
RepositoryPrefix: envParams["repo_prefix"],
DiscoveryImage: envParams["discovery_image"],
EtcdImage: envParams["etcd_image"],
ComponentLoglevel: envParams["component_loglevel"],
KubernetesDir: envParams["kubernetes_dir"],
HostPKIPath: envParams["host_pki_path"],
HostEtcdPath: envParams["host_etcd_path"],
HyperkubeImage: envParams["hyperkube_image"],
RepositoryPrefix: envParams["repo_prefix"],
DiscoveryImage: envParams["discovery_image"],
EtcdImage: envParams["etcd_image"],
}
}

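For context: the loop over envParams, elided by the hunk above, lets environment variables override these defaults. A minimal sketch of that pattern; the KUBE_ prefix and variable naming here are assumptions for illustration, not necessarily kubeadm's exact behaviour:

package main

import (
    "fmt"
    "os"
    "strings"
)

func main() {
    // Defaults, as in SetEnvParams above.
    envParams := map[string]string{
        "kubernetes_dir": "/etc/kubernetes",
        "host_pki_path":  "/etc/kubernetes/pki",
    }

    // Override any default from the environment, e.g. KUBE_KUBERNETES_DIR.
    for k := range envParams {
        if v := strings.TrimSpace(os.Getenv("KUBE_" + strings.ToUpper(k))); v != "" {
            envParams[k] = v
        }
    }

    fmt.Println(envParams)
}
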
@ -21,14 +21,13 @@ import (
)

type EnvParams struct {
KubernetesDir string
HostPKIPath string
HostEtcdPath string
HyperkubeImage string
RepositoryPrefix string
DiscoveryImage string
EtcdImage string
ComponentLoglevel string
KubernetesDir string
HostPKIPath string
HostEtcdPath string
HyperkubeImage string
RepositoryPrefix string
DiscoveryImage string
EtcdImage string
}

type MasterConfiguration struct {

@ -37,14 +37,14 @@ func NewKubeadmCommand(f cmdutil.Factory, in io.Reader, out, err io.Writer) *cob
│ KUBEADM IS ALPHA, DO NOT USE IT FOR PRODUCTION CLUSTERS! │
│ │
│ But, please try it out! Give us feedback at: │
│ https://github.com/kubernetes/kubernetes/issues │
│ https://github.com/kubernetes/kubeadm/issues │
│ and at-mention @kubernetes/sig-cluster-lifecycle │
└──────────────────────────────────────────────────────────┘

Example usage:

Create a two-machine cluster with one master (which controls the cluster),
and one node (where workloads, like pods and replica sets run).
and one node (where your workloads, like Pods and ReplicaSets run).

┌──────────────────────────────────────────────────────────┐
│ On the first machine │

@ -69,11 +69,10 @@ func NewKubeadmCommand(f cmdutil.Factory, in io.Reader, out, err io.Writer) *cob
//
// TODO(phase2) create an abstraction that defines files and the content that needs to
// be written to disc and write it all in one go at the end as we have a lot of
// crapy little files written from different parts of this code; this could also
// be useful for testing
// by having this model we can allow users to create some files before `kubeadm init` runs, e.g. PKI assets, we
// would then be able to look at files users has given an diff or validate if those are sane, we could also warn
// if any of the files had been deprecated
// crappy little files written from different parts of this code; this could also
// be useful for testing by having this model we can allow users to create some files before
// `kubeadm init` runs, e.g. PKI assets, we would then be able to look at files users has
// given an diff or validate if those are sane, we could also warn if any of the files had been deprecated

cmds.ResetFlags()
cmds.SetGlobalNormalizationFunc(flag.WarnWordSepNormalizeFunc)

@ -51,7 +51,11 @@ const (

var (
initDoneMsgf = dedent.Dedent(`
Kubernetes master initialised successfully!
Your Kubernetes master has initialized successfully!

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
http://kubernetes.io/docs/admin/addons/

You can now join any number of machines by running the following on each node:

@ -162,6 +166,9 @@ type Init struct {
}

func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight bool) (*Init, error) {

fmt.Println("[kubeadm] WARNING: kubeadm is in alpha, please do not use it for production clusters.")

if cfgPath != "" {
b, err := ioutil.ReadFile(cfgPath)
if err != nil {

@ -174,7 +181,6 @@ func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight

// Auto-detect the IP
if len(cfg.API.AdvertiseAddresses) == 0 {
// TODO(phase1+) perhaps we could actually grab eth0 and eth1
ip, err := netutil.ChooseHostInterface()
if err != nil {
return nil, err

@ -183,15 +189,24 @@ func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight
}

if !skipPreFlight {
fmt.Println("Running pre-flight checks")
err := preflight.RunInitMasterChecks(cfg)
if err != nil {
return nil, &preflight.PreFlightError{Msg: err.Error()}
fmt.Println("[preflight] Running pre-flight checks")

// First, check if we're root separately from the other preflight checks and fail fast
if err := preflight.RunRootCheckOnly(); err != nil {
return nil, err
}

// Then continue with the others...
if err := preflight.RunInitMasterChecks(cfg); err != nil {
return nil, err
}
} else {
fmt.Println("Skipping pre-flight checks")
fmt.Println("[preflight] Skipping pre-flight checks")
}

// Try to start the kubelet service in case it's inactive
preflight.TryStartKubelet()

// validate version argument
ver, err := kubeadmutil.KubernetesReleaseVersion(cfg.KubernetesVersion)
if err != nil {

@ -202,7 +217,7 @@ func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight
}
}
cfg.KubernetesVersion = ver
fmt.Println("Using Kubernetes version:", ver)
fmt.Println("[init] Using Kubernetes version:", ver)

// Warn about the limitations with the current cloudprovider solution.
if cfg.CloudProvider != "" {

@ -245,7 +260,7 @@ func (i *Init) Run(out io.Writer) error {
// write a file that has already been written (the kubelet will be up and
// running in that case - they'd need to stop the kubelet, remove the file, and
// start it again in that case).
// TODO(phase1+) this is no longer the right place to guard agains foo-shooting,
// TODO(phase1+) this is no longer the right place to guard against foo-shooting,
// we need to decide how to handle existing files (it may be handy to support
// importing existing files, may be we could even make our command idempotant,
// or at least allow for external PKI and stuff)

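The NewInit hunk above (and the matching NewJoin hunk below) reorders the pre-flight phase: a cheap root check runs first and fails fast, and only then do the heavier checks run. A standalone sketch of that ordering; the stand-in functions below are hypothetical, the real ones being preflight.RunRootCheckOnly and preflight.RunInitMasterChecks:

package main

import (
    "errors"
    "fmt"
    "os"
)

// runRootCheckOnly stands in for preflight.RunRootCheckOnly: a single cheap check.
func runRootCheckOnly() error {
    if os.Geteuid() != 0 {
        return errors.New("user is not running as root")
    }
    return nil
}

// runInitMasterChecks stands in for preflight.RunInitMasterChecks: the heavier
// port, binary and service checks that only make sense once we know we are root.
func runInitMasterChecks() error {
    return nil
}

func main() {
    fmt.Println("[preflight] Running pre-flight checks")

    // First, fail fast if we are not root...
    if err := runRootCheckOnly(); err != nil {
        fmt.Println("[preflight]", err)
        os.Exit(1)
    }

    // ...then continue with the remaining checks.
    if err := runInitMasterChecks(); err != nil {
        fmt.Println("[preflight]", err)
        os.Exit(1)
    }
}
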
@ -94,6 +94,9 @@ type Join struct {
|
||||
}
|
||||
|
||||
func NewJoin(cfgPath string, args []string, cfg *kubeadmapi.NodeConfiguration, skipPreFlight bool) (*Join, error) {
|
||||
|
||||
fmt.Println("[kubeadm] WARNING: kubeadm is in alpha, please do not use it for production clusters.")
|
||||
|
||||
if cfgPath != "" {
|
||||
b, err := ioutil.ReadFile(cfgPath)
|
||||
if err != nil {
|
||||
@ -109,25 +112,34 @@ func NewJoin(cfgPath string, args []string, cfg *kubeadmapi.NodeConfiguration, s
|
||||
}
|
||||
cfg.MasterAddresses = append(cfg.MasterAddresses, args...)
|
||||
if len(cfg.MasterAddresses) > 1 {
|
||||
return nil, fmt.Errorf("Must not specify more than one master address (see --help)")
|
||||
return nil, fmt.Errorf("must not specify more than one master address (see --help)")
|
||||
}
|
||||
|
||||
if !skipPreFlight {
|
||||
fmt.Println("Running pre-flight checks")
|
||||
err := preflight.RunJoinNodeChecks(cfg)
|
||||
if err != nil {
|
||||
return nil, &preflight.PreFlightError{Msg: err.Error()}
|
||||
fmt.Println("[preflight] Running pre-flight checks")
|
||||
|
||||
// First, check if we're root separately from the other preflight checks and fail fast
|
||||
if err := preflight.RunRootCheckOnly(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Then continue with the others...
|
||||
if err := preflight.RunJoinNodeChecks(cfg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
fmt.Println("Skipping pre-flight checks")
|
||||
fmt.Println("[preflight] Skipping pre-flight checks")
|
||||
}
|
||||
|
||||
// Try to start the kubelet service in case it's inactive
|
||||
preflight.TryStartKubelet()
|
||||
|
||||
ok, err := kubeadmutil.UseGivenTokenIfValid(&cfg.Secrets)
|
||||
if !ok {
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("%v (see --help)\n", err)
|
||||
return nil, fmt.Errorf("%v (see --help)", err)
|
||||
}
|
||||
return nil, fmt.Errorf("Must specify --token (see --help)\n")
|
||||
return nil, fmt.Errorf("Must specify --token (see --help)")
|
||||
}
|
||||
|
||||
return &Join{cfg: cfg}, nil
|
||||
|
@ -64,13 +64,13 @@ type Reset struct {
|
||||
|
||||
func NewReset(skipPreFlight, removeNode bool) (*Reset, error) {
|
||||
if !skipPreFlight {
|
||||
fmt.Println("[preflight] Running pre-flight checks...")
|
||||
fmt.Println("[preflight] Running pre-flight checks")
|
||||
|
||||
if err := preflight.RunResetCheck(); err != nil {
|
||||
return nil, &preflight.PreFlightError{Msg: err.Error()}
|
||||
if err := preflight.RunRootCheckOnly(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
fmt.Println("[preflight] Skipping pre-flight checks...")
|
||||
fmt.Println("[preflight] Skipping pre-flight checks")
|
||||
}
|
||||
|
||||
return &Reset{
|
||||
@ -81,51 +81,36 @@ func NewReset(skipPreFlight, removeNode bool) (*Reset, error) {
|
||||
// Run reverts any changes made to this host by "kubeadm init" or "kubeadm join".
|
||||
func (r *Reset) Run(out io.Writer) error {
|
||||
|
||||
// Drain and maybe remove the node from the cluster
|
||||
// Try to drain and remove the node from the cluster
|
||||
err := drainAndRemoveNode(r.removeNode)
|
||||
if err != nil {
|
||||
fmt.Printf("[reset] Failed to cleanup node: [%v]\n", err)
|
||||
}
|
||||
|
||||
serviceToStop := "kubelet"
|
||||
// Try to stop the kubelet service
|
||||
initSystem, err := initsystem.GetInitSystem()
|
||||
if err != nil {
|
||||
fmt.Printf("[reset] Failed to detect init system and stop the kubelet service: %v\n", err)
|
||||
fmt.Println("[reset] WARNING: The kubelet service couldn't be stopped by kubeadm because no supported init system was detected.")
|
||||
fmt.Println("[reset] WARNING: Please ensure kubelet is stopped manually.")
|
||||
} else {
|
||||
fmt.Printf("[reset] Stopping the %s service...\n", serviceToStop)
|
||||
if err := initSystem.ServiceStop(serviceToStop); err != nil {
|
||||
fmt.Printf("[reset] Failed to stop the %s service\n", serviceToStop)
|
||||
fmt.Println("[reset] Stopping the kubelet service")
|
||||
if err := initSystem.ServiceStop("kubelet"); err != nil {
|
||||
fmt.Printf("[reset] WARNING: The kubelet service couldn't be stopped by kubeadm: [%v]\n", err)
|
||||
fmt.Println("[reset] WARNING: Please ensure kubelet is stopped manually.")
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println("[reset] Unmounting directories in /var/lib/kubelet...")
|
||||
// Try to unmount mounted directories under /var/lib/kubelet in order to be able to remove the /var/lib/kubelet directory later
|
||||
fmt.Printf("[reset] Unmounting mounted directories in %q\n", "/var/lib/kubelet")
|
||||
umountDirsCmd := "cat /proc/mounts | awk '{print $2}' | grep '/var/lib/kubelet' | xargs -r umount"
|
||||
umountOutputBytes, err := exec.Command("sh", "-c", umountDirsCmd).Output()
|
||||
if err != nil {
|
||||
fmt.Printf("[reset] Failed to unmount directories in /var/lib/kubelet: %s\n", string(umountOutputBytes))
|
||||
}
|
||||
|
||||
// Remove contents from the config and pki directories
|
||||
resetConfigDir(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeadmapi.GlobalEnvParams.HostPKIPath)
|
||||
|
||||
dirsToClean := []string{"/var/lib/kubelet", "/etc/cni/net.d"}
|
||||
|
||||
// Only clear etcd data when the etcd manifest is found. In case it is not found, we must assume that the user
|
||||
// provided external etcd endpoints. In that case, it is his own responsibility to reset etcd
|
||||
if _, err := os.Stat("/etc/kubernetes/manifests/etcd.json"); os.IsNotExist(err) {
|
||||
dirsToClean = append(dirsToClean, "/var/lib/etcd")
|
||||
} else {
|
||||
fmt.Printf("[reset] No etcd manifest found in %q, assuming external etcd.\n", "/etc/kubernetes/manifests/etcd.json")
|
||||
}
|
||||
|
||||
fmt.Printf("[reset] Deleting contents of stateful directories: %v\n", dirsToClean)
|
||||
for _, dir := range dirsToClean {
|
||||
cleanDir(dir)
|
||||
fmt.Printf("[reset] Failed to unmount mounted directories in /var/lib/kubelet: %s\n", string(umountOutputBytes))
|
||||
}
|
||||
|
||||
dockerCheck := preflight.ServiceCheck{Service: "docker"}
|
||||
if warnings, errors := dockerCheck.Check(); len(warnings) == 0 && len(errors) == 0 {
|
||||
fmt.Println("[reset] Stopping all running docker containers...")
|
||||
fmt.Println("[reset] Removing kubernetes-managed containers")
|
||||
if err := exec.Command("sh", "-c", "docker ps | grep 'k8s_' | awk '{print $1}' | xargs -r docker rm --force --volumes").Run(); err != nil {
|
||||
fmt.Println("[reset] Failed to stop the running containers")
|
||||
}
|
||||
@ -133,6 +118,26 @@ func (r *Reset) Run(out io.Writer) error {
|
||||
fmt.Println("[reset] docker doesn't seem to be running, skipping the removal of running kubernetes containers")
|
||||
}
|
||||
|
||||
dirsToClean := []string{"/var/lib/kubelet", "/etc/cni/net.d"}
|
||||
|
||||
// Only clear etcd data when the etcd manifest is found. In case it is not found, we must assume that the user
|
||||
// provided external etcd endpoints. In that case, it is his own responsibility to reset etcd
|
||||
etcdManifestPath := path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "manifests/etcd.json")
|
||||
if _, err := os.Stat(etcdManifestPath); err == nil {
|
||||
dirsToClean = append(dirsToClean, "/var/lib/etcd")
|
||||
} else {
|
||||
fmt.Printf("[reset] No etcd manifest found in %q, assuming external etcd.\n", etcdManifestPath)
|
||||
}
|
||||
|
||||
// Then clean contents from the stateful kubelet, etcd and cni directories
|
||||
fmt.Printf("[reset] Deleting contents of stateful directories: %v\n", dirsToClean)
|
||||
for _, dir := range dirsToClean {
|
||||
cleanDir(dir)
|
||||
}
|
||||
|
||||
// Remove contents from the config and pki directories
|
||||
resetConfigDir(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeadmapi.GlobalEnvParams.HostPKIPath)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
/*
Copyright 2014 The Kubernetes Authors.
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@ -1,5 +1,5 @@
/*
Copyright 2014 The Kubernetes Authors.
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@ -24,13 +24,9 @@ import (
_ "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/install"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd"
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/util/logs"
)

func Run() error {
logs.InitLogs()
defer logs.FlushLogs()

// We do not want these flags to show up in --help
pflag.CommandLine.MarkHidden("google-json-key")
pflag.CommandLine.MarkHidden("log-flush-frequency")

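Run() above hides two globally registered flags from the help output. A self-contained illustration of that spf13/pflag mechanism; the flag names and defaults below are purely for the example:

package main

import (
    "fmt"

    "github.com/spf13/pflag"
)

func main() {
    pflag.String("log-flush-frequency", "5s", "how often to flush logs")
    pflag.String("token", "", "token to use")

    // Hidden flags still parse normally, but are omitted from usage/help output.
    if err := pflag.CommandLine.MarkHidden("log-flush-frequency"); err != nil {
        fmt.Println(err)
    }

    pflag.Parse()
    pflag.Usage() // prints only the --token flag
}
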
@ -42,6 +42,7 @@ go_library(
|
||||
"//pkg/util/intstr:go_default_library",
|
||||
"//pkg/util/uuid:go_default_library",
|
||||
"//pkg/util/wait:go_default_library",
|
||||
"//vendor:github.com/blang/semver",
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -85,6 +85,7 @@ func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.PodSpec {
|
||||
|
||||
kubeDNSPort := int32(10053)
|
||||
dnsmasqPort := int32(53)
|
||||
dnsMasqMetricsUser := int64(0)
|
||||
|
||||
return v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
@ -131,6 +132,7 @@ func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.PodSpec {
|
||||
fmt.Sprintf("--domain=%s", cfg.Networking.DNSDomain),
|
||||
fmt.Sprintf("--dns-port=%d", kubeDNSPort),
|
||||
"--config-map=kube-dns",
|
||||
"--v=2",
|
||||
},
|
||||
Env: []v1.EnvVar{
|
||||
{
|
||||
@ -214,6 +216,13 @@ func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.PodSpec {
|
||||
SuccessThreshold: 1,
|
||||
FailureThreshold: 5,
|
||||
},
|
||||
// The code below is a workaround for https://github.com/kubernetes/contrib/blob/master/dnsmasq-metrics/Dockerfile.in#L21
|
||||
// This is just the normal mode (to run with user 0), all other containers do it except for this one, which may lead to
|
||||
// that the DNS pod fails if the "nobody" _group_ doesn't exist. I think it's a typo in the Dockerfile manifest and
|
||||
// that it should be "USER nobody:nogroup" instead of "USER nobody:nobody". However, this fixes the problem.
|
||||
SecurityContext: &v1.SecurityContext{
|
||||
RunAsUser: &dnsMasqMetricsUser,
|
||||
},
|
||||
Args: []string{
|
||||
"--v=2",
|
||||
"--logtostderr",
|
||||
@ -269,7 +278,7 @@ func createKubeDNSServiceSpec(cfg *kubeadmapi.MasterConfiguration) (*v1.ServiceS
|
||||
}
|
||||
ip, err := ipallocator.GetIndexedIP(n, 10)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to allocate IP address for kube-dns addon from the given CIDR (%q) [%v]", cfg.Networking.ServiceSubnet, err)
|
||||
return nil, fmt.Errorf("unable to allocate IP address for kube-dns addon from the given CIDR %q: [%v]", cfg.Networking.ServiceSubnet, err)
|
||||
}
|
||||
|
||||
return &v1.ServiceSpec{
|
||||
@ -288,17 +297,17 @@ func CreateEssentialAddons(cfg *kubeadmapi.MasterConfiguration, client *clientse
|
||||
SetNodeAffinity(&kubeProxyDaemonSet.Spec.Template.ObjectMeta, NativeArchitectureNodeAffinity())
|
||||
|
||||
if _, err := client.Extensions().DaemonSets(api.NamespaceSystem).Create(kubeProxyDaemonSet); err != nil {
|
||||
return fmt.Errorf("<master/addons> failed creating essential kube-proxy addon [%v]", err)
|
||||
return fmt.Errorf("failed creating essential kube-proxy addon [%v]", err)
|
||||
}
|
||||
|
||||
fmt.Println("<master/addons> created essential addon: kube-proxy")
|
||||
fmt.Println("[addons] Created essential addon: kube-proxy")
|
||||
|
||||
kubeDNSDeployment := NewDeployment("kube-dns", 1, createKubeDNSPodSpec(cfg))
|
||||
SetMasterTaintTolerations(&kubeDNSDeployment.Spec.Template.ObjectMeta)
|
||||
SetNodeAffinity(&kubeDNSDeployment.Spec.Template.ObjectMeta, NativeArchitectureNodeAffinity())
|
||||
|
||||
if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(kubeDNSDeployment); err != nil {
|
||||
return fmt.Errorf("<master/addons> failed creating essential kube-dns addon [%v]", err)
|
||||
return fmt.Errorf("failed creating essential kube-dns addon [%v]", err)
|
||||
}
|
||||
|
||||
kubeDNSServiceSpec, err := createKubeDNSServiceSpec(cfg)
|
||||
@ -309,10 +318,10 @@ func CreateEssentialAddons(cfg *kubeadmapi.MasterConfiguration, client *clientse
|
||||
kubeDNSService := NewService("kube-dns", *kubeDNSServiceSpec)
|
||||
kubeDNSService.ObjectMeta.Labels["kubernetes.io/name"] = "KubeDNS"
|
||||
if _, err := client.Services(api.NamespaceSystem).Create(kubeDNSService); err != nil {
|
||||
return fmt.Errorf("<master/addons> failed creating essential kube-dns addon [%v]", err)
|
||||
return fmt.Errorf("failed creating essential kube-dns addon [%v]", err)
|
||||
}
|
||||
|
||||
fmt.Println("<master/addons> created essential addon: kube-dns")
|
||||
fmt.Println("[addons] Created essential addon: kube-dns")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -42,17 +42,15 @@ func CreateClientAndWaitForAPI(adminConfig *clientcmdapi.Config) (*clientset.Cli
|
||||
&clientcmd.ConfigOverrides{},
|
||||
).ClientConfig()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("<master/apiclient> failed to create API client configuration [%v]", err)
|
||||
return nil, fmt.Errorf("failed to create API client configuration [%v]", err)
|
||||
}
|
||||
|
||||
fmt.Println("<master/apiclient> created API client configuration")
|
||||
|
||||
client, err := clientset.NewForConfig(adminClientConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("<master/apiclient> failed to create API client [%v]", err)
|
||||
return nil, fmt.Errorf("failed to create API client [%v]", err)
|
||||
}
|
||||
|
||||
fmt.Println("<master/apiclient> created API client, waiting for the control plane to become ready")
|
||||
fmt.Println("[apiclient] Created API client, waiting for the control plane to become ready")
|
||||
|
||||
start := time.Now()
|
||||
wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
|
||||
@ -62,28 +60,28 @@ func CreateClientAndWaitForAPI(adminConfig *clientcmdapi.Config) (*clientset.Cli
|
||||
}
|
||||
// TODO(phase2) must revisit this when we implement HA
|
||||
if len(cs.Items) < 3 {
|
||||
fmt.Println("<master/apiclient> not all control plane components are ready yet")
|
||||
fmt.Println("[apiclient] Not all control plane components are ready yet")
|
||||
return false, nil
|
||||
}
|
||||
for _, item := range cs.Items {
|
||||
for _, condition := range item.Conditions {
|
||||
if condition.Type != v1.ComponentHealthy {
|
||||
fmt.Printf("<master/apiclient> control plane component %q is still unhealthy: %#v\n", item.ObjectMeta.Name, item.Conditions)
|
||||
fmt.Printf("[apiclient] Control plane component %q is still unhealthy: %#v\n", item.ObjectMeta.Name, item.Conditions)
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Printf("<master/apiclient> all control plane components are healthy after %f seconds\n", time.Since(start).Seconds())
|
||||
fmt.Printf("[apiclient] All control plane components are healthy after %f seconds\n", time.Since(start).Seconds())
|
||||
return true, nil
|
||||
})
|
||||
|
||||
fmt.Println("<master/apiclient> waiting for at least one node to register and become ready")
|
||||
fmt.Println("[apiclient] Waiting for at least one node to register and become ready")
|
||||
start = time.Now()
|
||||
wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
|
||||
nodeList, err := client.Nodes().List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
fmt.Println("<master/apiclient> temporarily unable to list nodes (will retry)")
|
||||
fmt.Println("[apiclient] Temporarily unable to list nodes (will retry)")
|
||||
return false, nil
|
||||
}
|
||||
if len(nodeList.Items) < 1 {
|
||||
@ -91,11 +89,11 @@ func CreateClientAndWaitForAPI(adminConfig *clientcmdapi.Config) (*clientset.Cli
|
||||
}
|
||||
n := &nodeList.Items[0]
|
||||
if !v1.IsNodeReady(n) {
|
||||
fmt.Println("<master/apiclient> first node has registered, but is not ready yet")
|
||||
fmt.Println("[apiclient] First node has registered, but is not ready yet")
|
||||
return false, nil
|
||||
}
|
||||
|
||||
fmt.Printf("<master/apiclient> first node is ready after %f seconds\n", time.Since(start).Seconds())
|
||||
fmt.Printf("[apiclient] First node is ready after %f seconds\n", time.Since(start).Seconds())
|
||||
return true, nil
|
||||
})
|
||||
|
||||
@ -180,7 +178,7 @@ func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset, sched
|
||||
|
||||
if _, err := client.Nodes().Update(n); err != nil {
|
||||
if apierrs.IsConflict(err) {
|
||||
fmt.Println("<master/apiclient> temporarily unable to update master node metadata due to conflict (will retry)")
|
||||
fmt.Println("[apiclient] Temporarily unable to update master node metadata due to conflict (will retry)")
|
||||
time.Sleep(apiCallRetryInterval)
|
||||
attemptToUpdateMasterRoleLabelsAndTaints(client, schedulable)
|
||||
} else {
|
||||
@ -195,7 +193,7 @@ func UpdateMasterRoleLabelsAndTaints(client *clientset.Clientset, schedulable bo
|
||||
// TODO(phase1+) use iterate instead of recursion
|
||||
err := attemptToUpdateMasterRoleLabelsAndTaints(client, schedulable)
|
||||
if err != nil {
|
||||
return fmt.Errorf("<master/apiclient> failed to update master node - %v", err)
|
||||
return fmt.Errorf("failed to update master node - [%v]", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@ -240,7 +238,7 @@ func NativeArchitectureNodeAffinity() v1.NodeSelectorRequirement {
|
||||
}
|
||||
|
||||
func createDummyDeployment(client *clientset.Clientset) {
|
||||
fmt.Println("<master/apiclient> attempting a test deployment")
|
||||
fmt.Println("[apiclient] Creating a test deployment")
|
||||
dummyDeployment := NewDeployment("dummy", 1, v1.PodSpec{
|
||||
HostNetwork: true,
|
||||
SecurityContext: &v1.PodSecurityContext{},
|
||||
@ -253,7 +251,7 @@ func createDummyDeployment(client *clientset.Clientset) {
|
||||
wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
|
||||
// TODO: we should check the error, as some cases may be fatal
|
||||
if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(dummyDeployment); err != nil {
|
||||
fmt.Printf("<master/apiclient> failed to create test deployment [%v] (will retry)", err)
|
||||
fmt.Printf("[apiclient] Failed to create test deployment [%v] (will retry)\n", err)
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
@ -262,7 +260,7 @@ func createDummyDeployment(client *clientset.Clientset) {
|
||||
wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
|
||||
d, err := client.Extensions().Deployments(api.NamespaceSystem).Get("dummy", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
fmt.Printf("<master/apiclient> failed to get test deployment [%v] (will retry)", err)
|
||||
fmt.Printf("[apiclient] Failed to get test deployment [%v] (will retry)\n", err)
|
||||
return false, nil
|
||||
}
|
||||
if d.Status.AvailableReplicas < 1 {
|
||||
@ -271,9 +269,10 @@ func createDummyDeployment(client *clientset.Clientset) {
|
||||
return true, nil
|
||||
})
|
||||
|
||||
fmt.Println("<master/apiclient> test deployment succeeded")
|
||||
fmt.Println("[apiclient] Test deployment succeeded")
|
||||
|
||||
// TODO: In the future, make sure the ReplicaSet and Pod are garbage collected
|
||||
if err := client.Extensions().Deployments(api.NamespaceSystem).Delete("dummy", &v1.DeleteOptions{}); err != nil {
|
||||
fmt.Printf("<master/apiclient> failed to delete test deployment [%v] (will ignore)", err)
|
||||
fmt.Printf("[apiclient] Failed to delete test deployment [%v] (will ignore)\n", err)
|
||||
}
|
||||
}
|
||||
|
@ -124,13 +124,13 @@ func CreateDiscoveryDeploymentAndSecret(cfg *kubeadmapi.MasterConfiguration, cli
|
||||
kd := newKubeDiscovery(cfg, caCert)
|
||||
|
||||
if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(kd.Deployment); err != nil {
|
||||
return fmt.Errorf("<master/discovery> failed to create %q deployment [%v]", kubeDiscoveryName, err)
|
||||
return fmt.Errorf("failed to create %q deployment [%v]", kubeDiscoveryName, err)
|
||||
}
|
||||
if _, err := client.Secrets(api.NamespaceSystem).Create(kd.Secret); err != nil {
|
||||
return fmt.Errorf("<master/discovery> failed to create %q secret [%v]", kubeDiscoverySecretName, err)
|
||||
return fmt.Errorf("failed to create %q secret [%v]", kubeDiscoverySecretName, err)
|
||||
}
|
||||
|
||||
fmt.Println("<master/discovery> created essential addon: kube-discovery, waiting for it to become ready")
|
||||
fmt.Println("[token-discovery] Created the kube-discovery deployment, waiting for it to become ready")
|
||||
|
||||
start := time.Now()
|
||||
wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
|
||||
@ -143,7 +143,7 @@ func CreateDiscoveryDeploymentAndSecret(cfg *kubeadmapi.MasterConfiguration, cli
|
||||
}
|
||||
return true, nil
|
||||
})
|
||||
fmt.Printf("<master/discovery> kube-discovery is ready after %f seconds\n", time.Since(start).Seconds())
|
||||
fmt.Printf("[token-discovery] kube-discovery is ready after %f seconds\n", time.Since(start).Seconds())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -21,7 +21,6 @@ import (
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
|
||||
// TODO: "k8s.io/client-go/client/tools/clientcmd/api"
|
||||
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
|
||||
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
|
||||
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
|
||||
@ -44,7 +43,7 @@ func CreateCertsAndConfigForClients(cfg kubeadmapi.API, clientNames []string, ca
|
||||
for _, client := range clientNames {
|
||||
key, cert, err := newClientKeyAndCert(caCert, caKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("<master/kubeconfig> failure while creating %s client certificate - %v", client, err)
|
||||
return nil, fmt.Errorf("failure while creating %s client certificate - [%v]", client, err)
|
||||
}
|
||||
config := kubeadmutil.MakeClientConfigWithCerts(
|
||||
basicClientConfig,
|
||||
|
@ -31,6 +31,8 @@ import (
|
||||
metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
|
||||
cmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"
|
||||
"k8s.io/kubernetes/pkg/util/intstr"
|
||||
|
||||
"github.com/blang/semver"
|
||||
)
|
||||
|
||||
// Static pod definitions in golang form are included below so that `kubeadm init` can get going.
|
||||
@ -47,7 +49,11 @@ const (
|
||||
kubeControllerManager = "kube-controller-manager"
|
||||
kubeScheduler = "kube-scheduler"
|
||||
kubeProxy = "kube-proxy"
|
||||
pkiDir = "/etc/kubernetes/pki"
|
||||
)
|
||||
|
||||
var (
|
||||
// Minimum version of kube-apiserver that supports --kubelet-preferred-address-types
|
||||
preferredAddressMinimumVersion = semver.MustParse("1.5.0-beta.2")
|
||||
)
|
||||
|
||||
// WriteStaticPodManifests builds manifest objects based on user provided configuration and then dumps it to disk
|
||||
@ -124,16 +130,16 @@ func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error {
|
||||
|
||||
manifestsPath := path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "manifests")
|
||||
if err := os.MkdirAll(manifestsPath, 0700); err != nil {
|
||||
return fmt.Errorf("<master/manifests> failed to create directory %q [%v]", manifestsPath, err)
|
||||
return fmt.Errorf("failed to create directory %q [%v]", manifestsPath, err)
|
||||
}
|
||||
for name, spec := range staticPodSpecs {
|
||||
filename := path.Join(manifestsPath, name+".json")
|
||||
serialized, err := json.MarshalIndent(spec, "", " ")
|
||||
if err != nil {
|
||||
return fmt.Errorf("<master/manifests> failed to marshall manifest for %q to JSON [%v]", name, err)
|
||||
return fmt.Errorf("failed to marshal manifest for %q to JSON [%v]", name, err)
|
||||
}
|
||||
if err := cmdutil.DumpReaderToFile(bytes.NewReader(serialized), filename); err != nil {
|
||||
return fmt.Errorf("<master/manifests> failed to create static pod manifest file for %q (%q) [%v]", name, filename, err)
|
||||
return fmt.Errorf("failed to create static pod manifest file for %q (%q) [%v]", name, filename, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
@ -265,26 +271,24 @@ func componentPod(container api.Container, volumes ...api.Volume) api.Pod {
|
||||
}
|
||||
}
|
||||
|
||||
func getComponentBaseCommand(component string) (command []string) {
|
||||
func getComponentBaseCommand(component string) []string {
|
||||
if kubeadmapi.GlobalEnvParams.HyperkubeImage != "" {
|
||||
command = []string{"/hyperkube", component}
|
||||
} else {
|
||||
command = []string{"kube-" + component}
|
||||
return []string{"/hyperkube", component}
|
||||
}
|
||||
command = append(command, kubeadmapi.GlobalEnvParams.ComponentLoglevel)
|
||||
return
|
||||
|
||||
return []string{"kube-" + component}
|
||||
}
|
||||
|
||||
func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration) (command []string) {
|
||||
command = append(getComponentBaseCommand(apiServer),
|
||||
func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
|
||||
command := append(getComponentBaseCommand(apiServer),
|
||||
"--insecure-bind-address=127.0.0.1",
|
||||
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
|
||||
"--service-cluster-ip-range="+cfg.Networking.ServiceSubnet,
|
||||
"--service-account-key-file="+pkiDir+"/apiserver-key.pem",
|
||||
"--client-ca-file="+pkiDir+"/ca.pem",
|
||||
"--tls-cert-file="+pkiDir+"/apiserver.pem",
|
||||
"--tls-private-key-file="+pkiDir+"/apiserver-key.pem",
|
||||
"--token-auth-file="+pkiDir+"/tokens.csv",
|
||||
"--service-account-key-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/apiserver-key.pem",
|
||||
"--client-ca-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/ca.pem",
|
||||
"--tls-cert-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/apiserver.pem",
|
||||
"--tls-private-key-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/apiserver-key.pem",
|
||||
"--token-auth-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/tokens.csv",
|
||||
fmt.Sprintf("--secure-port=%d", cfg.API.BindPort),
|
||||
"--allow-privileged",
|
||||
)
|
||||
@ -294,6 +298,16 @@ func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration) (command []string)
|
||||
command = append(command, fmt.Sprintf("--advertise-address=%s", cfg.API.AdvertiseAddresses[0]))
|
||||
}
|
||||
|
||||
if len(cfg.KubernetesVersion) != 0 {
|
||||
// If the k8s version is v1.5-something, this argument is set and makes `kubectl logs` and `kubectl exec`
|
||||
// work on bare-metal where hostnames aren't usually resolvable
|
||||
// Omit the "v" in the beginning, otherwise semver will fail
|
||||
k8sVersion, err := semver.Parse(cfg.KubernetesVersion[1:])
|
||||
if err == nil && k8sVersion.GTE(preferredAddressMinimumVersion) {
|
||||
command = append(command, "--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname")
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the user decided to use an external etcd cluster
|
||||
if len(cfg.Etcd.Endpoints) > 0 {
|
||||
command = append(command, fmt.Sprintf("--etcd-servers=%s", strings.Join(cfg.Etcd.Endpoints, ",")))
|
||||
@ -320,19 +334,19 @@ func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration) (command []string)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
return command
|
||||
}
|
||||
|
||||
func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration) (command []string) {
|
||||
command = append(getComponentBaseCommand(controllerManager),
|
||||
func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
|
||||
command := append(getComponentBaseCommand(controllerManager),
|
||||
"--address=127.0.0.1",
|
||||
"--leader-elect",
|
||||
"--master=127.0.0.1:8080",
|
||||
"--cluster-name="+DefaultClusterName,
|
||||
"--root-ca-file="+pkiDir+"/ca.pem",
|
||||
"--service-account-private-key-file="+pkiDir+"/apiserver-key.pem",
|
||||
"--cluster-signing-cert-file="+pkiDir+"/ca.pem",
|
||||
"--cluster-signing-key-file="+pkiDir+"/ca-key.pem",
|
||||
"--root-ca-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/ca.pem",
|
||||
"--service-account-private-key-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/apiserver-key.pem",
|
||||
"--cluster-signing-cert-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/ca.pem",
|
||||
"--cluster-signing-key-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/ca-key.pem",
|
||||
"--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap",
|
||||
)
|
||||
|
||||
@ -340,7 +354,6 @@ func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration) (command [
|
||||
command = append(command, "--cloud-provider="+cfg.CloudProvider)
|
||||
|
||||
// Only append the --cloud-config option if there's a such file
|
||||
// TODO(phase1+) this won't work unless it's in one of the few directories we bind-mount
|
||||
if _, err := os.Stat(DefaultCloudConfigPath); err == nil {
|
||||
command = append(command, "--cloud-config="+DefaultCloudConfigPath)
|
||||
}
|
||||
@ -351,24 +364,19 @@ func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration) (command [
|
||||
if cfg.Networking.PodSubnet != "" {
|
||||
command = append(command, "--allocate-node-cidrs=true", "--cluster-cidr="+cfg.Networking.PodSubnet)
|
||||
}
|
||||
|
||||
return
|
||||
return command
|
||||
}
|
||||
|
||||
func getSchedulerCommand(cfg *kubeadmapi.MasterConfiguration) (command []string) {
|
||||
command = append(getComponentBaseCommand(scheduler),
|
||||
func getSchedulerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
|
||||
return append(getComponentBaseCommand(scheduler),
|
||||
"--address=127.0.0.1",
|
||||
"--leader-elect",
|
||||
"--master=127.0.0.1:8080",
|
||||
)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func getProxyCommand(cfg *kubeadmapi.MasterConfiguration) (command []string) {
|
||||
command = getComponentBaseCommand(proxy)
|
||||
|
||||
return
|
||||
func getProxyCommand(cfg *kubeadmapi.MasterConfiguration) []string {
|
||||
return getComponentBaseCommand(proxy)
|
||||
}
|
||||
|
||||
func getProxyEnvVars() []api.EnvVar {
|
||||
|
@ -369,15 +369,14 @@ func TestGetAPIServerCommand(t *testing.T) {
|
||||
},
|
||||
expected: []string{
|
||||
"kube-apiserver",
|
||||
"--v=2",
|
||||
"--insecure-bind-address=127.0.0.1",
|
||||
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
|
||||
"--service-cluster-ip-range=bar",
|
||||
"--service-account-key-file=" + pkiDir + "/apiserver-key.pem",
|
||||
"--client-ca-file=" + pkiDir + "/ca.pem",
|
||||
"--tls-cert-file=" + pkiDir + "/apiserver.pem",
|
||||
"--tls-private-key-file=" + pkiDir + "/apiserver-key.pem",
|
||||
"--token-auth-file=" + pkiDir + "/tokens.csv",
|
||||
"--service-account-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--client-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--tls-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.pem",
|
||||
"--tls-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--token-auth-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/tokens.csv",
|
||||
fmt.Sprintf("--secure-port=%d", 123),
|
||||
"--allow-privileged",
|
||||
"--etcd-servers=http://127.0.0.1:2379",
|
||||
@ -390,15 +389,14 @@ func TestGetAPIServerCommand(t *testing.T) {
|
||||
},
|
||||
expected: []string{
|
||||
"kube-apiserver",
|
||||
"--v=2",
|
||||
"--insecure-bind-address=127.0.0.1",
|
||||
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
|
||||
"--service-cluster-ip-range=bar",
|
||||
"--service-account-key-file=" + pkiDir + "/apiserver-key.pem",
|
||||
"--client-ca-file=" + pkiDir + "/ca.pem",
|
||||
"--tls-cert-file=" + pkiDir + "/apiserver.pem",
|
||||
"--tls-private-key-file=" + pkiDir + "/apiserver-key.pem",
|
||||
"--token-auth-file=" + pkiDir + "/tokens.csv",
|
||||
"--service-account-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--client-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--tls-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.pem",
|
||||
"--tls-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--token-auth-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/tokens.csv",
|
||||
fmt.Sprintf("--secure-port=%d", 123),
|
||||
"--allow-privileged",
|
||||
"--advertise-address=foo",
|
||||
@ -413,15 +411,14 @@ func TestGetAPIServerCommand(t *testing.T) {
|
||||
},
|
||||
expected: []string{
|
||||
"kube-apiserver",
|
||||
"--v=2",
|
||||
"--insecure-bind-address=127.0.0.1",
|
||||
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
|
||||
"--service-cluster-ip-range=bar",
|
||||
"--service-account-key-file=" + pkiDir + "/apiserver-key.pem",
|
||||
"--client-ca-file=" + pkiDir + "/ca.pem",
|
||||
"--tls-cert-file=" + pkiDir + "/apiserver.pem",
|
||||
"--tls-private-key-file=" + pkiDir + "/apiserver-key.pem",
|
||||
"--token-auth-file=" + pkiDir + "/tokens.csv",
|
||||
"--service-account-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--client-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--tls-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.pem",
|
||||
"--tls-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--token-auth-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/tokens.csv",
|
||||
fmt.Sprintf("--secure-port=%d", 123),
|
||||
"--allow-privileged",
|
||||
"--etcd-servers=http://127.0.0.1:2379",
|
||||
@ -429,6 +426,30 @@ func TestGetAPIServerCommand(t *testing.T) {
|
||||
"--etcd-keyfile=faz",
|
||||
},
|
||||
},
|
||||
// Make sure --kubelet-preferred-address-types
|
||||
{
|
||||
cfg: &kubeadmapi.MasterConfiguration{
|
||||
API: kubeadm.API{BindPort: 123, AdvertiseAddresses: []string{"foo"}},
|
||||
Networking: kubeadm.Networking{ServiceSubnet: "bar"},
|
||||
KubernetesVersion: "v1.5.3",
|
||||
},
|
||||
expected: []string{
|
||||
"kube-apiserver",
|
||||
"--insecure-bind-address=127.0.0.1",
|
||||
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
|
||||
"--service-cluster-ip-range=bar",
|
||||
"--service-account-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--client-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--tls-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.pem",
|
||||
"--tls-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--token-auth-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/tokens.csv",
|
||||
fmt.Sprintf("--secure-port=%d", 123),
|
||||
"--allow-privileged",
|
||||
"--advertise-address=foo",
|
||||
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
|
||||
"--etcd-servers=http://127.0.0.1:2379",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, rt := range tests {
|
||||
@ -454,15 +475,14 @@ func TestGetControllerManagerCommand(t *testing.T) {
|
||||
cfg: &kubeadmapi.MasterConfiguration{},
|
||||
expected: []string{
|
||||
"kube-controller-manager",
|
||||
"--v=2",
|
||||
"--address=127.0.0.1",
|
||||
"--leader-elect",
|
||||
"--master=127.0.0.1:8080",
|
||||
"--cluster-name=" + DefaultClusterName,
|
||||
"--root-ca-file=" + pkiDir + "/ca.pem",
|
||||
"--service-account-private-key-file=" + pkiDir + "/apiserver-key.pem",
|
||||
"--cluster-signing-cert-file=" + pkiDir + "/ca.pem",
|
||||
"--cluster-signing-key-file=" + pkiDir + "/ca-key.pem",
|
||||
"--root-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--service-account-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--cluster-signing-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--cluster-signing-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca-key.pem",
|
||||
"--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap",
|
||||
},
|
||||
},
|
||||
@ -470,15 +490,14 @@ func TestGetControllerManagerCommand(t *testing.T) {
|
||||
cfg: &kubeadmapi.MasterConfiguration{CloudProvider: "foo"},
|
||||
expected: []string{
|
||||
"kube-controller-manager",
|
||||
"--v=2",
|
||||
"--address=127.0.0.1",
|
||||
"--leader-elect",
|
||||
"--master=127.0.0.1:8080",
|
||||
"--cluster-name=" + DefaultClusterName,
|
||||
"--root-ca-file=" + pkiDir + "/ca.pem",
|
||||
"--service-account-private-key-file=" + pkiDir + "/apiserver-key.pem",
|
||||
"--cluster-signing-cert-file=" + pkiDir + "/ca.pem",
|
||||
"--cluster-signing-key-file=" + pkiDir + "/ca-key.pem",
|
||||
"--root-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--service-account-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--cluster-signing-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--cluster-signing-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca-key.pem",
|
||||
"--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap",
|
||||
"--cloud-provider=foo",
|
||||
},
|
||||
@ -487,15 +506,14 @@ func TestGetControllerManagerCommand(t *testing.T) {
|
||||
cfg: &kubeadmapi.MasterConfiguration{Networking: kubeadm.Networking{PodSubnet: "bar"}},
|
||||
expected: []string{
|
||||
"kube-controller-manager",
|
||||
"--v=2",
|
||||
"--address=127.0.0.1",
|
||||
"--leader-elect",
|
||||
"--master=127.0.0.1:8080",
|
||||
"--cluster-name=" + DefaultClusterName,
|
||||
"--root-ca-file=" + pkiDir + "/ca.pem",
|
||||
"--service-account-private-key-file=" + pkiDir + "/apiserver-key.pem",
|
||||
"--cluster-signing-cert-file=" + pkiDir + "/ca.pem",
|
||||
"--cluster-signing-key-file=" + pkiDir + "/ca-key.pem",
|
||||
"--root-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--service-account-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--cluster-signing-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--cluster-signing-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca-key.pem",
|
||||
"--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap",
|
||||
"--allocate-node-cidrs=true",
|
||||
"--cluster-cidr=bar",
|
||||
@ -526,7 +544,6 @@ func TestGetSchedulerCommand(t *testing.T) {
|
||||
cfg: &kubeadmapi.MasterConfiguration{},
|
||||
expected: []string{
|
||||
"kube-scheduler",
|
||||
"--v=2",
|
||||
"--address=127.0.0.1",
|
||||
"--leader-elect",
|
||||
"--master=127.0.0.1:8080",
|
||||
@ -557,7 +574,6 @@ func TestGetProxyCommand(t *testing.T) {
|
||||
cfg: &kubeadmapi.MasterConfiguration{},
|
||||
expected: []string{
|
||||
"kube-proxy",
|
||||
"--v=2",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
@ -24,7 +24,7 @@ import (
|
||||
"path"
|
||||
|
||||
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
|
||||
ipallocator "k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
|
||||
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
|
||||
certutil "k8s.io/kubernetes/pkg/util/cert"
|
||||
)
|
||||
|
||||
@ -162,39 +162,32 @@ func CreatePKIAssets(cfg *kubeadmapi.MasterConfiguration) (*rsa.PrivateKey, *x50
|
||||
|
||||
caKey, caCert, err := newCertificateAuthority()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("<master/pki> failure while creating CA keys and certificate - %v", err)
|
||||
return nil, nil, fmt.Errorf("failure while creating CA keys and certificate [%v]", err)
|
||||
}
|
||||
|
||||
if err := writeKeysAndCert(pkiPath, "ca", caKey, caCert); err != nil {
|
||||
return nil, nil, fmt.Errorf("<master/pki> failure while saving CA keys and certificate - %v", err)
|
||||
return nil, nil, fmt.Errorf("failure while saving CA keys and certificate [%v]", err)
|
||||
}
|
||||
fmt.Printf("<master/pki> generated Certificate Authority key and certificate:\n%s\n", certutil.FormatCert(caCert))
|
||||
pub, prv, cert := pathsKeysCerts(pkiPath, "ca")
|
||||
fmt.Printf("Public: %s\nPrivate: %s\nCert: %s\n", pub, prv, cert)
|
||||
fmt.Println("[certificates] Generated Certificate Authority key and certificate.")
|
||||
|
||||
apiKey, apiCert, err := newServerKeyAndCert(cfg, caCert, caKey, altNames)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("<master/pki> failure while creating API server keys and certificate - %v", err)
|
||||
return nil, nil, fmt.Errorf("failure while creating API server keys and certificate [%v]", err)
|
||||
}
|
||||
|
||||
if err := writeKeysAndCert(pkiPath, "apiserver", apiKey, apiCert); err != nil {
|
||||
return nil, nil, fmt.Errorf("<master/pki> failure while saving API server keys and certificate - %v", err)
|
||||
return nil, nil, fmt.Errorf("failure while saving API server keys and certificate [%v]", err)
|
||||
}
|
||||
fmt.Printf("<master/pki> generated API Server key and certificate:\n%s\n", certutil.FormatCert(apiCert))
|
||||
pub, prv, cert = pathsKeysCerts(pkiPath, "apiserver")
|
||||
fmt.Printf("Public: %s\nPrivate: %s\nCert: %s\n", pub, prv, cert)
|
||||
fmt.Println("[certificates] Generated API Server key and certificate")
|
||||
|
||||
saKey, err := newServiceAccountKey()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("<master/pki> failure while creating service account signing keys [%v]", err)
|
||||
return nil, nil, fmt.Errorf("failure while creating service account signing keys [%v]", err)
|
||||
}
|
||||
if err := writeKeysAndCert(pkiPath, "sa", saKey, nil); err != nil {
|
||||
return nil, nil, fmt.Errorf("<master/pki> failure while saving service account signing keys - %v", err)
|
||||
return nil, nil, fmt.Errorf("failure while saving service account signing keys [%v]", err)
|
||||
}
|
||||
fmt.Printf("<master/pki> generated Service Account Signing keys:\n")
|
||||
pub, prv, _ = pathsKeysCerts(pkiPath, "sa")
|
||||
fmt.Printf("Public: %s\nPrivate: %s\n", pub, prv)
|
||||
|
||||
fmt.Printf("<master/pki> created keys and certificates in %q\n", pkiPath)
|
||||
fmt.Println("[certificates] Generated Service Account signing keys")
|
||||
fmt.Printf("[certificates] Created keys and certificates in %q\n", pkiPath)
|
||||
return caKey, caCert, nil
|
||||
}
|
||||
|
@ -39,9 +39,9 @@ func generateTokenIfNeeded(s *kubeadmapi.Secrets) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Printf("<master/tokens> generated token: %q\n", s.GivenToken)
|
||||
fmt.Printf("[tokens] Generated token: %q\n", s.GivenToken)
|
||||
} else {
|
||||
fmt.Println("<master/tokens> accepted provided token")
|
||||
fmt.Println("[tokens] Accepted provided token")
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -50,15 +50,15 @@ func generateTokenIfNeeded(s *kubeadmapi.Secrets) error {
|
||||
func CreateTokenAuthFile(s *kubeadmapi.Secrets) error {
|
||||
tokenAuthFilePath := path.Join(kubeadmapi.GlobalEnvParams.HostPKIPath, "tokens.csv")
|
||||
if err := generateTokenIfNeeded(s); err != nil {
|
||||
return fmt.Errorf("<master/tokens> failed to generate token(s) [%v]", err)
|
||||
return fmt.Errorf("failed to generate token(s) [%v]", err)
|
||||
}
|
||||
if err := os.MkdirAll(kubeadmapi.GlobalEnvParams.HostPKIPath, 0700); err != nil {
|
||||
return fmt.Errorf("<master/tokens> failed to create directory %q [%v]", kubeadmapi.GlobalEnvParams.HostPKIPath, err)
|
||||
return fmt.Errorf("failed to create directory %q [%v]", kubeadmapi.GlobalEnvParams.HostPKIPath, err)
|
||||
}
|
||||
serialized := []byte(fmt.Sprintf("%s,kubeadm-node-csr,%s,system:kubelet-bootstrap\n", s.BearerToken, uuid.NewUUID()))
|
||||
// DumpReaderToFile create a file with mode 0600
|
||||
if err := cmdutil.DumpReaderToFile(bytes.NewReader(serialized), tokenAuthFilePath); err != nil {
|
||||
return fmt.Errorf("<master/tokens> failed to save token auth file (%q) [%v]", tokenAuthFilePath, err)
|
||||
return fmt.Errorf("failed to save token auth file (%q) [%v]", tokenAuthFilePath, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@@ -53,7 +53,7 @@ const retryTimeout = 5
func EstablishMasterConnection(s *kubeadmapi.NodeConfiguration, clusterInfo *kubeadmapi.ClusterInfo) (*ConnectionDetails, error) {
	hostName, err := os.Hostname()
	if err != nil {
		return nil, fmt.Errorf("<node/bootstrap> failed to get node hostname [%v]", err)
		return nil, fmt.Errorf("failed to get node hostname [%v]", err)
	}
	// TODO(phase1+) https://github.com/kubernetes/kubernetes/issues/33641
	nodeName := types.NodeName(hostName)
@@ -67,20 +67,20 @@ func EstablishMasterConnection(s *kubeadmapi.NodeConfiguration, clusterInfo *kub
	for _, endpoint := range endpoints {
		clientSet, err := createClients(caCert, endpoint, s.Secrets.BearerToken, nodeName)
		if err != nil {
			fmt.Printf("<node/bootstrap> warning: %s. Skipping endpoint %s\n", err, endpoint)
			fmt.Printf("[bootstrap] Warning: %s. Skipping endpoint %s\n", err, endpoint)
			continue
		}
		wg.Add(1)
		go func(apiEndpoint string) {
			defer wg.Done()
			wait.Until(func() {
				fmt.Printf("<node/bootstrap> trying to connect to endpoint %s\n", apiEndpoint)
				fmt.Printf("[bootstrap] Trying to connect to endpoint %s\n", apiEndpoint)
				err := checkAPIEndpoint(clientSet, apiEndpoint)
				if err != nil {
					fmt.Printf("<node/bootstrap> endpoint check failed [%v]\n", err)
					fmt.Printf("[bootstrap] Endpoint check failed [%v]\n", err)
					return
				}
				fmt.Printf("<node/bootstrap> successfully established connection with endpoint %s\n", apiEndpoint)
				fmt.Printf("[bootstrap] Successfully established connection with endpoint %q\n", apiEndpoint)
				// connection established, stop all wait threads
				close(stopChan)
				result <- &ConnectionDetails{
@@ -102,8 +102,7 @@ func EstablishMasterConnection(s *kubeadmapi.NodeConfiguration, clusterInfo *kub

	establishedConnection, ok := <-result
	if !ok {
		return nil, fmt.Errorf("<node/bootstrap> failed to create bootstrap clients " +
			"for any of the provided API endpoints")
		return nil, fmt.Errorf("failed to create bootstrap clients for any of the provided API endpoints")
	}
	return establishedConnection, nil
}
@@ -122,7 +121,7 @@ func createClients(caCert []byte, endpoint, token string, nodeName types.NodeNam
	}
	clientSet, err := clientset.NewForConfig(bootstrapClientConfig)
	if err != nil {
		return nil, fmt.Errorf("failed to create clients for the API endpoint %s [%v]", endpoint, err)
		return nil, fmt.Errorf("failed to create clients for the API endpoint %q: [%v]", endpoint, err)
	}
	return clientSet, nil
}
@@ -150,9 +149,9 @@ func checkAPIEndpoint(clientSet *clientset.Clientset, endpoint string) error {
	// check general connectivity
	version, err := clientSet.DiscoveryClient.ServerVersion()
	if err != nil {
		return fmt.Errorf("failed to connect to %s [%v]", endpoint, err)
		return fmt.Errorf("failed to connect to %q [%v]", endpoint, err)
	}
	fmt.Printf("<node/bootstrap> detected server version %s\n", version.String())
	fmt.Printf("[bootstrap] Detected server version: %s\n", version.String())

	// check certificates API
	serverGroups, err := clientSet.DiscoveryClient.ServerGroups()
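The bootstrap flow above races every known API endpoint in its own goroutine and takes whichever answers first. A stripped-down sketch of that pattern, using plain net/http probes instead of the kubeadm client set (endpoints, retry counts, and timings are illustrative placeholders):

package main

import (
	"fmt"
	"net/http"
	"sync"
	"time"
)

// probeEndpoints dials every endpoint concurrently and returns the first one
// that answers; closing the stop channel makes the losing goroutines give up.
func probeEndpoints(endpoints []string) (string, bool) {
	result := make(chan string, len(endpoints))
	stop := make(chan struct{})
	var once sync.Once
	var wg sync.WaitGroup

	for _, ep := range endpoints {
		wg.Add(1)
		go func(ep string) {
			defer wg.Done()
			for attempt := 0; attempt < 3; attempt++ {
				select {
				case <-stop:
					return // another endpoint already won
				default:
				}
				resp, err := http.Get(ep)
				if err == nil {
					resp.Body.Close()
					once.Do(func() { close(stop) }) // first winner stops the rest
					result <- ep
					return
				}
				time.Sleep(2 * time.Second) // retry interval, analogous to wait.Until
			}
		}(ep)
	}

	// Close the result channel once every prober has finished, so the read
	// below returns ok=false when nothing was reachable.
	go func() { wg.Wait(); close(result) }()
	ep, ok := <-result
	return ep, ok
}

func main() {
	if ep, ok := probeEndpoints([]string{"https://10.0.0.1:6443", "https://10.0.0.2:6443"}); ok {
		fmt.Println("connected to", ep)
	} else {
		fmt.Println("no endpoint reachable")
	}
}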
@@ -30,22 +30,22 @@ import (
func PerformTLSBootstrap(connection *ConnectionDetails) (*clientcmdapi.Config, error) {
	csrClient := connection.CertClient.CertificateSigningRequests()

	fmt.Println("<node/csr> created API client to obtain unique certificate for this node, generating keys and certificate signing request")
	fmt.Println("[csr] Created API client to obtain unique certificate for this node, generating keys and certificate signing request")

	key, err := certutil.MakeEllipticPrivateKeyPEM()
	if err != nil {
		return nil, fmt.Errorf("<node/csr> failed to generating private key [%v]", err)
		return nil, fmt.Errorf("failed to generating private key [%v]", err)
	}
	cert, err := csr.RequestNodeCertificate(csrClient, key, connection.NodeName)
	if err != nil {
		return nil, fmt.Errorf("<node/csr> failed to request signed certificate from the API server [%v]", err)
		return nil, fmt.Errorf("failed to request signed certificate from the API server [%v]", err)
	}
	fmtCert, err := certutil.FormatBytesCert(cert)
	if err != nil {
		return nil, fmt.Errorf("<node/csr> failed to format certificate [%v]", err)
		return nil, fmt.Errorf("failed to format certificate [%v]", err)
	}
	fmt.Printf("<node/csr> received signed certificate from the API server:\n%s\n", fmtCert)
	fmt.Println("<node/csr> generating kubelet configuration")
	fmt.Printf("[csr] Received signed certificate from the API server:\n%s\n", fmtCert)
	fmt.Println("[csr] Generating kubelet configuration")

	bareClientConfig := kubeadmutil.CreateBasicClientConfig("kubernetes", connection.Endpoint, connection.CACert)
	finalConfig := kubeadmutil.MakeClientConfigWithCerts(
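certutil.MakeEllipticPrivateKeyPEM produces the node's private key in PEM form before the CSR is submitted. A standard-library-only sketch of the same idea, assuming a P-256 key encoded as an "EC PRIVATE KEY" block (not the kubeadm helper itself):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"fmt"
)

// makeECPrivateKeyPEM is a stand-in for certutil.MakeEllipticPrivateKeyPEM:
// generate a P-256 key and PEM-encode its SEC 1 DER form.
func makeECPrivateKeyPEM() ([]byte, error) {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, err
	}
	der, err := x509.MarshalECPrivateKey(key)
	if err != nil {
		return nil, err
	}
	return pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: der}), nil
}

func main() {
	keyPEM, err := makeECPrivateKeyPEM()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s", keyPEM)
}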
@@ -37,16 +37,16 @@ func RetrieveTrustedClusterInfo(s *kubeadmapi.NodeConfiguration) (*kubeadmapi.Cl
	requestURL := fmt.Sprintf("http://%s:%d/cluster-info/v1/?token-id=%s", host, port, s.Secrets.TokenID)
	req, err := http.NewRequest("GET", requestURL, nil)
	if err != nil {
		return nil, fmt.Errorf("<node/discovery> failed to consturct an HTTP request [%v]", err)
		return nil, fmt.Errorf("failed to consturct an HTTP request [%v]", err)
	}

	fmt.Printf("<node/discovery> created cluster info discovery client, requesting info from %q\n", requestURL)
	fmt.Printf("[discovery] Created cluster info discovery client, requesting info from %q\n", requestURL)

	var res *http.Response
	wait.PollInfinite(discoveryRetryTimeout, func() (bool, error) {
		res, err = http.DefaultClient.Do(req)
		if err != nil {
			fmt.Printf("<node/discovery> failed to request cluster info, will try again: [%s]\n", err)
			fmt.Printf("[discovery] Failed to request cluster info, will try again: [%s]\n", err)
			return false, nil
		}
		return true, nil
@@ -58,28 +58,28 @@ func RetrieveTrustedClusterInfo(s *kubeadmapi.NodeConfiguration) (*kubeadmapi.Cl

	object, err := jose.ParseSigned(buf.String())
	if err != nil {
		return nil, fmt.Errorf("<node/discovery> failed to parse response as JWS object [%v]", err)
		return nil, fmt.Errorf("failed to parse response as JWS object [%v]", err)
	}

	fmt.Println("<node/discovery> cluster info object received, verifying signature using given token")
	fmt.Println("[discovery] Cluster info object received, verifying signature using given token")

	output, err := object.Verify(s.Secrets.Token)
	if err != nil {
		return nil, fmt.Errorf("<node/discovery> failed to verify JWS signature of received cluster info object [%v]", err)
		return nil, fmt.Errorf("failed to verify JWS signature of received cluster info object [%v]", err)
	}

	clusterInfo := kubeadmapi.ClusterInfo{}

	if err := json.Unmarshal(output, &clusterInfo); err != nil {
		return nil, fmt.Errorf("<node/discovery> failed to decode received cluster info object [%v]", err)
		return nil, fmt.Errorf("failed to decode received cluster info object [%v]", err)
	}

	if len(clusterInfo.CertificateAuthorities) == 0 || len(clusterInfo.Endpoints) == 0 {
		return nil, fmt.Errorf("<node/discovery> cluster info object is invalid - no endpoint(s) and/or root CA certificate(s) found")
		return nil, fmt.Errorf("cluster info object is invalid - no endpoint(s) and/or root CA certificate(s) found")
	}

	// TODO(phase1+) print summary info about the CA certificate, along with the the checksum signature
	// we also need an ability for the user to configure the client to validate received CA cert against a checksum
	fmt.Printf("<node/discovery> cluster info signature and contents are valid, will use API endpoints %v\n", clusterInfo.Endpoints)
	fmt.Printf("[discovery] Cluster info signature and contents are valid, will use API endpoints %v\n", clusterInfo.Endpoints)
	return &clusterInfo, nil
}
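The discovery step parses the response as a JWS object and verifies it with the shared token before trusting the endpoints and CA certificates inside. A minimal sketch of that sign/verify round trip, assuming the go-jose v1 API (import path and HS256 symmetric signing are assumptions; the struct is a simplified stand-in for kubeadmapi.ClusterInfo):

package main

import (
	"encoding/json"
	"fmt"

	jose "gopkg.in/square/go-jose.v1"
)

// clusterInfo mirrors the shape of the object exchanged above (simplified).
type clusterInfo struct {
	Endpoints              []string `json:"endpoints"`
	CertificateAuthorities []string `json:"certificateAuthorities"`
}

func main() {
	token := []byte("0123456789abcdef") // shared secret, placeholder value

	// Master side: sign the cluster info payload with the shared token.
	payload, _ := json.Marshal(clusterInfo{
		Endpoints:              []string{"https://10.0.0.1:6443"},
		CertificateAuthorities: []string{"<ca-pem>"},
	})
	signer, err := jose.NewSigner(jose.HS256, token)
	if err != nil {
		panic(err)
	}
	obj, err := signer.Sign(payload)
	if err != nil {
		panic(err)
	}
	serialized := obj.FullSerialize()

	// Node side: parse the JWS and verify it with the same token.
	parsed, err := jose.ParseSigned(serialized)
	if err != nil {
		panic(err)
	}
	verified, err := parsed.Verify(token)
	if err != nil {
		panic(err)
	}
	var ci clusterInfo
	if err := json.Unmarshal(verified, &ci); err != nil {
		panic(err)
	}
	fmt.Println("verified endpoints:", ci.Endpoints)
}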
@@ -17,6 +17,7 @@ go_library(
	deps = [
		"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
		"//pkg/api/validation:go_default_library",
		"//pkg/util/errors:go_default_library",
		"//pkg/util/initsystem:go_default_library",
		"//pkg/util/node:go_default_library",
		"//test/e2e_node/system:go_default_library",
@@ -25,9 +25,11 @@ import (
	"net/http"
	"os"
	"os/exec"
	"path"

	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	"k8s.io/kubernetes/pkg/api/validation"
	utilerrors "k8s.io/kubernetes/pkg/util/errors"
	"k8s.io/kubernetes/pkg/util/initsystem"
	"k8s.io/kubernetes/pkg/util/node"
	"k8s.io/kubernetes/test/e2e_node/system"
@@ -38,7 +40,7 @@ type PreFlightError struct {
}

func (e *PreFlightError) Error() string {
	return fmt.Sprintf("preflight check errors:\n%s", e.Msg)
	return fmt.Sprintf("[preflight] Some fatal errors occurred:\n%s%s", e.Msg, "[preflight] If you know what you are doing, you can skip pre-flight checks with `--skip-preflight-checks`")
}

// PreFlightCheck validates the state of the system to ensure kubeadm will be
@@ -51,7 +53,8 @@ type PreFlightCheck interface {
// detect a supported init system however, all checks are skipped and a warning is
// returned.
type ServiceCheck struct {
	Service string
	Service       string
	CheckIfActive bool
}

func (sc ServiceCheck) Check() (warnings, errors []error) {
@@ -73,7 +76,7 @@ func (sc ServiceCheck) Check() (warnings, errors []error) {
			sc.Service, sc.Service))
	}

	if !initSystem.ServiceIsActive(sc.Service) {
	if sc.CheckIfActive && !initSystem.ServiceIsActive(sc.Service) {
		errors = append(errors,
			fmt.Errorf("%s service is not active, please run 'systemctl start %s.service'",
				sc.Service, sc.Service))
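With the new CheckIfActive field, a ServiceCheck can require only that a service exists (warning otherwise) or additionally that it is running. A toy, dependency-free illustration of that gating; the interface and the fake init system below are stand-ins, not the kubeadm or initsystem types:

package main

import "fmt"

// initSystem is a stand-in for the pkg/util/initsystem interface.
type initSystem interface {
	ServiceExists(service string) bool
	ServiceIsActive(service string) bool
}

type fakeInit struct{ active map[string]bool }

func (f fakeInit) ServiceExists(s string) bool   { _, ok := f.active[s]; return ok }
func (f fakeInit) ServiceIsActive(s string) bool { return f.active[s] }

// serviceCheck mirrors the ServiceCheck shape from the diff: the "is it
// running?" check only fires when CheckIfActive is set.
type serviceCheck struct {
	Service       string
	CheckIfActive bool
}

func (sc serviceCheck) check(is initSystem) (warnings, errors []error) {
	if !is.ServiceExists(sc.Service) {
		warnings = append(warnings, fmt.Errorf("%s service does not exist", sc.Service))
		return warnings, nil
	}
	if sc.CheckIfActive && !is.ServiceIsActive(sc.Service) {
		errors = append(errors, fmt.Errorf("%s service is not active", sc.Service))
	}
	return warnings, errors
}

func main() {
	is := fakeInit{active: map[string]bool{"docker": true, "kubelet": false}}
	for _, sc := range []serviceCheck{
		{Service: "kubelet", CheckIfActive: false}, // tolerated even if stopped
		{Service: "docker", CheckIfActive: true},   // must actually be running
	} {
		w, e := sc.check(is)
		fmt.Printf("%s: warnings=%v errors=%v\n", sc.Service, w, e)
	}
}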
@@ -128,9 +131,7 @@ func (poc PortOpenCheck) Check() (warnings, errors []error) {
}

// IsRootCheck verifies user is root
type IsRootCheck struct {
	root bool
}
type IsRootCheck struct{}

func (irc IsRootCheck) Check() (warnings, errors []error) {
	errors = []error{}
@@ -141,8 +142,7 @@ func (irc IsRootCheck) Check() (warnings, errors []error) {
	return nil, errors
}

// DirAvailableCheck checks if the given directory either does not exist, or
// is empty.
// DirAvailableCheck checks if the given directory either does not exist, or is empty.
type DirAvailableCheck struct {
	Path string
}
@@ -245,13 +245,28 @@ type SystemVerificationCheck struct{}

func (sysver SystemVerificationCheck) Check() (warnings, errors []error) {
	// Create a buffered writer and choose a quite large value (1M) and suppose the output from the system verification test won't exceed the limit
	bufw := bufio.NewWriterSize(os.Stdout, 1*1024*1024)

	// Run the system verification check, but write to out buffered writer instead of stdout
	err := system.Validate(system.DefaultSysSpec, &system.StreamReporter{WriteStream: bufw})
	bufw := bufio.NewWriterSize(os.Stdout, 1*1024*1024)
	reporter := &system.StreamReporter{WriteStream: bufw}

	var errs []error
	// All the validators we'd like to run:
	var validators = []system.Validator{
		&system.OSValidator{Reporter: reporter},
		&system.KernelValidator{Reporter: reporter},
		&system.CgroupsValidator{Reporter: reporter},
		&system.DockerValidator{Reporter: reporter},
	}

	// Run all validators
	for _, v := range validators {
		errs = append(errs, v.Validate(system.DefaultSysSpec))
	}

	err := utilerrors.NewAggregate(errs)
	if err != nil {
		// Only print the output from the system verification check if the check failed
		fmt.Println("System verification failed. Printing the output from the verification...")
		fmt.Println("[preflight] The system verification failed. Printing the output from the verification:")
		bufw.Flush()
		return nil, []error{err}
	}
@@ -261,10 +276,10 @@ func (sysver SystemVerificationCheck) Check() (warnings, errors []error) {
func RunInitMasterChecks(cfg *kubeadmapi.MasterConfiguration) error {
	checks := []PreFlightCheck{
		SystemVerificationCheck{},
		IsRootCheck{root: true},
		IsRootCheck{},
		HostnameCheck{},
		ServiceCheck{Service: "kubelet"},
		ServiceCheck{Service: "docker"},
		ServiceCheck{Service: "kubelet", CheckIfActive: false},
		ServiceCheck{Service: "docker", CheckIfActive: true},
		FirewalldCheck{ports: []int{int(cfg.API.BindPort), int(cfg.Discovery.BindPort), 10250}},
		PortOpenCheck{port: int(cfg.API.BindPort)},
		PortOpenCheck{port: 8080},
@@ -273,18 +288,18 @@ func RunInitMasterChecks(cfg *kubeadmapi.MasterConfiguration) error {
		PortOpenCheck{port: 10251},
		PortOpenCheck{port: 10252},
		HTTPProxyCheck{Proto: "https", Host: cfg.API.AdvertiseAddresses[0], Port: int(cfg.API.BindPort)},
		DirAvailableCheck{Path: "/etc/kubernetes/manifests"},
		DirAvailableCheck{Path: "/etc/kubernetes/pki"},
		DirAvailableCheck{Path: path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "manifests")},
		DirAvailableCheck{Path: kubeadmapi.GlobalEnvParams.HostPKIPath},
		DirAvailableCheck{Path: "/var/lib/kubelet"},
		FileAvailableCheck{Path: "/etc/kubernetes/admin.conf"},
		FileAvailableCheck{Path: "/etc/kubernetes/kubelet.conf"},
		InPathCheck{executable: "ebtables", mandatory: true},
		InPathCheck{executable: "ethtool", mandatory: true},
		FileAvailableCheck{Path: path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "admin.conf")},
		FileAvailableCheck{Path: path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "kubelet.conf")},
		InPathCheck{executable: "ip", mandatory: true},
		InPathCheck{executable: "iptables", mandatory: true},
		InPathCheck{executable: "mount", mandatory: true},
		InPathCheck{executable: "nsenter", mandatory: true},
		InPathCheck{executable: "socat", mandatory: true},
		InPathCheck{executable: "ebtables", mandatory: false},
		InPathCheck{executable: "ethtool", mandatory: false},
		InPathCheck{executable: "socat", mandatory: false},
		InPathCheck{executable: "tc", mandatory: false},
		InPathCheck{executable: "touch", mandatory: false},
	}
@@ -297,52 +312,52 @@ func RunInitMasterChecks(cfg *kubeadmapi.MasterConfiguration) error {
		)
	}

	return runChecks(checks, os.Stderr)
	return RunChecks(checks, os.Stderr)
}

func RunJoinNodeChecks(cfg *kubeadmapi.NodeConfiguration) error {
	checks := []PreFlightCheck{
		SystemVerificationCheck{},
		IsRootCheck{root: true},
		IsRootCheck{},
		HostnameCheck{},
		ServiceCheck{Service: "docker"},
		ServiceCheck{Service: "kubelet"},
		ServiceCheck{Service: "kubelet", CheckIfActive: false},
		ServiceCheck{Service: "docker", CheckIfActive: true},
		PortOpenCheck{port: 10250},
		HTTPProxyCheck{Proto: "https", Host: cfg.MasterAddresses[0], Port: int(cfg.APIPort)},
		HTTPProxyCheck{Proto: "http", Host: cfg.MasterAddresses[0], Port: int(cfg.DiscoveryPort)},
		DirAvailableCheck{Path: "/etc/kubernetes/manifests"},
		DirAvailableCheck{Path: path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "manifests")},
		DirAvailableCheck{Path: "/var/lib/kubelet"},
		FileAvailableCheck{Path: "/etc/kubernetes/kubelet.conf"},
		InPathCheck{executable: "ebtables", mandatory: true},
		InPathCheck{executable: "ethtool", mandatory: true},
		FileAvailableCheck{Path: path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "kubelet.conf")},
		InPathCheck{executable: "ip", mandatory: true},
		InPathCheck{executable: "iptables", mandatory: true},
		InPathCheck{executable: "mount", mandatory: true},
		InPathCheck{executable: "nsenter", mandatory: true},
		InPathCheck{executable: "socat", mandatory: true},
		InPathCheck{executable: "ebtables", mandatory: false},
		InPathCheck{executable: "ethtool", mandatory: false},
		InPathCheck{executable: "socat", mandatory: false},
		InPathCheck{executable: "tc", mandatory: false},
		InPathCheck{executable: "touch", mandatory: false},
	}

	return runChecks(checks, os.Stderr)
	return RunChecks(checks, os.Stderr)
}

func RunResetCheck() error {
func RunRootCheckOnly() error {
	checks := []PreFlightCheck{
		IsRootCheck{root: true},
		IsRootCheck{},
	}

	return runChecks(checks, os.Stderr)
	return RunChecks(checks, os.Stderr)
}

// runChecks runs each check, displays it's warnings/errors, and once all
// RunChecks runs each check, displays it's warnings/errors, and once all
// are processed will exit if any errors occurred.
func runChecks(checks []PreFlightCheck, ww io.Writer) error {
func RunChecks(checks []PreFlightCheck, ww io.Writer) error {
	found := []error{}
	for _, c := range checks {
		warnings, errs := c.Check()
		for _, w := range warnings {
			io.WriteString(ww, fmt.Sprintf("WARNING: %s\n", w))
			io.WriteString(ww, fmt.Sprintf("[preflight] WARNING: %s\n", w))
		}
		for _, e := range errs {
			found = append(found, e)
@@ -353,7 +368,22 @@ func runChecks(checks []PreFlightCheck, ww io.Writer) error {
		for _, i := range found {
			errs += "\t" + i.Error() + "\n"
		}
		return errors.New(errs)
		return &PreFlightError{Msg: errors.New(errs).Error()}
	}
	return nil
}

func TryStartKubelet() {
	// If we notice that the kubelet service is inactive, try to start it
	initSystem, err := initsystem.GetInitSystem()
	if err != nil {
		fmt.Println("[preflight] No supported init system detected, won't ensure kubelet is running.")
	} else if initSystem.ServiceExists("kubelet") && !initSystem.ServiceIsActive("kubelet") {

		fmt.Println("[preflight] Starting the kubelet service")
		if err := initSystem.ServiceStart("kubelet"); err != nil {
			fmt.Printf("[preflight] WARNING: Unable to start the kubelet service: [%v]\n", err)
			fmt.Println("[preflight] WARNING: Please ensure kubelet is running manually.")
		}
	}
}
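The contract of the (now exported) RunChecks is: stream warnings to the writer as they appear, collect errors, and return them as one PreFlightError. A compact, dependency-free sketch of that contract; the type and function names below are stand-ins for the kubeadm preflight package, not the package itself:

package main

import (
	"errors"
	"fmt"
	"io"
	"os"
)

type preFlightCheck interface {
	Check() (warnings, errs []error)
}

type preFlightError struct{ msg string }

func (e *preFlightError) Error() string {
	return fmt.Sprintf("[preflight] Some fatal errors occurred:\n%s", e.msg)
}

// runChecks mirrors the RunChecks flow above: print warnings, aggregate errors.
func runChecks(checks []preFlightCheck, ww io.Writer) error {
	found := []error{}
	for _, c := range checks {
		warnings, errs := c.Check()
		for _, w := range warnings {
			io.WriteString(ww, fmt.Sprintf("[preflight] WARNING: %s\n", w))
		}
		found = append(found, errs...)
	}
	if len(found) > 0 {
		msg := ""
		for _, e := range found {
			msg += "\t" + e.Error() + "\n"
		}
		return &preFlightError{msg: msg}
	}
	return nil
}

// staticCheck is a trivial check that reports a fixed warning and/or error.
type staticCheck struct{ warn, fail string }

func (s staticCheck) Check() (warnings, errs []error) {
	if s.warn != "" {
		warnings = append(warnings, errors.New(s.warn))
	}
	if s.fail != "" {
		errs = append(errs, errors.New(s.fail))
	}
	return warnings, errs
}

func main() {
	err := runChecks([]preFlightCheck{
		staticCheck{warn: "ebtables not found in $PATH"},
		staticCheck{fail: "port 10250 is already in use"},
	}, os.Stderr)
	if err != nil {
		fmt.Println(err)
	}
}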
@@ -43,23 +43,23 @@ func TestRunChecks(t *testing.T) {
		output string
	}{
		{[]PreFlightCheck{}, true, ""},
		{[]PreFlightCheck{preflightCheckTest{"warning"}}, true, "WARNING: warning\n"}, // should just print warning
		{[]PreFlightCheck{preflightCheckTest{"warning"}}, true, "[preflight] WARNING: warning\n"}, // should just print warning
		{[]PreFlightCheck{preflightCheckTest{"error"}}, false, ""},
		{[]PreFlightCheck{preflightCheckTest{"test"}}, false, ""},
	}
	for _, rt := range tokenTest {
		buf := new(bytes.Buffer)
		actual := runChecks(rt.p, buf)
		actual := RunChecks(rt.p, buf)
		if (actual == nil) != rt.expected {
			t.Errorf(
				"failed runChecks:\n\texpected: %t\n\t actual: %t",
				"failed RunChecks:\n\texpected: %t\n\t actual: %t",
				rt.expected,
				(actual == nil),
			)
		}
		if buf.String() != rt.output {
			t.Errorf(
				"failed runChecks:\n\texpected: %s\n\t actual: %s",
				"failed RunChecks:\n\texpected: %s\n\t actual: %s",
				rt.output,
				buf.String(),
			)
@@ -24,8 +24,6 @@ go_library(
		"//cmd/kubeadm/app/preflight:go_default_library",
		"//pkg/client/unversioned/clientcmd:go_default_library",
		"//pkg/client/unversioned/clientcmd/api:go_default_library",
		"//vendor:github.com/golang/glog",
		"//vendor:github.com/renstrom/dedent",
	],
)
@@ -1,5 +1,5 @@
/*
Copyright 2014 The Kubernetes Authors.
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -22,37 +22,18 @@ import (
	"strings"

	"k8s.io/kubernetes/cmd/kubeadm/app/preflight"

	"github.com/golang/glog"
	"github.com/renstrom/dedent"
)

const (
	DefaultErrorExitCode = 1
	PreFlight = 2
	PreFlightExitCode = 2
)

var AlphaWarningOnExit = dedent.Dedent(`
	kubeadm: I am an alpha version, my authors welcome your feedback and bug reports
	kubeadm: please create an issue using https://github.com/kubernetes/kubernetes/issues/new
	kubeadm: and make sure to mention @kubernetes/sig-cluster-lifecycle. Thank you!
`)

type debugError interface {
	DebugError() (msg string, args []interface{})
}

var fatalErrHandler = fatal

// BehaviorOnFatal allows you to override the default behavior when a fatal
// error occurs, which is to call os.Exit(code). You can pass 'panic' as a function
// here if you prefer the panic() over os.Exit(1).
func BehaviorOnFatal(f func(string, int)) {
	fatalErrHandler = f
}

// fatal prints the message if set and then exits. If V(2) or greater, glog.Fatal
// is invoked for extended information.
// fatal prints the message if set and then exits.
func fatal(msg string, code int) {
	if len(msg) > 0 {
		// add newline if needed
@@ -60,9 +41,6 @@ func fatal(msg string, code int) {
			msg += "\n"
		}

		if glog.V(2) {
			glog.FatalDepth(2, msg)
		}
		fmt.Fprint(os.Stderr, msg)
	}
	os.Exit(code)
@@ -74,7 +52,7 @@ func fatal(msg string, code int) {
// This method is generic to the command in use and may be used by non-Kubectl
// commands.
func CheckErr(err error) {
	checkErr("", err, fatalErrHandler)
	checkErr("", err, fatal)
}

// checkErr formats a given error as a string and calls the passed handleErr
@@ -84,9 +62,8 @@ func checkErr(prefix string, err error, handleErr func(string, int)) {
	case nil:
		return
	case *preflight.PreFlightError:
		handleErr(err.Error(), PreFlight)
		handleErr(err.Error(), PreFlightExitCode)
	default:
		fmt.Printf(AlphaWarningOnExit)
		handleErr(err.Error(), DefaultErrorExitCode)
	}
}
@@ -35,7 +35,7 @@ func TestCheckErr(t *testing.T) {
	}{
		{nil, 0},
		{fmt.Errorf(""), DefaultErrorExitCode},
		{&preflight.PreFlightError{}, PreFlight},
		{&preflight.PreFlightError{}, PreFlightExitCode},
	}

	for _, rt := range tokenTest {
@@ -21,7 +21,6 @@ import (
	"os"
	"path"

	// TODO: "k8s.io/client-go/client/tools/clientcmd/api"
	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
@@ -77,21 +76,21 @@ func MakeClientConfigWithToken(config *clientcmdapi.Config, clusterName string,

func WriteKubeconfigIfNotExists(name string, kubeconfig *clientcmdapi.Config) error {
	if err := os.MkdirAll(kubeadmapi.GlobalEnvParams.KubernetesDir, 0700); err != nil {
		return fmt.Errorf("<util/kubeconfig> failed to create directory %q [%v]", kubeadmapi.GlobalEnvParams.KubernetesDir, err)
		return fmt.Errorf("failed to create directory %q [%v]", kubeadmapi.GlobalEnvParams.KubernetesDir, err)
	}

	filename := path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, fmt.Sprintf("%s.conf", name))
	// Create and open the file, only if it does not already exist.
	f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_EXCL, 0600)
	if err != nil {
		return fmt.Errorf("<util/kubeconfig> failed to create %q, it already exists [%v]", filename, err)
		return fmt.Errorf("failed to create %q, it already exists [%v]", filename, err)
	}
	f.Close()

	if err := clientcmd.WriteToFile(*kubeconfig, filename); err != nil {
		return fmt.Errorf("<util/kubeconfig> failed to write to %q [%v]", filename, err)
		return fmt.Errorf("failed to write to %q [%v]", filename, err)
	}

	fmt.Printf("<util/kubeconfig> created %q\n", filename)
	fmt.Printf("[kubeconfig] Wrote KubeConfig file to disk: %q\n", filename)
	return nil
}
@@ -64,10 +64,10 @@ func UseGivenTokenIfValid(s *kubeadmapi.Secrets) (bool, error) {
	if s.GivenToken == "" {
		return false, nil // not given
	}
	fmt.Println("<util/tokens> validating provided token")
	fmt.Println("[tokens] Validating provided token")
	givenToken := strings.Split(strings.ToLower(s.GivenToken), ".")
	// TODO(phase1+) could also print more specific messages in each case
	invalidErr := "<util/tokens> provided token does not match expected <6 characters>.<16 characters> format - %s"
	invalidErr := "[tokens] Provided token does not match expected <6 characters>.<16 characters> format - %s"
	if len(givenToken) != 2 {
		return false, fmt.Errorf(invalidErr, "not in 2-part dot-separated format")
	}
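The expected token shape is `<6 characters>.<16 characters>`. A small stand-alone validator for that format, sketched from the snippet above rather than copied from the kubeadm implementation:

package main

import (
	"fmt"
	"strings"
)

// validateToken checks the <6 chars>.<16 chars> shape used for kubeadm tokens.
func validateToken(token string) error {
	parts := strings.Split(strings.ToLower(token), ".")
	if len(parts) != 2 {
		return fmt.Errorf("not in 2-part dot-separated format")
	}
	if len(parts[0]) != 6 || len(parts[1]) != 16 {
		return fmt.Errorf("token ID must be 6 characters and token secret 16 characters, got %d and %d", len(parts[0]), len(parts[1]))
	}
	return nil
}

func main() {
	for _, t := range []string{"abcdef.0123456789abcdef", "tooshort"} {
		fmt.Printf("%q: %v\n", t, validateToken(t))
	}
}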
@@ -54,18 +54,18 @@ func KubernetesReleaseVersion(version string) (string, error) {
		url := fmt.Sprintf("%s/%s.txt", kubeReleaseBucketURL, version)
		resp, err := http.Get(url)
		if err != nil {
			return "", fmt.Errorf("Error: unable to get URL %q: %s", url, err.Error())
			return "", fmt.Errorf("unable to get URL %q: %s", url, err.Error())
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return "", fmt.Errorf("Error: unable to fetch release information. URL: %q Status: %v", url, resp.Status)
			return "", fmt.Errorf("unable to fetch release information. URL: %q Status: %v", url, resp.Status)
		}
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return "", fmt.Errorf("Error: unable to read content of URL %q: %s", url, err.Error())
			return "", fmt.Errorf("unable to read content of URL %q: %s", url, err.Error())
		}
		// Re-validate received version and return.
		return KubernetesReleaseVersion(strings.Trim(string(body), " \t\n"))
	}
	return "", fmt.Errorf("Error: version %q doesn't match patterns for neither semantic version nor labels (stable, latest, ...)", version)
	return "", fmt.Errorf("version %q doesn't match patterns for neither semantic version nor labels (stable, latest, ...)", version)
}
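KubernetesReleaseVersion passes semantic versions through and resolves labels such as "stable" or "latest" by fetching `<label>.txt` from the release bucket, then re-validating the result. A simplified stand-alone sketch of the same flow; the bucket URL and the regular expressions are assumptions inferred from the snippet above, not the kubeadm constants:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"regexp"
	"strings"
)

// Assumed release bucket; the real constant lives in the kubeadm util package.
const releaseBucketURL = "https://storage.googleapis.com/kubernetes-release/release"

var (
	semVerRe = regexp.MustCompile(`^v?\d+\.\d+\.\d+`)      // e.g. v1.5.1
	labelRe  = regexp.MustCompile(`^[a-z]+(-[0-9.]+)?$`)   // stable, latest, stable-1.5, ...
)

// releaseVersion mimics the flow of KubernetesReleaseVersion: pass semantic
// versions through, resolve labels via the <label>.txt pointer files.
func releaseVersion(version string) (string, error) {
	if semVerRe.MatchString(version) {
		return version, nil
	}
	if labelRe.MatchString(version) {
		url := fmt.Sprintf("%s/%s.txt", releaseBucketURL, version)
		resp, err := http.Get(url)
		if err != nil {
			return "", fmt.Errorf("unable to get URL %q: %v", url, err)
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return "", fmt.Errorf("unable to fetch release information. URL: %q Status: %v", url, resp.Status)
		}
		body, err := ioutil.ReadAll(resp.Body)
		if err != nil {
			return "", fmt.Errorf("unable to read content of URL %q: %v", url, err)
		}
		// Re-validate whatever the pointer file contains (normally a semantic version).
		return releaseVersion(strings.TrimSpace(string(body)))
	}
	return "", fmt.Errorf("version %q matches neither a semantic version nor a label", version)
}

func main() {
	v, err := releaseVersion("stable")
	fmt.Println(v, err)
}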
@@ -17,16 +17,13 @@ limitations under the License.
package main

import (
	"fmt"
	"os"

	"k8s.io/kubernetes/cmd/kubeadm/app"
	"k8s.io/kubernetes/cmd/kubeadm/app/util"
)

func main() {
	if err := app.Run(); err != nil {
		fmt.Printf(util.AlphaWarningOnExit)
		os.Exit(1)
	}
	os.Exit(0)