Merge pull request #34719 from errordeveloper/api-and-disco-ports

Automatic merge from submit-queue

Add flags for alternative API and discovery ports

**What this PR does / why we need it**:

We had many issues reported because we use port 443 by default, and we should allow users to override whatever defaults we pick. This doesn't touch `localhost:8080` yet, which we should just get rid of entirely.
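
As a rough usage sketch (port values and placeholders below are illustrative examples, not recommendations), the flags added here would let you do something like:

```
# Sketch: bring up a master on non-default API and discovery ports
# (per this PR the defaults are 6443 for the API and 9898 for discovery)
kubeadm init --api-port=8443 --discovery-port=9899

# Sketch: join a node against those same non-default ports;
# <token> and <master-ip> are placeholders
kubeadm join --token=<token> --api-port=8443 --discovery-port=9899 <master-ip>
```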

**Which issue this PR fixes**: fixes #34311 #34307 #33638

**Special notes for your reviewer**: cc @pires 

**Release note**:
```release-note
Add `kubeadm` flags `--api-port` and `--discovery-port`, change default API port to 6443
```
Authored by Kubernetes Submit Queue on 2016-10-17 05:54:38 -07:00, committed by GitHub
commit 714f816a34
13 changed files with 159 additions and 108 deletions

View File

@ -20,4 +20,6 @@ const (
DefaultServiceDNSDomain = "cluster.local"
DefaultServicesSubnet = "10.12.0.0/12"
DefaultKubernetesVersion = "v1.4.1"
DefaultAPIBindPort = 6443
DefaultDiscoveryBindPort = 9898
)

View File

@ -23,6 +23,7 @@ type MasterConfiguration struct {
Secrets Secrets
API API
Discovery Discovery
Etcd Etcd
Networking Networking
KubernetesVersion string
@ -32,6 +33,11 @@ type MasterConfiguration struct {
type API struct {
AdvertiseAddresses []string
ExternalDNSNames []string
BindPort int32
}
type Discovery struct {
BindPort int32
}
type Networking struct {
@ -59,6 +65,8 @@ type NodeConfiguration struct {
MasterAddresses []string
Secrets Secrets
APIPort int32
DiscoveryPort int32
}
// ClusterInfo TODO add description

View File

@ -24,6 +24,7 @@ type MasterConfiguration struct {
Secrets Secrets `json:"secrets"`
API API `json:"api"`
Etcd Etcd `json:"etcd"`
Discovery Discovery `json:"discovery"`
Networking Networking `json:"networking"`
KubernetesVersion string `json:"kubernetesVersion"`
CloudProvider string `json:"cloudProvider"`
@ -32,6 +33,11 @@ type MasterConfiguration struct {
type API struct {
AdvertiseAddresses []string `json:"advertiseAddresses"`
ExternalDNSNames []string `json:"externalDNSNames"`
BindPort int32 `json:"bindPort"`
}
type Discovery struct {
BindPort int32 `json:"bindPort"`
}
type Networking struct {
@ -59,6 +65,8 @@ type NodeConfiguration struct {
MasterAddresses []string `json:"masterAddresses"`
Secrets Secrets `json:"secrets"`
APIPort int32 `json:"apiPort"`
DiscoveryPort int32 `json:"discoveryPort"`
}
// ClusterInfo TODO add description

View File

@ -20,6 +20,7 @@ import (
"fmt"
"io"
"io/ioutil"
"strings"
"github.com/renstrom/dedent"
"github.com/spf13/cobra"
@ -41,7 +42,7 @@ var (
You can now join any number of machines by running the following on each node:
kubeadm join --token %s %s
kubeadm join %s
`)
)
@ -126,6 +127,16 @@ func NewCmdInit(out io.Writer) *cobra.Command {
"skip preflight checks normally run before modifying the system",
)
cmd.PersistentFlags().Int32Var(
&cfg.API.BindPort, "api-port", kubeadmapi.DefaultAPIBindPort,
"Port for API to bind to",
)
cmd.PersistentFlags().Int32Var(
&cfg.Discovery.BindPort, "discovery-port", kubeadmapi.DefaultDiscoveryBindPort,
"Port for JWS discovery service to bind to",
)
return cmd
}
@ -146,7 +157,7 @@ func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight
if !skipPreFlight {
fmt.Println("Running pre-flight checks")
err := preflight.RunInitMasterChecks()
err := preflight.RunInitMasterChecks(cfg)
if err != nil {
return nil, &preflight.PreFlightError{Msg: err.Error()}
}
@ -190,7 +201,7 @@ func (i *Init) Run(out io.Writer) error {
return err
}
kubeconfigs, err := kubemaster.CreateCertsAndConfigForClients(i.cfg.API.AdvertiseAddresses, []string{"kubelet", "admin"}, caKey, caCert)
kubeconfigs, err := kubemaster.CreateCertsAndConfigForClients(i.cfg.API, []string{"kubelet", "admin"}, caKey, caCert)
if err != nil {
return err
}
@ -228,11 +239,16 @@ func (i *Init) Run(out io.Writer) error {
return err
}
// TODO(phase1+) use templates to reference struct fields directly as order of args is fragile
fmt.Fprintf(out, initDoneMsgf,
i.cfg.Secrets.GivenToken,
i.cfg.API.AdvertiseAddresses[0],
)
// TODO(phase1+) we could probably use templates for this logic, and reference struct fields directly etc
joinArgs := []string{fmt.Sprintf("--token=%s", i.cfg.Secrets.GivenToken)}
if i.cfg.API.BindPort != kubeadmapi.DefaultAPIBindPort {
joinArgs = append(joinArgs, fmt.Sprintf("--api-port=%d", i.cfg.API.BindPort))
}
if i.cfg.Discovery.BindPort != kubeadmapi.DefaultDiscoveryBindPort {
joinArgs = append(joinArgs, fmt.Sprintf("--discovery-port=%d", i.cfg.Discovery.BindPort))
}
joinArgs = append(joinArgs, i.cfg.API.AdvertiseAddresses[0])
fmt.Fprintf(out, initDoneMsgf, strings.Join(joinArgs, " "))
return nil
}
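
To make the conditional above concrete: the printed join command only carries a port flag when that port differs from its default, so a hypothetical `kubeadm init --discovery-port=9899` run (with the default API port) would end with a line along the lines of the sketch below, where `<token>` and `<advertise-address>` stand in for the generated token and the first advertise address:

```
kubeadm join --token=<token> --discovery-port=9899 <advertise-address>
```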

View File

@ -70,6 +70,16 @@ func NewCmdJoin(out io.Writer) *cobra.Command {
"skip preflight checks normally run before modifying the system",
)
cmd.PersistentFlags().Int32Var(
&cfg.APIPort, "api-port", kubeadmapi.DefaultAPIBindPort,
"(optional) API server port on the master",
)
cmd.PersistentFlags().Int32Var(
&cfg.DiscoveryPort, "discovery-port", kubeadmapi.DefaultDiscoveryBindPort,
"(optional) Discovery port on the master",
)
return cmd
}

View File

@ -32,7 +32,7 @@ import (
)
// TODO(phase1+): kube-proxy should be a daemonset, three different daemonsets should not be here
func createKubeProxyPodSpec(s *kubeadmapi.MasterConfiguration, architecture string) api.PodSpec {
func createKubeProxyPodSpec(cfg *kubeadmapi.MasterConfiguration, architecture string) api.PodSpec {
envParams := kubeadmapi.GetEnvParams()
privilegedTrue := true
return api.PodSpec{
@ -42,8 +42,8 @@ func createKubeProxyPodSpec(s *kubeadmapi.MasterConfiguration, architecture stri
},
Containers: []api.Container{{
Name: kubeProxy,
Image: images.GetCoreImage(images.KubeProxyImage, s, envParams["hyperkube_image"]),
Command: append(getComponentCommand("proxy", s), "--kubeconfig=/run/kubeconfig"),
Image: images.GetCoreImage(images.KubeProxyImage, cfg, envParams["hyperkube_image"]),
Command: append(getComponentCommand("proxy", cfg), "--kubeconfig=/run/kubeconfig"),
SecurityContext: &api.SecurityContext{Privileged: &privilegedTrue},
VolumeMounts: []api.VolumeMount{
{
@ -85,7 +85,7 @@ func createKubeProxyPodSpec(s *kubeadmapi.MasterConfiguration, architecture stri
}
}
func createKubeDNSPodSpec(s *kubeadmapi.MasterConfiguration) api.PodSpec {
func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) api.PodSpec {
dnsPodResources := api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("100m"),
@ -100,7 +100,7 @@ func createKubeDNSPodSpec(s *kubeadmapi.MasterConfiguration) api.PodSpec {
kubeDNSPort := int32(10053)
dnsmasqPort := int32(53)
nslookup := fmt.Sprintf("nslookup kubernetes.default.svc.%s 127.0.0.1", s.Networking.DNSDomain)
nslookup := fmt.Sprintf("nslookup kubernetes.default.svc.%s 127.0.0.1", cfg.Networking.DNSDomain)
nslookup = fmt.Sprintf("-cmd=%s:%d >/dev/null && %s:%d >/dev/null",
nslookup, dnsmasqPort,
@ -121,7 +121,7 @@ func createKubeDNSPodSpec(s *kubeadmapi.MasterConfiguration) api.PodSpec {
Requests: dnsPodResources,
},
Args: []string{
fmt.Sprintf("--domain=%s", s.Networking.DNSDomain),
fmt.Sprintf("--domain=%s", cfg.Networking.DNSDomain),
fmt.Sprintf("--dns-port=%d", kubeDNSPort),
// TODO __PILLAR__FEDERATIONS__DOMAIN__MAP__
},
@ -214,14 +214,14 @@ func createKubeDNSPodSpec(s *kubeadmapi.MasterConfiguration) api.PodSpec {
}
func createKubeDNSServiceSpec(s *kubeadmapi.MasterConfiguration) (*api.ServiceSpec, error) {
_, n, err := net.ParseCIDR(s.Networking.ServiceSubnet)
func createKubeDNSServiceSpec(cfg *kubeadmapi.MasterConfiguration) (*api.ServiceSpec, error) {
_, n, err := net.ParseCIDR(cfg.Networking.ServiceSubnet)
if err != nil {
return nil, fmt.Errorf("could not parse %q: %v", s.Networking.ServiceSubnet, err)
return nil, fmt.Errorf("could not parse %q: %v", cfg.Networking.ServiceSubnet, err)
}
ip, err := ipallocator.GetIndexedIP(n, 10)
if err != nil {
return nil, fmt.Errorf("unable to allocate IP address for kube-dns addon from the given CIDR (%q) [%v]", s.Networking.ServiceSubnet, err)
return nil, fmt.Errorf("unable to allocate IP address for kube-dns addon from the given CIDR (%q) [%v]", cfg.Networking.ServiceSubnet, err)
}
svc := &api.ServiceSpec{
@ -236,11 +236,11 @@ func createKubeDNSServiceSpec(s *kubeadmapi.MasterConfiguration) (*api.ServiceSp
return svc, nil
}
func CreateEssentialAddons(s *kubeadmapi.MasterConfiguration, client *clientset.Clientset) error {
func CreateEssentialAddons(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset) error {
arches := [3]string{"amd64", "arm", "arm64"}
for _, arch := range arches {
kubeProxyDaemonSet := NewDaemonSet(kubeProxy+"-"+arch, createKubeProxyPodSpec(s, arch))
kubeProxyDaemonSet := NewDaemonSet(kubeProxy+"-"+arch, createKubeProxyPodSpec(cfg, arch))
SetMasterTaintTolerations(&kubeProxyDaemonSet.Spec.Template.ObjectMeta)
if _, err := client.Extensions().DaemonSets(api.NamespaceSystem).Create(kubeProxyDaemonSet); err != nil {
@ -250,14 +250,14 @@ func CreateEssentialAddons(s *kubeadmapi.MasterConfiguration, client *clientset.
fmt.Println("<master/addons> created essential addon: kube-proxy")
kubeDNSDeployment := NewDeployment("kube-dns", 1, createKubeDNSPodSpec(s))
kubeDNSDeployment := NewDeployment("kube-dns", 1, createKubeDNSPodSpec(cfg))
SetMasterTaintTolerations(&kubeDNSDeployment.Spec.Template.ObjectMeta)
if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(kubeDNSDeployment); err != nil {
return fmt.Errorf("<master/addons> failed creating essential kube-dns addon [%v]", err)
}
kubeDNSServiceSpec, err := createKubeDNSServiceSpec(s)
kubeDNSServiceSpec, err := createKubeDNSServiceSpec(cfg)
if err != nil {
return fmt.Errorf("<master/addons> failed creating essential kube-dns addon - %v", err)
}

View File

@ -40,18 +40,18 @@ const (
kubeDiscoverySecretName = "clusterinfo"
)
func encodeKubeDiscoverySecretData(s *kubeadmapi.MasterConfiguration, caCert *x509.Certificate) map[string][]byte {
func encodeKubeDiscoverySecretData(cfg *kubeadmapi.MasterConfiguration, caCert *x509.Certificate) map[string][]byte {
var (
data = map[string][]byte{}
endpointList = []string{}
tokenMap = map[string]string{}
)
for _, addr := range s.API.AdvertiseAddresses {
endpointList = append(endpointList, fmt.Sprintf("https://%s:443", addr))
for _, addr := range cfg.API.AdvertiseAddresses {
endpointList = append(endpointList, fmt.Sprintf("https://%s:%d", addr, cfg.API.BindPort))
}
tokenMap[s.Secrets.TokenID] = s.Secrets.BearerToken
tokenMap[cfg.Secrets.TokenID] = cfg.Secrets.BearerToken
data["endpoint-list.json"], _ = json.Marshal(endpointList)
data["token-map.json"], _ = json.Marshal(tokenMap)
@ -60,7 +60,7 @@ func encodeKubeDiscoverySecretData(s *kubeadmapi.MasterConfiguration, caCert *x5
return data
}
func newKubeDiscoveryPodSpec() api.PodSpec {
func newKubeDiscoveryPodSpec(cfg *kubeadmapi.MasterConfiguration) api.PodSpec {
envParams := kubeadmapi.GetEnvParams()
return api.PodSpec{
// We have to use host network namespace, as `HostPort`/`HostIP` are Docker's
@ -80,7 +80,7 @@ func newKubeDiscoveryPodSpec() api.PodSpec {
Ports: []api.ContainerPort{
// TODO when CNI issue (#31307) is resolved, we should consider adding
// `HostIP: s.API.AdvertiseAddrs[0]`, if there is only one address`
{Name: "http", ContainerPort: 9898, HostPort: 9898},
{Name: "http", ContainerPort: kubeadmapi.DefaultDiscoveryBindPort, HostPort: cfg.Discovery.BindPort},
},
SecurityContext: &api.SecurityContext{
SELinuxOptions: &api.SELinuxOptions{
@ -101,13 +101,13 @@ func newKubeDiscoveryPodSpec() api.PodSpec {
}
}
func newKubeDiscovery(s *kubeadmapi.MasterConfiguration, caCert *x509.Certificate) kubeDiscovery {
func newKubeDiscovery(cfg *kubeadmapi.MasterConfiguration, caCert *x509.Certificate) kubeDiscovery {
kd := kubeDiscovery{
Deployment: NewDeployment(kubeDiscoveryName, 1, newKubeDiscoveryPodSpec()),
Deployment: NewDeployment(kubeDiscoveryName, 1, newKubeDiscoveryPodSpec(cfg)),
Secret: &api.Secret{
ObjectMeta: api.ObjectMeta{Name: kubeDiscoverySecretName},
Type: api.SecretTypeOpaque,
Data: encodeKubeDiscoverySecretData(s, caCert),
Data: encodeKubeDiscoverySecretData(cfg, caCert),
},
}
@ -117,8 +117,8 @@ func newKubeDiscovery(s *kubeadmapi.MasterConfiguration, caCert *x509.Certificat
return kd
}
func CreateDiscoveryDeploymentAndSecret(s *kubeadmapi.MasterConfiguration, client *clientset.Clientset, caCert *x509.Certificate) error {
kd := newKubeDiscovery(s, caCert)
func CreateDiscoveryDeploymentAndSecret(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, caCert *x509.Certificate) error {
kd := newKubeDiscovery(cfg, caCert)
if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(kd.Deployment); err != nil {
return fmt.Errorf("<master/discovery> failed to create %q deployment [%v]", kubeDiscoveryName, err)

View File

@ -22,19 +22,20 @@ import (
"fmt"
// TODO: "k8s.io/client-go/client/tools/clientcmd/api"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
certutil "k8s.io/kubernetes/pkg/util/cert"
)
func CreateCertsAndConfigForClients(advertiseAddresses, clientNames []string, caKey *rsa.PrivateKey, caCert *x509.Certificate) (map[string]*clientcmdapi.Config, error) {
func CreateCertsAndConfigForClients(cfg kubeadmapi.API, clientNames []string, caKey *rsa.PrivateKey, caCert *x509.Certificate) (map[string]*clientcmdapi.Config, error) {
basicClientConfig := kubeadmutil.CreateBasicClientConfig(
"kubernetes",
// TODO this is not great, but there is only one address we can use here
// so we'll pick the first one; there is not much chance of having an empty
// slice by the time this gets called
fmt.Sprintf("https://%s:443", advertiseAddresses[0]),
fmt.Sprintf("https://%s:%d", cfg.AdvertiseAddresses[0], cfg.BindPort),
certutil.EncodeCertPEM(caCert),
)

View File

@ -53,37 +53,37 @@ const (
// WriteStaticPodManifests builds manifest objects based on user provided configuration and then dumps it to disk
// where kubelet will pick and schedule them.
func WriteStaticPodManifests(s *kubeadmapi.MasterConfiguration) error {
func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error {
envParams := kubeadmapi.GetEnvParams()
// Prepare static pod specs
staticPodSpecs := map[string]api.Pod{
kubeAPIServer: componentPod(api.Container{
Name: kubeAPIServer,
Image: images.GetCoreImage(images.KubeAPIServerImage, s, envParams["hyperkube_image"]),
Command: getComponentCommand(apiServer, s),
Image: images.GetCoreImage(images.KubeAPIServerImage, cfg, envParams["hyperkube_image"]),
Command: getComponentCommand(apiServer, cfg),
VolumeMounts: []api.VolumeMount{certsVolumeMount(), k8sVolumeMount()},
LivenessProbe: componentProbe(8080, "/healthz"),
Resources: componentResources("250m"),
}, certsVolume(s), k8sVolume(s)),
}, certsVolume(cfg), k8sVolume(cfg)),
kubeControllerManager: componentPod(api.Container{
Name: kubeControllerManager,
Image: images.GetCoreImage(images.KubeControllerManagerImage, s, envParams["hyperkube_image"]),
Command: getComponentCommand(controllerManager, s),
Image: images.GetCoreImage(images.KubeControllerManagerImage, cfg, envParams["hyperkube_image"]),
Command: getComponentCommand(controllerManager, cfg),
VolumeMounts: []api.VolumeMount{certsVolumeMount(), k8sVolumeMount()},
LivenessProbe: componentProbe(10252, "/healthz"),
Resources: componentResources("200m"),
}, certsVolume(s), k8sVolume(s)),
}, certsVolume(cfg), k8sVolume(cfg)),
kubeScheduler: componentPod(api.Container{
Name: kubeScheduler,
Image: images.GetCoreImage(images.KubeSchedulerImage, s, envParams["hyperkube_image"]),
Command: getComponentCommand(scheduler, s),
Image: images.GetCoreImage(images.KubeSchedulerImage, cfg, envParams["hyperkube_image"]),
Command: getComponentCommand(scheduler, cfg),
LivenessProbe: componentProbe(10251, "/healthz"),
Resources: componentResources("100m"),
}),
}
// Add etcd static pod spec only if external etcd is not configured
if len(s.Etcd.Endpoints) == 0 {
if len(cfg.Etcd.Endpoints) == 0 {
staticPodSpecs[etcd] = componentPod(api.Container{
Name: etcd,
Command: []string{
@ -93,7 +93,7 @@ func WriteStaticPodManifests(s *kubeadmapi.MasterConfiguration) error {
"--data-dir=/var/etcd/data",
},
VolumeMounts: []api.VolumeMount{certsVolumeMount(), etcdVolumeMount(), k8sVolumeMount()},
Image: images.GetCoreImage(images.KubeEtcdImage, s, envParams["etcd_image"]),
Image: images.GetCoreImage(images.KubeEtcdImage, cfg, envParams["etcd_image"]),
LivenessProbe: componentProbe(2379, "/health"),
Resources: componentResources("200m"),
SecurityContext: &api.SecurityContext{
@ -105,7 +105,7 @@ func WriteStaticPodManifests(s *kubeadmapi.MasterConfiguration) error {
Type: "unconfined_t",
},
},
}, certsVolume(s), etcdVolume(s), k8sVolume(s))
}, certsVolume(cfg), etcdVolume(cfg), k8sVolume(cfg))
}
manifestsPath := path.Join(envParams["kubernetes_dir"], "manifests")
@ -126,7 +126,7 @@ func WriteStaticPodManifests(s *kubeadmapi.MasterConfiguration) error {
}
// etcdVolume exposes a path on the host in order to guarantee data survival during reboot.
func etcdVolume(s *kubeadmapi.MasterConfiguration) api.Volume {
func etcdVolume(cfg *kubeadmapi.MasterConfiguration) api.Volume {
envParams := kubeadmapi.GetEnvParams()
return api.Volume{
Name: "etcd",
@ -144,7 +144,7 @@ func etcdVolumeMount() api.VolumeMount {
}
// certsVolume exposes host SSL certificates to pod containers.
func certsVolume(s *kubeadmapi.MasterConfiguration) api.Volume {
func certsVolume(cfg *kubeadmapi.MasterConfiguration) api.Volume {
return api.Volume{
Name: "certs",
VolumeSource: api.VolumeSource{
@ -161,7 +161,7 @@ func certsVolumeMount() api.VolumeMount {
}
}
func k8sVolume(s *kubeadmapi.MasterConfiguration) api.Volume {
func k8sVolume(cfg *kubeadmapi.MasterConfiguration) api.Volume {
envParams := kubeadmapi.GetEnvParams()
return api.Volume{
Name: "pki",
@ -221,18 +221,18 @@ func componentPod(container api.Container, volumes ...api.Volume) api.Pod {
}
}
func getComponentCommand(component string, s *kubeadmapi.MasterConfiguration) (command []string) {
func getComponentCommand(component string, cfg *kubeadmapi.MasterConfiguration) (command []string) {
baseFlags := map[string][]string{
apiServer: {
"--insecure-bind-address=127.0.0.1",
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
"--service-cluster-ip-range=" + s.Networking.ServiceSubnet,
"--service-cluster-ip-range=" + cfg.Networking.ServiceSubnet,
"--service-account-key-file=" + pkiDir + "/apiserver-key.pem",
"--client-ca-file=" + pkiDir + "/ca.pem",
"--tls-cert-file=" + pkiDir + "/apiserver.pem",
"--tls-private-key-file=" + pkiDir + "/apiserver-key.pem",
"--token-auth-file=" + pkiDir + "/tokens.csv",
"--secure-port=443",
fmt.Sprintf("--secure-port=%d", cfg.API.BindPort),
"--allow-privileged",
},
controllerManager: {
@ -266,30 +266,30 @@ func getComponentCommand(component string, s *kubeadmapi.MasterConfiguration) (c
if component == apiServer {
// Use first address we are given
if len(s.API.AdvertiseAddresses) > 0 {
command = append(command, fmt.Sprintf("--advertise-address=%s", s.API.AdvertiseAddresses[0]))
if len(cfg.API.AdvertiseAddresses) > 0 {
command = append(command, fmt.Sprintf("--advertise-address=%s", cfg.API.AdvertiseAddresses[0]))
}
// Check if the user decided to use an external etcd cluster
if len(s.Etcd.Endpoints) > 0 {
command = append(command, fmt.Sprintf("--etcd-servers=%s", strings.Join(s.Etcd.Endpoints, ",")))
if len(cfg.Etcd.Endpoints) > 0 {
command = append(command, fmt.Sprintf("--etcd-servers=%s", strings.Join(cfg.Etcd.Endpoints, ",")))
} else {
command = append(command, "--etcd-servers=http://127.0.0.1:2379")
}
// Is etcd secured?
if s.Etcd.CAFile != "" {
command = append(command, fmt.Sprintf("--etcd-cafile=%s", s.Etcd.CAFile))
if cfg.Etcd.CAFile != "" {
command = append(command, fmt.Sprintf("--etcd-cafile=%s", cfg.Etcd.CAFile))
}
if s.Etcd.CertFile != "" && s.Etcd.KeyFile != "" {
etcdClientFileArg := fmt.Sprintf("--etcd-certfile=%s", s.Etcd.CertFile)
etcdKeyFileArg := fmt.Sprintf("--etcd-keyfile=%s", s.Etcd.KeyFile)
if cfg.Etcd.CertFile != "" && cfg.Etcd.KeyFile != "" {
etcdClientFileArg := fmt.Sprintf("--etcd-certfile=%s", cfg.Etcd.CertFile)
etcdKeyFileArg := fmt.Sprintf("--etcd-keyfile=%s", cfg.Etcd.KeyFile)
command = append(command, etcdClientFileArg, etcdKeyFileArg)
}
}
if component == controllerManager {
if s.CloudProvider != "" {
command = append(command, "--cloud-provider="+s.CloudProvider)
if cfg.CloudProvider != "" {
command = append(command, "--cloud-provider="+cfg.CloudProvider)
// Only append the --cloud-config option if there's a such file
// TODO(phase1+) this won't work unless it's in one of the few directories we bind-mount
@ -299,8 +299,8 @@ func getComponentCommand(component string, s *kubeadmapi.MasterConfiguration) (c
}
// Let the controller-manager allocate Node CIDRs for the Pod network.
// Each node will get a subspace of the address CIDR provided with --pod-network-cidr.
if s.Networking.PodSubnet != "" {
command = append(command, "--allocate-node-cidrs=true", "--cluster-cidr="+s.Networking.PodSubnet)
if cfg.Networking.PodSubnet != "" {
command = append(command, "--allocate-node-cidrs=true", "--cluster-cidr="+cfg.Networking.PodSubnet)
}
}

View File

@ -46,7 +46,7 @@ func newCertificateAuthority() (*rsa.PrivateKey, *x509.Certificate, error) {
return key, cert, nil
}
func newServerKeyAndCert(s *kubeadmapi.MasterConfiguration, caCert *x509.Certificate, caKey *rsa.PrivateKey, altNames certutil.AltNames) (*rsa.PrivateKey, *x509.Certificate, error) {
func newServerKeyAndCert(cfg *kubeadmapi.MasterConfiguration, caCert *x509.Certificate, caKey *rsa.PrivateKey, altNames certutil.AltNames) (*rsa.PrivateKey, *x509.Certificate, error) {
key, err := certutil.NewPrivateKey()
if err != nil {
return nil, nil, fmt.Errorf("unabel to create private key [%v]", err)
@ -56,16 +56,16 @@ func newServerKeyAndCert(s *kubeadmapi.MasterConfiguration, caCert *x509.Certifi
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
fmt.Sprintf("kubernetes.default.svc.%s", s.Networking.DNSDomain),
fmt.Sprintf("kubernetes.default.svc.%s", cfg.Networking.DNSDomain),
}
_, n, err := net.ParseCIDR(s.Networking.ServiceSubnet)
_, n, err := net.ParseCIDR(cfg.Networking.ServiceSubnet)
if err != nil {
return nil, nil, fmt.Errorf("error parsing CIDR %q: %v", s.Networking.ServiceSubnet, err)
return nil, nil, fmt.Errorf("error parsing CIDR %q: %v", cfg.Networking.ServiceSubnet, err)
}
internalAPIServerVirtualIP, err := ipallocator.GetIndexedIP(n, 1)
if err != nil {
return nil, nil, fmt.Errorf("unable to allocate IP address for the API server from the given CIDR (%q) [%v]", &s.Networking.ServiceSubnet, err)
return nil, nil, fmt.Errorf("unable to allocate IP address for the API server from the given CIDR (%q) [%v]", &cfg.Networking.ServiceSubnet, err)
}
altNames.IPs = append(altNames.IPs, internalAPIServerVirtualIP)
@ -143,20 +143,20 @@ func newServiceAccountKey() (*rsa.PrivateKey, error) {
// It first generates a self-signed CA certificate, a server certificate (signed by the CA) and a key for
// signing service account tokens. It returns CA key and certificate, which is convenient for use with
// client config funcs.
func CreatePKIAssets(s *kubeadmapi.MasterConfiguration) (*rsa.PrivateKey, *x509.Certificate, error) {
func CreatePKIAssets(cfg *kubeadmapi.MasterConfiguration) (*rsa.PrivateKey, *x509.Certificate, error) {
var (
err error
altNames certutil.AltNames
)
for _, a := range s.API.AdvertiseAddresses {
for _, a := range cfg.API.AdvertiseAddresses {
if ip := net.ParseIP(a); ip != nil {
altNames.IPs = append(altNames.IPs, ip)
} else {
return nil, nil, fmt.Errorf("could not parse ip %q", a)
}
}
altNames.DNSNames = append(altNames.DNSNames, s.API.ExternalDNSNames...)
altNames.DNSNames = append(altNames.DNSNames, cfg.API.ExternalDNSNames...)
pkiPath := path.Join(kubeadmapi.GetEnvParams()["host_pki_path"])
@ -172,7 +172,7 @@ func CreatePKIAssets(s *kubeadmapi.MasterConfiguration) (*rsa.PrivateKey, *x509.
pub, prv, cert := pathsKeysCerts(pkiPath, "ca")
fmt.Printf("Public: %s\nPrivate: %s\nCert: %s\n", pub, prv, cert)
apiKey, apiCert, err := newServerKeyAndCert(s, caCert, caKey, altNames)
apiKey, apiCert, err := newServerKeyAndCert(cfg, caCert, caKey, altNames)
if err != nil {
return nil, nil, fmt.Errorf("<master/pki> failure while creating API server keys and certificate - %v", err)
}

View File

@ -33,7 +33,7 @@ import (
const discoveryRetryTimeout = 5 * time.Second
func RetrieveTrustedClusterInfo(s *kubeadmapi.NodeConfiguration) (*kubeadmapi.ClusterInfo, error) {
host, port := s.MasterAddresses[0], 9898
host, port := s.MasterAddresses[0], s.DiscoveryPort
requestURL := fmt.Sprintf("http://%s:%d/cluster-info/v1/?token-id=%s", host, port, s.Secrets.TokenID)
req, err := http.NewRequest("GET", requestURL, nil)
if err != nil {

View File

@ -24,6 +24,7 @@ import (
"os"
"os/exec"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/pkg/util/initsystem"
)
@ -156,15 +157,16 @@ func (ipc InPathCheck) Check() (warnings, errors []error) {
return nil, nil
}
func RunInitMasterChecks() error {
func RunInitMasterChecks(cfg *kubeadmapi.MasterConfiguration) error {
// TODO: Some of these ports should come from kubeadm config eventually:
checks := []PreFlightCheck{
IsRootCheck{root: true},
ServiceCheck{Service: "kubelet"},
ServiceCheck{Service: "docker"},
PortOpenCheck{port: 443},
PortOpenCheck{port: int(cfg.API.BindPort)},
PortOpenCheck{port: 2379},
PortOpenCheck{port: 8080},
PortOpenCheck{port: int(cfg.Discovery.BindPort)},
PortOpenCheck{port: 10250},
PortOpenCheck{port: 10251},
PortOpenCheck{port: 10252},

View File

@ -11,22 +11,26 @@ allow-privileged
allowed-not-ready-nodes
anonymous-auth
api-advertise-addresses
api-external-dns-names
api-burst
api-external-dns-names
api-port
api-prefix
api-rate
apiserver-count
api-server-port
api-servers
api-token
api-version
apiserver-count
apiserver-count
audit-log-maxage
audit-log-maxbackup
audit-log-maxsize
audit-log-path
auth-path
auth-path
auth-provider
auth-provider
auth-provider-arg
auth-provider-arg
authentication-token-webhook-cache-ttl
authentication-token-webhook-config-file
@ -36,9 +40,6 @@ authorization-rbac-super-user
authorization-webhook-cache-authorized-ttl
authorization-webhook-cache-unauthorized-ttl
authorization-webhook-config-file
auth-path
auth-provider
auth-provider-arg
babysit-daemons
basic-auth-file
bench-pods
@ -55,8 +56,8 @@ build-tag
cadvisor-port
cert-dir
certificate-authority
cgroup-root
cgroup-driver
cgroup-root
cgroups-per-qos
chaos-chance
clean-start
@ -79,17 +80,17 @@ cluster-monitor-period
cluster-name
cluster-signing-cert-file
cluster-signing-key-file
cluster-tag
cni-bin-dir
cni-conf-dir
cluster-tag
concurrent-deployment-syncs
concurrent-endpoint-syncs
concurrent-gc-syncs
concurrent-namespace-syncs
concurrent-replicaset-syncs
concurrent-resource-quota-syncs
concurrent-serviceaccount-token-syncs
concurrent-service-syncs
concurrent-serviceaccount-token-syncs
config-sync-period
configure-cbr0
configure-cloud-routes
@ -100,10 +101,10 @@ conntrack-tcp-timeout-established
consumer-port
consumer-service-name
consumer-service-namespace
contain-pod-resources
container-port
container-runtime
container-runtime-endpoint
contain-pod-resources
controller-start-interval
cors-allowed-origins
cpu-cfs-quota
@ -129,17 +130,18 @@ deserialization-cache-size
dest-file
disable-filter
disable-kubenet
discovery-port
dns-bind-address
dns-port
dns-provider
dns-provider-config
dockercfg-path
docker-email
docker-endpoint
docker-exec-handler
docker-password
docker-server
docker-username
dockercfg-path
driver-port
drop-embedded-fields
dry-run
@ -153,10 +155,10 @@ enable-debugging-handlers
enable-dynamic-provisioning
enable-garbage-collector
enable-garbage-collector
enable-garbage-collector
enable-hostpath-provisioner
enable-server
enable-swagger-ui
enable-garbage-collector
etcd-address
etcd-cafile
etcd-certfile
@ -204,8 +206,8 @@ federated-api-qps
federated-kube-context
federation-name
file-check-frequency
file_content_in_loop
file-suffix
file_content_in_loop
flex-volume-plugin-dir
forward-services
framework-name
@ -215,6 +217,7 @@ from-file
from-literal
func-dest
fuzz-iters
garbage-collector-enabled
gather-logs-sizes
gather-metrics-at-teardown
gather-resource-usage
@ -234,10 +237,10 @@ healthz-bind-address
healthz-port
horizontal-pod-autoscaler-sync-period
host-ipc-sources
hostname-override
host-network-sources
host-pid-sources
host-port-endpoints
hostname-override
http-check-frequency
http-port
ignore-daemonsets
@ -249,8 +252,8 @@ image-project
image-pull-policy
image-service-endpoint
include-extended-apis
included-types-overrides
include-extended-apis
included-types-overrides
input-base
input-dirs
insecure-allow-any-token
@ -286,6 +289,10 @@ kops-zones
kube-api-burst
kube-api-content-type
kube-api-qps
kube-master
kube-master
kube-master-url
kube-reserved
kubecfg-file
kubectl-path
kubelet-address
@ -307,10 +314,6 @@ kubelet-read-only-port
kubelet-root-dir
kubelet-sync-frequency
kubelet-timeout
kube-master
kube-master
kube-master-url
kube-reserved
kubernetes-service-node-port
label-columns
large-cluster-size-threshold
@ -337,8 +340,6 @@ master-os-distro
master-service-namespace
max-concurrency
max-connection-bytes-per-sec
maximum-dead-containers
maximum-dead-containers-per-container
max-log-age
max-log-backups
max-log-size
@ -347,6 +348,8 @@ max-outgoing-burst
max-outgoing-qps
max-pods
max-requests-inflight
maximum-dead-containers
maximum-dead-containers-per-container
mesos-authentication-principal
mesos-authentication-provider
mesos-authentication-secret-file
@ -360,20 +363,22 @@ mesos-launch-grace-period
mesos-master
mesos-sandbox-overlay
mesos-user
min-pr-number
min-request-timeout
min-resync-period
minimum-container-ttl-duration
minimum-image-ttl-duration
minion-max-log-age
minion-max-log-backups
minion-max-log-size
minion-path-override
min-pr-number
min-request-timeout
min-resync-period
namespace-sync-period
network-plugin
network-plugin-dir
network-plugin-mtu
no-headers
no-headers
no-suggestions
no-suggestions
node-cidr-mask-size
node-eviction-rate
@ -392,9 +397,7 @@ node-port
node-startup-grace-period
node-status-update-frequency
node-sync-period
no-headers
non-masquerade-cidr
no-suggestions
num-nodes
oidc-ca-file
oidc-client-id
@ -403,6 +406,7 @@ oidc-issuer-url
oidc-username-claim
only-idl
oom-score-adj
out-version
outofdisk-transition-frequency
output-base
output-directory
@ -410,7 +414,6 @@ output-file-base
output-package
output-print-type
output-version
out-version
path-override
pod-cidr
pod-eviction-timeout
@ -433,7 +436,6 @@ proxy-logv
proxy-mode
proxy-port-range
public-address-override
pvclaimbinder-sync-period
pv-recycler-increment-timeout-nfs
pv-recycler-maximum-retry
pv-recycler-minimum-timeout-hostpath
@ -441,6 +443,7 @@ pv-recycler-minimum-timeout-nfs
pv-recycler-pod-template-filepath-hostpath
pv-recycler-pod-template-filepath-nfs
pv-recycler-timeout-increment-hostpath
pvclaimbinder-sync-period
read-only-port
really-crash-for-testing
reconcile-cidr
@ -459,8 +462,8 @@ replication-controller-lookup-cache-size
repo-root
report-dir
report-prefix
required-contexts
require-kubeconfig
required-contexts
resolv-conf
resource-container
resource-quota-sync-period
@ -494,8 +497,8 @@ service-account-key-file
service-account-lookup
service-account-private-key-file
service-address
service-cluster-ip-range
service-cidr
service-cluster-ip-range
service-dns-domain
service-generator
service-node-port-range
@ -545,9 +548,9 @@ test-timeout
tls-ca-file
tls-cert-file
tls-private-key-file
to-version
token-auth-file
ttl-keys-prefix
to-version
ttl-secs
type-src
udp-port
@ -561,6 +564,7 @@ use-kubernetes-cluster-service
use-kubernetes-version
user-whitelist
verify-only
viper-config
volume-dir
volume-plugin-dir
volume-stats-agg-period