mirror of https://github.com/k3s-io/kubernetes.git
cmd/kubeadm
commit e3cf383181 (parent eca157588d)
@@ -25,21 +25,23 @@ import (
 	"k8s.io/kubernetes/cmd/kubeadm/app/images"
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/resource"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	"k8s.io/kubernetes/pkg/api/v1"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	ipallocator "k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
 	"k8s.io/kubernetes/pkg/util/intstr"
 )

-func createKubeProxyPodSpec(cfg *kubeadmapi.MasterConfiguration) api.PodSpec {
+func createKubeProxyPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.PodSpec {
 	privilegedTrue := true
-	return api.PodSpec{
-		SecurityContext: &api.PodSecurityContext{HostNetwork: true},
-		Containers: []api.Container{{
+	return v1.PodSpec{
+		HostNetwork:     true,
+		SecurityContext: &v1.PodSecurityContext{},
+		Containers: []v1.Container{{
 			Name:    kubeProxy,
 			Image:   images.GetCoreImage(images.KubeProxyImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
 			Command: append(getProxyCommand(cfg), "--kubeconfig=/run/kubeconfig"),
-			SecurityContext: &api.SecurityContext{Privileged: &privilegedTrue},
-			VolumeMounts: []api.VolumeMount{
+			SecurityContext: &v1.SecurityContext{Privileged: &privilegedTrue},
+			VolumeMounts: []v1.VolumeMount{
 				{
 					Name:      "dbus",
 					MountPath: "/var/run/dbus",
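Note: in the internal API, host networking was a field on PodSecurityContext, while in v1 it is a top-level PodSpec field. That is why the hunk above swaps `SecurityContext: &api.PodSecurityContext{HostNetwork: true}` for `HostNetwork: true` plus an empty v1.PodSecurityContext. A minimal sketch using only the v1 types shown in this diff:

    podSpec := v1.PodSpec{
        HostNetwork:     true,                     // v1: top-level field on the pod spec
        SecurityContext: &v1.PodSecurityContext{}, // v1.PodSecurityContext has no HostNetwork field
    }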
@@ -62,33 +64,33 @@ func createKubeProxyPodSpec(cfg *kubeadmapi.MasterConfiguration) api.PodSpec {
 				},
 			},
 		}},
-		Volumes: []api.Volume{
+		Volumes: []v1.Volume{
 			{
 				Name: "kubeconfig",
-				VolumeSource: api.VolumeSource{
-					HostPath: &api.HostPathVolumeSource{Path: path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "kubelet.conf")},
+				VolumeSource: v1.VolumeSource{
+					HostPath: &v1.HostPathVolumeSource{Path: path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "kubelet.conf")},
 				},
 			},
 			{
 				Name: "dbus",
-				VolumeSource: api.VolumeSource{
-					HostPath: &api.HostPathVolumeSource{Path: "/var/run/dbus"},
+				VolumeSource: v1.VolumeSource{
+					HostPath: &v1.HostPathVolumeSource{Path: "/var/run/dbus"},
 				},
 			},
 		},
 	}
 }

-func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) api.PodSpec {
+func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.PodSpec {

-	dnsPodResources := api.ResourceList{
-		api.ResourceName(api.ResourceCPU):    resource.MustParse("100m"),
-		api.ResourceName(api.ResourceMemory): resource.MustParse("170Mi"),
+	dnsPodResources := v1.ResourceList{
+		v1.ResourceName(v1.ResourceCPU):    resource.MustParse("100m"),
+		v1.ResourceName(v1.ResourceMemory): resource.MustParse("170Mi"),
 	}

-	healthzPodResources := api.ResourceList{
-		api.ResourceName(api.ResourceCPU):    resource.MustParse("10m"),
-		api.ResourceName(api.ResourceMemory): resource.MustParse("50Mi"),
+	healthzPodResources := v1.ResourceList{
+		v1.ResourceName(v1.ResourceCPU):    resource.MustParse("10m"),
+		v1.ResourceName(v1.ResourceMemory): resource.MustParse("50Mi"),
 	}

 	kubeDNSPort := int32(10053)
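The requests/limits pattern above reads the same in both API versions: resource.MustParse turns a human-readable quantity ("100m" of CPU, "170Mi" of memory) into a resource.Quantity, and one v1.ResourceList can serve as both Limits and Requests. A minimal sketch with the names used in this diff (the explicit v1.ResourceName conversion in the hunk is equivalent, since ResourceCPU/ResourceMemory are already ResourceName constants):

    dnsPodResources := v1.ResourceList{
        v1.ResourceCPU:    resource.MustParse("100m"),
        v1.ResourceMemory: resource.MustParse("170Mi"),
    }
    requirements := v1.ResourceRequirements{Limits: dnsPodResources, Requests: dnsPodResources}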
@@ -101,13 +103,13 @@ func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) api.PodSpec {
 		nslookup, kubeDNSPort,
 	)

-	return api.PodSpec{
-		Containers: []api.Container{
+	return v1.PodSpec{
+		Containers: []v1.Container{
 			// DNS server
 			{
 				Name:  "kube-dns",
 				Image: images.GetAddonImage(images.KubeDNSImage),
-				Resources: api.ResourceRequirements{
+				Resources: v1.ResourceRequirements{
 					Limits:   dnsPodResources,
 					Requests: dnsPodResources,
 				},
@@ -116,12 +118,12 @@ func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) api.PodSpec {
 					fmt.Sprintf("--dns-port=%d", kubeDNSPort),
 					// TODO __PILLAR__FEDERATIONS__DOMAIN__MAP__
 				},
-				LivenessProbe: &api.Probe{
-					Handler: api.Handler{
-						HTTPGet: &api.HTTPGetAction{
+				LivenessProbe: &v1.Probe{
+					Handler: v1.Handler{
+						HTTPGet: &v1.HTTPGetAction{
 							Path:   "/healthz",
 							Port:   intstr.FromInt(8080),
-							Scheme: api.URISchemeHTTP,
+							Scheme: v1.URISchemeHTTP,
 						},
 					},
 					InitialDelaySeconds: 60,
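Both probes in this file follow the same shape: a v1.Handler wrapping an HTTPGet action whose Port is an intstr.IntOrString, built here with intstr.FromInt. Condensed from the hunks, using only types that appear in this diff:

    probe := &v1.Probe{
        Handler: v1.Handler{
            HTTPGet: &v1.HTTPGetAction{
                Path:   "/healthz",
                Port:   intstr.FromInt(8080), // IntOrString also accepts named ports via intstr.FromString
                Scheme: v1.URISchemeHTTP,
            },
        },
        InitialDelaySeconds: 60,
    }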
@@ -131,27 +133,27 @@ func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) api.PodSpec {
 				},
 				// # we poll on pod startup for the Kubernetes master service and
 				// # only setup the /readiness HTTP server once that's available.
-				ReadinessProbe: &api.Probe{
-					Handler: api.Handler{
-						HTTPGet: &api.HTTPGetAction{
+				ReadinessProbe: &v1.Probe{
+					Handler: v1.Handler{
+						HTTPGet: &v1.HTTPGetAction{
 							Path:   "/readiness",
 							Port:   intstr.FromInt(8081),
-							Scheme: api.URISchemeHTTP,
+							Scheme: v1.URISchemeHTTP,
 						},
 					},
 					InitialDelaySeconds: 30,
 					TimeoutSeconds:      5,
 				},
-				Ports: []api.ContainerPort{
+				Ports: []v1.ContainerPort{
 					{
 						ContainerPort: kubeDNSPort,
 						Name:          "dns-local",
-						Protocol:      api.ProtocolUDP,
+						Protocol:      v1.ProtocolUDP,
 					},
 					{
 						ContainerPort: kubeDNSPort,
 						Name:          "dns-tcp-local",
-						Protocol:      api.ProtocolTCP,
+						Protocol:      v1.ProtocolTCP,
 					},
 				},
 			},
@@ -159,7 +161,7 @@ func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) api.PodSpec {
 			{
 				Name:  "dnsmasq",
 				Image: images.GetAddonImage(images.KubeDNSmasqImage),
-				Resources: api.ResourceRequirements{
+				Resources: v1.ResourceRequirements{
 					Limits:   dnsPodResources,
 					Requests: dnsPodResources,
 				},
@@ -168,16 +170,16 @@ func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) api.PodSpec {
 					"--no-resolv",
 					fmt.Sprintf("--server=127.0.0.1#%d", kubeDNSPort),
 				},
-				Ports: []api.ContainerPort{
+				Ports: []v1.ContainerPort{
 					{
 						ContainerPort: dnsmasqPort,
 						Name:          "dns",
-						Protocol:      api.ProtocolUDP,
+						Protocol:      v1.ProtocolUDP,
 					},
 					{
 						ContainerPort: dnsmasqPort,
 						Name:          "dns-tcp",
-						Protocol:      api.ProtocolTCP,
+						Protocol:      v1.ProtocolTCP,
 					},
 				},
 			},
@@ -185,7 +187,7 @@ func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) api.PodSpec {
 			{
 				Name:  "healthz",
 				Image: images.GetAddonImage(images.KubeExechealthzImage),
-				Resources: api.ResourceRequirements{
+				Resources: v1.ResourceRequirements{
 					Limits:   healthzPodResources,
 					Requests: healthzPodResources,
 				},
@@ -194,18 +196,18 @@ func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) api.PodSpec {
 					"-port=8080",
 					"-quiet",
 				},
-				Ports: []api.ContainerPort{{
+				Ports: []v1.ContainerPort{{
 					ContainerPort: 8080,
-					Protocol:      api.ProtocolTCP,
+					Protocol:      v1.ProtocolTCP,
 				}},
 			},
 		},
-		DNSPolicy: api.DNSDefault,
+		DNSPolicy: v1.DNSDefault,
 	}

 }

-func createKubeDNSServiceSpec(cfg *kubeadmapi.MasterConfiguration) (*api.ServiceSpec, error) {
+func createKubeDNSServiceSpec(cfg *kubeadmapi.MasterConfiguration) (*v1.ServiceSpec, error) {
 	_, n, err := net.ParseCIDR(cfg.Networking.ServiceSubnet)
 	if err != nil {
 		return nil, fmt.Errorf("could not parse %q: %v", cfg.Networking.ServiceSubnet, err)
@@ -215,11 +217,11 @@ func createKubeDNSServiceSpec(cfg *kubeadmapi.MasterConfiguration) (*api.Service
 		return nil, fmt.Errorf("unable to allocate IP address for kube-dns addon from the given CIDR (%q) [%v]", cfg.Networking.ServiceSubnet, err)
 	}

-	svc := &api.ServiceSpec{
+	svc := &v1.ServiceSpec{
 		Selector: map[string]string{"name": "kube-dns"},
-		Ports: []api.ServicePort{
-			{Name: "dns", Port: 53, Protocol: api.ProtocolUDP},
-			{Name: "dns-tcp", Port: 53, Protocol: api.ProtocolTCP},
+		Ports: []v1.ServicePort{
+			{Name: "dns", Port: 53, Protocol: v1.ProtocolUDP},
+			{Name: "dns-tcp", Port: 53, Protocol: v1.ProtocolTCP},
 		},
 		ClusterIP: ip.String(),
 	}
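The ClusterIP assigned above is why this file imports ipallocator: the service subnet is parsed with net.ParseCIDR and a fixed offset inside it is reserved for kube-dns. A hedged sketch of that step (the offset 10 is an assumption for illustration; the actual offset is chosen by code elided between these hunks):

    func dnsServiceIP(serviceSubnet string) (net.IP, error) {
        _, n, err := net.ParseCIDR(serviceSubnet)
        if err != nil {
            return nil, fmt.Errorf("could not parse %q: %v", serviceSubnet, err)
        }
        // e.g. 10.96.0.0/12 with index 10 yields 10.96.0.10
        return ipallocator.GetIndexedIP(n, 10)
    }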
@@ -26,8 +26,9 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	apierrs "k8s.io/kubernetes/pkg/api/errors"
 	unversionedapi "k8s.io/kubernetes/pkg/api/unversioned"
-	"k8s.io/kubernetes/pkg/apis/extensions"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	"k8s.io/kubernetes/pkg/api/v1"
+	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 	clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
 	"k8s.io/kubernetes/pkg/util/wait"
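The hunks below switch kubeadm's API-client helpers from the internal clientset to the generated release_1_5 one. A hedged sketch of how such a client is typically built from the admin kubeconfig, assuming the generated clientset keeps the standard NewForConfig constructor (the helper name is hypothetical):

    func buildClient(adminConfig *clientcmdapi.Config) (*clientset.Clientset, error) {
        // Resolve the kubeconfig into a REST config, then hand it to the generated clientset.
        restCfg, err := clientcmd.NewDefaultClientConfig(*adminConfig, &clientcmd.ConfigOverrides{}).ClientConfig()
        if err != nil {
            return nil, err
        }
        return clientset.NewForConfig(restCfg)
    }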
@@ -55,7 +56,7 @@ func CreateClientAndWaitForAPI(adminConfig *clientcmdapi.Config) (*clientset.Cli

 	start := time.Now()
 	wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
-		cs, err := client.ComponentStatuses().List(api.ListOptions{})
+		cs, err := client.ComponentStatuses().List(v1.ListOptions{})
 		if err != nil {
 			return false, nil
 		}
@@ -66,7 +67,7 @@ func CreateClientAndWaitForAPI(adminConfig *clientcmdapi.Config) (*clientset.Cli
 	}
 	for _, item := range cs.Items {
 		for _, condition := range item.Conditions {
-			if condition.Type != api.ComponentHealthy {
+			if condition.Type != v1.ComponentHealthy {
 				fmt.Printf("<master/apiclient> control plane component %q is still unhealthy: %#v\n", item.ObjectMeta.Name, item.Conditions)
 				return false, nil
 			}
@@ -80,7 +81,7 @@ func CreateClientAndWaitForAPI(adminConfig *clientcmdapi.Config) (*clientset.Cli
 	fmt.Println("<master/apiclient> waiting for at least one node to register and become ready")
 	start = time.Now()
 	wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
-		nodeList, err := client.Nodes().List(api.ListOptions{})
+		nodeList, err := client.Nodes().List(v1.ListOptions{})
 		if err != nil {
 			fmt.Println("<master/apiclient> temporarily unable to list nodes (will retry)")
 			return false, nil
@@ -89,7 +90,7 @@ func CreateClientAndWaitForAPI(adminConfig *clientcmdapi.Config) (*clientset.Cli
 			return false, nil
 		}
 		n := &nodeList.Items[0]
-		if !api.IsNodeReady(n) {
+		if !v1.IsNodeReady(n) {
 			fmt.Println("<master/apiclient> first node has registered, but is not ready yet")
 			return false, nil
 		}
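The two loops above share one retry idiom: wait.PollInfinite invokes the condition function every apiCallRetryInterval, where returning (false, nil) means "not ready yet, try again" and (true, nil) stops the loop. Condensed from the hunks above:

    wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
        nodeList, err := client.Nodes().List(v1.ListOptions{})
        if err != nil || len(nodeList.Items) == 0 {
            return false, nil // keep polling; list errors are treated as transient
        }
        return v1.IsNodeReady(&nodeList.Items[0]), nil
    })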
@@ -110,24 +111,24 @@ func standardLabels(n string) map[string]string {
 	}
 }

-func NewDaemonSet(daemonName string, podSpec api.PodSpec) *extensions.DaemonSet {
+func NewDaemonSet(daemonName string, podSpec v1.PodSpec) *extensions.DaemonSet {
 	l := standardLabels(daemonName)
 	return &extensions.DaemonSet{
-		ObjectMeta: api.ObjectMeta{Name: daemonName},
+		ObjectMeta: v1.ObjectMeta{Name: daemonName},
 		Spec: extensions.DaemonSetSpec{
 			Selector: &unversionedapi.LabelSelector{MatchLabels: l},
-			Template: api.PodTemplateSpec{
-				ObjectMeta: api.ObjectMeta{Labels: l},
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: v1.ObjectMeta{Labels: l},
 				Spec:       podSpec,
 			},
 		},
 	}
 }

-func NewService(serviceName string, spec api.ServiceSpec) *api.Service {
+func NewService(serviceName string, spec v1.ServiceSpec) *v1.Service {
 	l := standardLabels(serviceName)
-	return &api.Service{
-		ObjectMeta: api.ObjectMeta{
+	return &v1.Service{
+		ObjectMeta: v1.ObjectMeta{
 			Name:   serviceName,
 			Labels: l,
 		},
@@ -135,15 +136,15 @@ func NewService(serviceName string, spec api.ServiceSpec) *api.Service {
 	}
 }

-func NewDeployment(deploymentName string, replicas int32, podSpec api.PodSpec) *extensions.Deployment {
+func NewDeployment(deploymentName string, replicas int32, podSpec v1.PodSpec) *extensions.Deployment {
 	l := standardLabels(deploymentName)
 	return &extensions.Deployment{
-		ObjectMeta: api.ObjectMeta{Name: deploymentName},
+		ObjectMeta: v1.ObjectMeta{Name: deploymentName},
 		Spec: extensions.DeploymentSpec{
-			Replicas: replicas,
+			Replicas: &replicas,
 			Selector: &unversionedapi.LabelSelector{MatchLabels: l},
-			Template: api.PodTemplateSpec{
-				ObjectMeta: api.ObjectMeta{Labels: l},
+			Template: v1.PodTemplateSpec{
+				ObjectMeta: v1.ObjectMeta{Labels: l},
 				Spec:       podSpec,
 			},
 		},
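The one behavioral subtlety here is Replicas: the internal DeploymentSpec took a plain int32, while extensions/v1beta1 takes *int32 so that "unset" can be distinguished from an explicit zero and defaulted server-side. Taking the address of the int32 parameter, as the hunk does, is the usual idiom:

    replicas := int32(1)
    spec := extensions.DeploymentSpec{Replicas: &replicas} // nil would mean "use the server default"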
@@ -152,8 +153,8 @@ func NewDeployment(deploymentName string, replicas int32, podSpec api.PodSpec) *

 // It's safe to do this for alpha, as we don't have HA and there is no way we can get
 // more then one node here (TODO(phase1+) use os.Hostname)
-func findMyself(client *clientset.Clientset) (*api.Node, error) {
-	nodeList, err := client.Nodes().List(api.ListOptions{})
+func findMyself(client *clientset.Clientset) (*v1.Node, error) {
+	nodeList, err := client.Nodes().List(v1.ListOptions{})
 	if err != nil {
 		return nil, fmt.Errorf("unable to list nodes [%v]", err)
 	}
@@ -173,8 +174,8 @@ func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset, sched
 	n.ObjectMeta.Labels[unversionedapi.NodeLabelKubeadmAlphaRole] = unversionedapi.NodeLabelRoleMaster

 	if !schedulable {
-		taintsAnnotation, _ := json.Marshal([]api.Taint{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}})
-		n.ObjectMeta.Annotations[api.TaintsAnnotationKey] = string(taintsAnnotation)
+		taintsAnnotation, _ := json.Marshal([]v1.Taint{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}})
+		n.ObjectMeta.Annotations[v1.TaintsAnnotationKey] = string(taintsAnnotation)
 	}

 	if _, err := client.Nodes().Update(n); err != nil {
@@ -199,50 +200,51 @@ func UpdateMasterRoleLabelsAndTaints(client *clientset.Clientset, schedulable bo
 	return nil
 }

-func SetMasterTaintTolerations(meta *api.ObjectMeta) {
-	tolerationsAnnotation, _ := json.Marshal([]api.Toleration{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}})
+func SetMasterTaintTolerations(meta *v1.ObjectMeta) {
+	tolerationsAnnotation, _ := json.Marshal([]v1.Toleration{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}})
 	if meta.Annotations == nil {
 		meta.Annotations = map[string]string{}
 	}
-	meta.Annotations[api.TolerationsAnnotationKey] = string(tolerationsAnnotation)
+	meta.Annotations[v1.TolerationsAnnotationKey] = string(tolerationsAnnotation)
 }

-// SetNodeAffinity is a basic helper to set meta.Annotations[api.AffinityAnnotationKey] for one or more api.NodeSelectorRequirement(s)
-func SetNodeAffinity(meta *api.ObjectMeta, expr ...api.NodeSelectorRequirement) {
-	nodeAffinity := &api.NodeAffinity{
-		RequiredDuringSchedulingIgnoredDuringExecution: &api.NodeSelector{
-			NodeSelectorTerms: []api.NodeSelectorTerm{{MatchExpressions: expr}},
+// SetNodeAffinity is a basic helper to set meta.Annotations[v1.AffinityAnnotationKey] for one or more v1.NodeSelectorRequirement(s)
+func SetNodeAffinity(meta *v1.ObjectMeta, expr ...v1.NodeSelectorRequirement) {
+	nodeAffinity := &v1.NodeAffinity{
+		RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
+			NodeSelectorTerms: []v1.NodeSelectorTerm{{MatchExpressions: expr}},
 		},
 	}
-	affinityAnnotation, _ := json.Marshal(api.Affinity{NodeAffinity: nodeAffinity})
+	affinityAnnotation, _ := json.Marshal(v1.Affinity{NodeAffinity: nodeAffinity})
 	if meta.Annotations == nil {
 		meta.Annotations = map[string]string{}
 	}
-	meta.Annotations[api.AffinityAnnotationKey] = string(affinityAnnotation)
+	meta.Annotations[v1.AffinityAnnotationKey] = string(affinityAnnotation)
 }

-// MasterNodeAffinity returns api.NodeSelectorRequirement to be used with SetNodeAffinity to set affinity to master node
-func MasterNodeAffinity() api.NodeSelectorRequirement {
-	return api.NodeSelectorRequirement{
+// MasterNodeAffinity returns v1.NodeSelectorRequirement to be used with SetNodeAffinity to set affinity to master node
+func MasterNodeAffinity() v1.NodeSelectorRequirement {
+	return v1.NodeSelectorRequirement{
 		Key:      unversionedapi.NodeLabelKubeadmAlphaRole,
-		Operator: api.NodeSelectorOpIn,
+		Operator: v1.NodeSelectorOpIn,
 		Values:   []string{unversionedapi.NodeLabelRoleMaster},
 	}
 }

-// NativeArchitectureNodeAffinity returns api.NodeSelectorRequirement to be used with SetNodeAffinity to nodes with CPU architecture
+// NativeArchitectureNodeAffinity returns v1.NodeSelectorRequirement to be used with SetNodeAffinity to nodes with CPU architecture
 // the same as master node
-func NativeArchitectureNodeAffinity() api.NodeSelectorRequirement {
-	return api.NodeSelectorRequirement{
-		Key: "beta.kubernetes.io/arch", Operator: api.NodeSelectorOpIn, Values: []string{runtime.GOARCH},
+func NativeArchitectureNodeAffinity() v1.NodeSelectorRequirement {
+	return v1.NodeSelectorRequirement{
+		Key: "beta.kubernetes.io/arch", Operator: v1.NodeSelectorOpIn, Values: []string{runtime.GOARCH},
 	}
 }

 func createDummyDeployment(client *clientset.Clientset) {
 	fmt.Println("<master/apiclient> attempting a test deployment")
-	dummyDeployment := NewDeployment("dummy", 1, api.PodSpec{
-		SecurityContext: &api.PodSecurityContext{HostNetwork: true},
-		Containers: []api.Container{{
+	dummyDeployment := NewDeployment("dummy", 1, v1.PodSpec{
+		HostNetwork:     true,
+		SecurityContext: &v1.PodSecurityContext{},
+		Containers: []v1.Container{{
 			Name:  "dummy",
 			Image: images.GetAddonImage("pause"),
 		}},
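At this point in the API's history, taints, tolerations, and node affinity all travel as JSON-encoded annotations (TaintsAnnotationKey, TolerationsAnnotationKey, AffinityAnnotationKey) rather than first-class fields, which is what the json.Marshal calls above are doing. A hypothetical usage of these helpers, pinning a deployment's pod template to the master node and its CPU architecture (the `deployment` variable is assumed):

    SetNodeAffinity(&deployment.Spec.Template.ObjectMeta,
        MasterNodeAffinity(), NativeArchitectureNodeAffinity())
    SetMasterTaintTolerations(&deployment.Spec.Template.ObjectMeta)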
@@ -271,7 +273,7 @@ func createDummyDeployment(client *clientset.Clientset) {

 	fmt.Println("<master/apiclient> test deployment succeeded")

-	if err := client.Extensions().Deployments(api.NamespaceSystem).Delete("dummy", &api.DeleteOptions{}); err != nil {
+	if err := client.Extensions().Deployments(api.NamespaceSystem).Delete("dummy", &v1.DeleteOptions{}); err != nil {
 		fmt.Printf("<master/apiclient> failed to delete test deployment [%v] (will ignore)", err)
 	}
 }
@@ -25,15 +25,16 @@ import (
 	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
 	kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
-	"k8s.io/kubernetes/pkg/api"
-	"k8s.io/kubernetes/pkg/apis/extensions"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	"k8s.io/kubernetes/pkg/api/v1"
+	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	certutil "k8s.io/kubernetes/pkg/util/cert"
 	"k8s.io/kubernetes/pkg/util/wait"
 )

 type kubeDiscovery struct {
 	Deployment *extensions.Deployment
-	Secret     *api.Secret
+	Secret     *v1.Secret
 }

 const (
@@ -61,29 +62,30 @@ func encodeKubeDiscoverySecretData(cfg *kubeadmapi.MasterConfiguration, caCert *
 	return data
 }

-func newKubeDiscoveryPodSpec(cfg *kubeadmapi.MasterConfiguration) api.PodSpec {
-	return api.PodSpec{
+func newKubeDiscoveryPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.PodSpec {
+	return v1.PodSpec{
 		// We have to use host network namespace, as `HostPort`/`HostIP` are Docker's
 		// buisness and CNI support isn't quite there yet (except for kubenet)
 		// (see https://github.com/kubernetes/kubernetes/issues/31307)
 		// TODO update this when #31307 is resolved
-		SecurityContext: &api.PodSecurityContext{HostNetwork: true},
-		Containers: []api.Container{{
+		HostNetwork:     true,
+		SecurityContext: &v1.PodSecurityContext{},
+		Containers: []v1.Container{{
 			Name:    kubeDiscoveryName,
 			Image:   kubeadmapi.GlobalEnvParams.DiscoveryImage,
 			Command: []string{"/usr/local/bin/kube-discovery"},
-			VolumeMounts: []api.VolumeMount{{
+			VolumeMounts: []v1.VolumeMount{{
 				Name:      kubeDiscoverySecretName,
 				MountPath: "/tmp/secret", // TODO use a shared constant
 				ReadOnly:  true,
 			}},
-			Ports: []api.ContainerPort{
+			Ports: []v1.ContainerPort{
 				// TODO when CNI issue (#31307) is resolved, we should consider adding
 				// `HostIP: s.API.AdvertiseAddrs[0]`, if there is only one address`
 				{Name: "http", ContainerPort: kubeadmapiext.DefaultDiscoveryBindPort, HostPort: cfg.Discovery.BindPort},
 			},
-			SecurityContext: &api.SecurityContext{
-				SELinuxOptions: &api.SELinuxOptions{
+			SecurityContext: &v1.SecurityContext{
+				SELinuxOptions: &v1.SELinuxOptions{
 					// TODO: This implies our discovery container is not being restricted by
 					// SELinux. This is not optimal and would be nice to adjust in future
 					// so it can read /tmp/secret, but for now this avoids recommending
@@ -92,10 +94,10 @@ func newKubeDiscoveryPodSpec(cfg *kubeadmapi.MasterConfiguration) api.PodSpec {
 				},
 			},
 		}},
-		Volumes: []api.Volume{{
+		Volumes: []v1.Volume{{
 			Name: kubeDiscoverySecretName,
-			VolumeSource: api.VolumeSource{
-				Secret: &api.SecretVolumeSource{SecretName: kubeDiscoverySecretName},
+			VolumeSource: v1.VolumeSource{
+				Secret: &v1.SecretVolumeSource{SecretName: kubeDiscoverySecretName},
 			}},
 		},
 	}
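The discovery pod wires its secret in two halves that must agree on the volume name: a v1.SecretVolumeSource entry in Volumes, and a read-only v1.VolumeMount in the container. Restated compactly with the constants from this diff:

    vol := v1.Volume{
        Name:         kubeDiscoverySecretName,
        VolumeSource: v1.VolumeSource{Secret: &v1.SecretVolumeSource{SecretName: kubeDiscoverySecretName}},
    }
    mount := v1.VolumeMount{Name: kubeDiscoverySecretName, MountPath: "/tmp/secret", ReadOnly: true}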
@@ -104,9 +106,9 @@ func newKubeDiscoveryPodSpec(cfg *kubeadmapi.MasterConfiguration) api.PodSpec {
 func newKubeDiscovery(cfg *kubeadmapi.MasterConfiguration, caCert *x509.Certificate) kubeDiscovery {
 	kd := kubeDiscovery{
 		Deployment: NewDeployment(kubeDiscoveryName, 1, newKubeDiscoveryPodSpec(cfg)),
-		Secret: &api.Secret{
-			ObjectMeta: api.ObjectMeta{Name: kubeDiscoverySecretName},
-			Type:       api.SecretTypeOpaque,
+		Secret: &v1.Secret{
+			ObjectMeta: v1.ObjectMeta{Name: kubeDiscoverySecretName},
+			Type:       v1.SecretTypeOpaque,
 			Data:       encodeKubeDiscoverySecretData(cfg, caCert),
 		},
 	}
@@ -25,8 +25,8 @@ import (
 	kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
 	kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
 	"k8s.io/kubernetes/pkg/apis/certificates"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	certclient "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/certificates/internalversion"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+	certclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/certificates/v1alpha1"
 	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
 	"k8s.io/kubernetes/pkg/types"
 	"k8s.io/kubernetes/pkg/util/wait"
@@ -34,7 +34,7 @@ import (

 // ConnectionDetails represents a master API endpoint connection
 type ConnectionDetails struct {
-	CertClient *certclient.CertificatesClient
+	CertClient *certclient.CertificatesV1alpha1Client
 	Endpoint   string
 	CACert     []byte
 	NodeName   types.NodeName
@@ -82,7 +82,7 @@ func EstablishMasterConnection(s *kubeadmapi.NodeConfiguration, clusterInfo *kub
 	// connection established, stop all wait threads
 	close(stopChan)
 	result <- &ConnectionDetails{
-		CertClient: clientSet.CertificatesClient,
+		CertClient: clientSet.CertificatesV1alpha1Client,
 		Endpoint:   apiEndpoint,
 		CACert:     caCert,
 		NodeName:   nodeName,