Merge pull request #44143 from ivan4th/fix-panic-in-kubeadm-master-node-setup

Automatic merge from submit-queue (batch tested with PRs 44143, 44133)

Fix panic in kubeadm master node setup

The problem was [caught](https://travis-ci.org/Mirantis/kubeadm-dind-cluster/jobs/218999640#L3249) by the kubeadm-dind-cluster CI.
```
[kubeadm] WARNING: kubeadm is in beta, please do not use it for production clusters.
[init] Using Kubernetes version: v1.6.1
[init] Using Authorization mode: RBAC
[preflight] Skipping pre-flight checks
[certificates] Generated CA certificate and key.
[certificates] Generated API server certificate and key.
[certificates] API Server serving cert is signed for DNS names [kube-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 10.192.0.2]
[certificates] Generated API server kubelet client certificate and key.
[certificates] Generated service account token signing key and public key.
[certificates] Generated front-proxy CA certificate and key.
[certificates] Generated front-proxy client certificate and key.
[certificates] Valid certificates and keys now exist in "/etc/kubernetes/pki"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/scheduler.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/admin.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/kubelet.conf"
[kubeconfig] Wrote KubeConfig file to disk: "/etc/kubernetes/controller-manager.conf"
[apiclient] Created API client, waiting for the control plane to become ready
[apiclient] All control plane components are healthy after 19.017839 seconds
panic: assignment to entry in nil map

goroutine 1 [running]:
panic(0x1b62140, 0xc4203f0380)
	/usr/local/go/src/runtime/panic.go:500 +0x1a1
k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig.attemptToUpdateMasterRoleLabelsAndTaints(0xc420b18be0, 0x4e, 0x0)
	/go/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig/setupmaster.go:57 +0x15b
k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig.UpdateMasterRoleLabelsAndTaints(0xc420b18be0, 0x1a, 0xc420b18be0)
	/go/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig/setupmaster.go:86 +0x2f
k8s.io/kubernetes/cmd/kubeadm/app/cmd.(*Init).Run(0xc4201a4040, 0x29886e0, 0xc420022010, 0x1c73d01, 0xc4201a4040)
	/go/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/cmd/kubeadm/app/cmd/init.go:220 +0x29c
k8s.io/kubernetes/cmd/kubeadm/app/cmd.NewCmdInit.func1(0xc4203a46c0, 0xc420660680, 0x0, 0x2)
	/go/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/cmd/kubeadm/app/cmd/init.go:86 +0x197
k8s.io/kubernetes/vendor/github.com/spf13/cobra.(*Command).execute(0xc4203a46c0, 0xc420660560, 0x2, 0x2, 0xc4203a46c0, 0xc420660560)
	/go/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/spf13/cobra/command.go:603 +0x439
k8s.io/kubernetes/vendor/github.com/spf13/cobra.(*Command).ExecuteC(0xc4203b1d40, 0xc4203a4b40, 0xc4203a46c0, 0xc4203a4000)
	/go/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/spf13/cobra/command.go:689 +0x367
k8s.io/kubernetes/vendor/github.com/spf13/cobra.(*Command).Execute(0xc4203b1d40, 0xc42046c420, 0x29886a0)
	/go/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/vendor/github.com/spf13/cobra/command.go:648 +0x2b
k8s.io/kubernetes/cmd/kubeadm/app.Run(0xc420627f70, 0xc4200001a0)
	/go/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/cmd/kubeadm/app/kubeadm.go:35 +0xe8
main.main()
	/go/src/k8s.io/kubernetes/_output/dockerized/go/src/k8s.io/kubernetes/cmd/kubeadm/kubeadm.go:26 +0x22
```
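The panic itself is Go's standard failure for writing to a map that was never initialized: at `setupmaster.go:57` the node object obtained from the wait loop evidently still has a nil `Labels` map (the fix's own comment notes the node "may appear to have no labels at first"), so the subsequent label assignment blows up. A minimal, self-contained illustration of that failure mode (not kubeadm code; the label key is just an example):

```go
package main

import "fmt"

// objectMeta stands in for the relevant part of a Kubernetes object:
// its Labels map stays nil until something explicitly initializes it.
type objectMeta struct {
	Labels map[string]string
}

type node struct {
	ObjectMeta objectMeta
}

func main() {
	var n node
	fmt.Println(n.ObjectMeta.Labels == nil) // prints: true

	// Writing into the nil map panics with the exact message seen above:
	//   panic: assignment to entry in nil map
	n.ObjectMeta.Labels["node-role.kubernetes.io/master"] = ""
}
```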
Merged by Kubernetes Submit Queue on 2017-04-06 11:19:22 -07:00 (committed via GitHub)
commit a30339ba49

```diff
--- a/cmd/kubeadm/app/phases/apiconfig/setupmaster.go
+++ b/cmd/kubeadm/app/phases/apiconfig/setupmaster.go
@@ -43,9 +43,12 @@ func attemptToUpdateMasterRoleLabelsAndTaints(client *clientset.Clientset) error
 	wait.PollInfinite(kubeadmconstants.APICallRetryInterval, func() (bool, error) {
 		var err error
 		if n, err = client.Nodes().Get(node.GetHostname(""), metav1.GetOptions{}); err != nil {
-			return true, nil
+			return false, nil
 		}
-		return false, nil
+		// The node may appear to have no labels at first,
+		// so we wait for it to get hostname label.
+		_, found := n.ObjectMeta.Labels[metav1.LabelHostname]
+		return found, nil
 	})
 
 	oldData, err := json.Marshal(n)
```
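For context, the condition function passed to `wait.PollInfinite` returns `(done bool, err error)`: `false, nil` means "retry after the interval", `true, nil` means "stop waiting". As the hunk above shows, the old code stopped on a failed `Get` and retried on a successful one, and it never looked at the labels; the fix keeps polling until the `Get` succeeds and the hostname label has shown up. A standalone sketch of that polling contract, assuming the `k8s.io/apimachinery/pkg/util/wait` package (illustrative only, not the kubeadm code):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0

	// The condition is re-evaluated every interval until it reports done=true
	// (or returns a non-nil error). Here "ready" simply means the third call,
	// standing in for "the node exists and carries its hostname label".
	_ = wait.PollInfinite(100*time.Millisecond, func() (bool, error) {
		attempts++
		if attempts < 3 {
			return false, nil // not ready yet, poll again
		}
		return true, nil // done, stop polling
	})

	fmt.Printf("condition satisfied after %d attempts\n", attempts)
}
```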