diff --git a/cmd/kubeadm/app/cmd/upgrade/node.go b/cmd/kubeadm/app/cmd/upgrade/node.go
index 662068eb849..29f00401360 100644
--- a/cmd/kubeadm/app/cmd/upgrade/node.go
+++ b/cmd/kubeadm/app/cmd/upgrade/node.go
@@ -22,14 +22,20 @@ import (
 	"os"
 	"path/filepath"
 
+	"github.com/golang/glog"
 	"github.com/spf13/cobra"
 
+	netutil "k8s.io/apimachinery/pkg/util/net"
 	"k8s.io/kubernetes/cmd/kubeadm/app/cmd/options"
 	cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
 	"k8s.io/kubernetes/cmd/kubeadm/app/constants"
 	kubeletphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubelet"
+	"k8s.io/kubernetes/cmd/kubeadm/app/phases/upgrade"
 	kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
+	"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
+	configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
 	dryrunutil "k8s.io/kubernetes/cmd/kubeadm/app/util/dryrun"
+	"k8s.io/kubernetes/pkg/util/node"
 	"k8s.io/kubernetes/pkg/util/normalizer"
 	"k8s.io/kubernetes/pkg/util/version"
 )
@@ -57,6 +63,13 @@ type nodeUpgradeFlags struct {
 	dryRun            bool
 }
 
+type controlplaneUpgradeFlags struct {
+	kubeConfigPath   string
+	advertiseAddress string
+	nodeName         string
+	dryRun           bool
+}
+
 // NewCmdNode returns the cobra command for `kubeadm upgrade node`
 func NewCmdNode() *cobra.Command {
 	cmd := &cobra.Command{
@@ -65,6 +78,7 @@ func NewCmdNode() *cobra.Command {
 		RunE: cmdutil.SubCmdRunE("node"),
 	}
 	cmd.AddCommand(NewCmdUpgradeNodeConfig())
+	cmd.AddCommand(NewCmdUpgradeControlPlane())
 	return cmd
 }
 
@@ -94,6 +108,55 @@ func NewCmdUpgradeNodeConfig() *cobra.Command {
 	return cmd
 }
 
+// NewCmdUpgradeControlPlane returns the cobra.Command for upgrading the control plane instance deployed on this node
+func NewCmdUpgradeControlPlane() *cobra.Command {
+
+	flags := &controlplaneUpgradeFlags{
+		kubeConfigPath:   constants.GetKubeletKubeConfigPath(),
+		advertiseAddress: "",
+		dryRun:           false,
+	}
+
+	cmd := &cobra.Command{
+		Use:     "experimental-control-plane",
+		Short:   "Upgrades the control plane instance deployed on this node. IMPORTANT: this command should be executed after running `kubeadm upgrade apply` on another control plane instance",
+		Long:    upgradeNodeConfigLongDesc,
+		Example: upgradeNodeConfigExample,
+		Run: func(cmd *cobra.Command, args []string) {
+
+			if flags.nodeName == "" {
+				glog.V(1).Infoln("[upgrade] found NodeName empty; using OS hostname as NodeName")
+			}
+			nodeName, err := node.GetHostname(flags.nodeName)
+			if err != nil {
+				kubeadmutil.CheckErr(err)
+			}
+			flags.nodeName = nodeName
+
+			if flags.advertiseAddress == "" {
+				ip, err := netutil.ChooseBindAddress(nil)
+				if err != nil {
+					kubeadmutil.CheckErr(err)
+					return
+				}
+
+				flags.advertiseAddress = ip.String()
+			}
+
+			err = RunUpgradeControlPlane(flags)
+			kubeadmutil.CheckErr(err)
+		},
+	}
+
+	options.AddKubeConfigFlag(cmd.Flags(), &flags.kubeConfigPath)
+	cmd.Flags().BoolVar(&flags.dryRun, "dry-run", flags.dryRun, "Do not change any state, just output the actions that would be performed.")
+
+	//TODO: the following values should be retrieved from the kubeadm-config ConfigMap; remove as soon as the new config is in place
+	cmd.Flags().StringVar(&flags.advertiseAddress, "apiserver-advertise-address", flags.advertiseAddress, "The IP address the API Server will advertise it is listening on.")
+	cmd.Flags().StringVar(&flags.nodeName, "node-name", flags.nodeName, "Specify the node name.")
+	return cmd
+}
+
 // RunUpgradeNodeConfig is executed when `kubeadm upgrade node config` runs.
 func RunUpgradeNodeConfig(flags *nodeUpgradeFlags) error {
 	if len(flags.kubeletVersionStr) == 0 {
@@ -156,3 +219,43 @@ func printFilesIfDryRunning(dryRun bool, kubeletDir string) error {
 	}
 	return dryrunutil.PrintDryRunFiles([]dryrunutil.FileToPrint{fileToPrint}, os.Stdout)
 }
+
+// RunUpgradeControlPlane is executed when `kubeadm upgrade node experimental-control-plane` runs.
+func RunUpgradeControlPlane(flags *controlplaneUpgradeFlags) error {
+
+	client, err := getClient(flags.kubeConfigPath, flags.dryRun)
+	if err != nil {
+		return fmt.Errorf("Couldn't create a Kubernetes client from file %q: %v", flags.kubeConfigPath, err)
+	}
+
+	waiter := apiclient.NewKubeWaiter(client, upgrade.UpgradeManifestTimeout, os.Stdout)
+
+	// Fetches the cluster configuration
+	cfg, err := configutil.FetchConfigFromFileOrCluster(client, os.Stdout, "upgrade", "")
+	if err != nil {
+		return fmt.Errorf("Unable to fetch the kubeadm-config ConfigMap: %v", err)
+	}
+
+	//TODO: as soon as the new config is in place, check if the node is a known control plane instance
+	// and retrieve the corresponding info (currently passed via flags)
+	cfg.NodeRegistration.Name = flags.nodeName
+	cfg.API.AdvertiseAddress = flags.advertiseAddress
+
+	// Rotate the API server certificate if needed
+	if err := upgrade.BackupAPIServerCertIfNeeded(cfg, flags.dryRun); err != nil {
+		return fmt.Errorf("Unable to rotate API server certificate: %v", err)
+	}
+
+	// Upgrade the control plane
+	fmt.Printf("[upgrade] Upgrading your Static Pod-hosted control plane instance to version %q...\n", cfg.KubernetesVersion)
+	if flags.dryRun {
+		return DryRunStaticPodUpgrade(cfg)
+	}
+
+	if err := PerformStaticPodUpgrade(client, waiter, cfg, false); err != nil {
+		return fmt.Errorf("Couldn't complete the static pod upgrade: %v", err)
+	}
+
+	fmt.Println("[upgrade] The control plane instance for this node was successfully updated!")
+	return nil
+}
diff --git a/cmd/kubeadm/app/phases/upgrade/postupgrade.go b/cmd/kubeadm/app/phases/upgrade/postupgrade.go
index dd51e745b95..08f95268cdd 100644
--- a/cmd/kubeadm/app/phases/upgrade/postupgrade.go
+++ b/cmd/kubeadm/app/phases/upgrade/postupgrade.go
@@ -108,7 +108,7 @@ func PerformPostUpgradeTasks(client clientset.Interface, cfg *kubeadmapi.InitCon
 	}
 
 	// Rotate the kube-apiserver cert and key if needed
-	if err := backupAPIServerCertIfNeeded(cfg, dryRun); err != nil {
+	if err := BackupAPIServerCertIfNeeded(cfg, dryRun); err != nil {
 		errs = append(errs, err)
 	}
 
@@ -172,7 +172,8 @@ func upgradeToSelfHosting(client clientset.Interface, cfg *kubeadmapi.InitConfig
 	return nil
 }
 
-func backupAPIServerCertIfNeeded(cfg *kubeadmapi.InitConfiguration, dryRun bool) error {
+// BackupAPIServerCertIfNeeded rotates the kube-apiserver certificate if older than 180 days
+func BackupAPIServerCertIfNeeded(cfg *kubeadmapi.InitConfiguration, dryRun bool) error {
 	certAndKeyDir := kubeadmapiv1alpha3.DefaultCertificatesDir
 	shouldBackup, err := shouldBackupAPIServerCertAndKey(certAndKeyDir)
 	if err != nil {
diff --git a/cmd/kubeadm/app/phases/upgrade/prepull.go b/cmd/kubeadm/app/phases/upgrade/prepull.go
index 5f872cf01c4..1dabd7daaaa 100644
--- a/cmd/kubeadm/app/phases/upgrade/prepull.go
+++ b/cmd/kubeadm/app/phases/upgrade/prepull.go
@@ -159,6 +159,11 @@ func buildPrePullDaemonSet(component, image string) *apps.DaemonSet {
 			Namespace: metav1.NamespaceSystem,
 		},
 		Spec: apps.DaemonSetSpec{
+			Selector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{
+					"k8s-app": addPrepullPrefix(component),
+				},
+			},
 			Template: v1.PodTemplateSpec{
 				ObjectMeta: metav1.ObjectMeta{
 					Labels: map[string]string{
diff --git a/cmd/kubeadm/app/phases/upgrade/staticpods.go b/cmd/kubeadm/app/phases/upgrade/staticpods.go
index 200327f1945..900b39c98bd 100644
--- a/cmd/kubeadm/app/phases/upgrade/staticpods.go
+++ b/cmd/kubeadm/app/phases/upgrade/staticpods.go
@@ -185,28 +185,29 @@ func upgradeComponent(component string, waiter apiclient.Waiter, pathMgr StaticP
 		}
 	}
 
-	// ensure etcd certs are generated for etcd and kube-apiserver
-	if component == constants.Etcd || component == constants.KubeAPIServer {
+	if cfg.Etcd.Local != nil {
+		// ensure etcd certs are generated for etcd and kube-apiserver
+		if component == constants.Etcd || component == constants.KubeAPIServer {
+			caCert, caKey, err := certsphase.KubeadmCertEtcdCA.CreateAsCA(cfg)
+			if err != nil {
+				return fmt.Errorf("failed to upgrade the %s CA certificate and key: %v", constants.Etcd, err)
+			}
 
-		caCert, caKey, err := certsphase.KubeadmCertEtcdCA.CreateAsCA(cfg)
-		if err != nil {
-			return fmt.Errorf("failed to upgrade the %s CA certificate and key: %v", constants.Etcd, err)
-		}
-
-		if component == constants.Etcd {
-			if err := certsphase.KubeadmCertEtcdServer.CreateFromCA(cfg, caCert, caKey); err != nil {
-				return fmt.Errorf("failed to upgrade the %s certificate and key: %v", constants.Etcd, err)
+			if component == constants.Etcd {
+				if err := certsphase.KubeadmCertEtcdServer.CreateFromCA(cfg, caCert, caKey); err != nil {
+					return fmt.Errorf("failed to upgrade the %s certificate and key: %v", constants.Etcd, err)
+				}
+				if err := certsphase.KubeadmCertEtcdPeer.CreateFromCA(cfg, caCert, caKey); err != nil {
+					return fmt.Errorf("failed to upgrade the %s peer certificate and key: %v", constants.Etcd, err)
+				}
+				if err := certsphase.KubeadmCertEtcdHealthcheck.CreateFromCA(cfg, caCert, caKey); err != nil {
+					return fmt.Errorf("failed to upgrade the %s healthcheck certificate and key: %v", constants.Etcd, err)
+				}
 			}
-			if err := certsphase.KubeadmCertEtcdPeer.CreateFromCA(cfg, caCert, caKey); err != nil {
-				return fmt.Errorf("failed to upgrade the %s peer certificate and key: %v", constants.Etcd, err)
-			}
-			if err := certsphase.KubeadmCertEtcdHealthcheck.CreateFromCA(cfg, caCert, caKey); err != nil {
-				return fmt.Errorf("failed to upgrade the %s healthcheck certificate and key: %v", constants.Etcd, err)
-			}
-		}
-		if component == constants.KubeAPIServer {
-			if err := certsphase.KubeadmCertEtcdAPIClient.CreateFromCA(cfg, caCert, caKey); err != nil {
-				return fmt.Errorf("failed to upgrade the %s %s-client certificate and key: %v", constants.KubeAPIServer, constants.Etcd, err)
+			if component == constants.KubeAPIServer {
+				if err := certsphase.KubeadmCertEtcdAPIClient.CreateFromCA(cfg, caCert, caKey); err != nil {
+					return fmt.Errorf("failed to upgrade the %s %s-client certificate and key: %v", constants.KubeAPIServer, constants.Etcd, err)
+				}
 			}
 		}
 	}
diff --git a/cmd/kubeadm/app/phases/uploadconfig/uploadconfig.go b/cmd/kubeadm/app/phases/uploadconfig/uploadconfig.go
index 0377ec54c6a..fa4f99d37e5 100644
--- a/cmd/kubeadm/app/phases/uploadconfig/uploadconfig.go
+++ b/cmd/kubeadm/app/phases/uploadconfig/uploadconfig.go
@@ -31,10 +31,10 @@ import (
 )
 
 const (
-	// BootstrapDiscoveryClusterRoleName sets the name for the ClusterRole that allows
+	// NodesKubeadmConfigClusterRoleName sets the name for the ClusterRole that allows
 	// the bootstrap tokens to access the kubeadm-config ConfigMap during the node bootstrap/discovery
-	// phase for additional master nodes
"kubeadm:bootstrap-discovery-kubeadm-config" + // or during upgrade nodes + NodesKubeadmConfigClusterRoleName = "kubeadm:nodes-kubeadm-config" ) // UploadConfiguration saves the InitConfiguration used for later reference (when upgrading for instance) @@ -75,10 +75,10 @@ func UploadConfiguration(cfg *kubeadmapi.InitConfiguration, client clientset.Int return err } - // Ensure that the BootstrapDiscoveryClusterRole exists + // Ensure that the NodesKubeadmConfigClusterRoleName exists err = apiclient.CreateOrUpdateRole(client, &rbac.Role{ ObjectMeta: metav1.ObjectMeta{ - Name: BootstrapDiscoveryClusterRoleName, + Name: NodesKubeadmConfigClusterRoleName, Namespace: metav1.NamespaceSystem, }, Rules: []rbac.PolicyRule{ @@ -89,23 +89,28 @@ func UploadConfiguration(cfg *kubeadmapi.InitConfiguration, client clientset.Int return err } - // Binds the BootstrapDiscoveryClusterRole to all the bootstrap tokens + // Binds the NodesKubeadmConfigClusterRoleName to all the bootstrap tokens // that are members of the system:bootstrappers:kubeadm:default-node-token group + // and to all nodes return apiclient.CreateOrUpdateRoleBinding(client, &rbac.RoleBinding{ ObjectMeta: metav1.ObjectMeta{ - Name: BootstrapDiscoveryClusterRoleName, + Name: NodesKubeadmConfigClusterRoleName, Namespace: metav1.NamespaceSystem, }, RoleRef: rbac.RoleRef{ APIGroup: rbac.GroupName, Kind: "Role", - Name: BootstrapDiscoveryClusterRoleName, + Name: NodesKubeadmConfigClusterRoleName, }, Subjects: []rbac.Subject{ { Kind: rbac.GroupKind, Name: kubeadmconstants.NodeBootstrapTokenAuthGroup, }, + { + Kind: rbac.GroupKind, + Name: kubeadmconstants.NodesGroup, + }, }, }) }