Merge pull request #51122 from luxas/kubeadm_impl_dryrun

Automatic merge from submit-queue (batch tested with PRs 51134, 51122, 50562, 50971, 51327)

kubeadm: Fully implement --dry-run

**What this PR does / why we need it**:

Finishes the work begun in #50631:
 - Implements dry-run functionality for the certs/kubeconfig/controlplane/etcd phases as well, by making the output directory configurable (see the sketch after this list).
 - Prints the control plane manifests to stdout, but not the certs/kubeconfig files, due to their sensitive nature; instead, kubeadm prints the directory where they can be inspected.
 - Fixes a small YAML marshaling error where `apiVersion` and `kind` weren't printed earlier.
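
A minimal sketch of the configurable-output-directory idea, mirroring the `getDirectoriesToUse` helper in the diff below; the constant values and function name here are illustrative, not the exact kubeadm symbols. In dry-run mode the certificates, kubeconfig files, and static Pod manifests are all redirected to a single temporary directory so nothing is written under `/etc/kubernetes`:

```go
package main

import (
	"fmt"
	"io/ioutil"
)

// Illustrative defaults; kubeadm derives these from its constants package.
const (
	kubernetesDir      = "/etc/kubernetes"
	defaultPKIDir      = "/etc/kubernetes/pki"
	staticPodManifests = "/etc/kubernetes/manifests"
)

// outputDirs returns the certificates, kubeconfig and manifest directories.
// When dry-running, everything is redirected into one temp dir so the real
// host paths are never touched.
func outputDirs(dryRun bool) (string, string, string, error) {
	if dryRun {
		tmp, err := ioutil.TempDir("", "kubeadm-init-dryrun")
		if err != nil {
			return "", "", "", fmt.Errorf("couldn't create a temporary directory: %v", err)
		}
		return tmp, tmp, tmp, nil
	}
	return defaultPKIDir, kubernetesDir, staticPodManifests, nil
}

func main() {
	certsDir, kubeConfigDir, manifestDir, err := outputDirs(true)
	if err != nil {
		panic(err)
	}
	fmt.Println(certsDir, kubeConfigDir, manifestDir)
}
```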

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*:

fixes https://github.com/kubernetes/kubeadm/issues/389

**Special notes for your reviewer**:

Full `kubeadm init --dry-run` output:

```
[kubeadm] WARNING: kubeadm is in beta, please do not use it for production clusters.
[init] Using Kubernetes version: v1.7.4
[init] Using Authorization mode: [Node RBAC]
[preflight] Running pre-flight checks
[preflight] WARNING: docker service is not enabled, please run 'systemctl enable docker.service'
[preflight] Starting the kubelet service
[kubeadm] WARNING: starting in 1.8, tokens expire after 24 hours by default (if you require a non-expiring token use --token-ttl 0)
[certificates] Generated ca certificate and key.
[certificates] Generated apiserver certificate and key.
[certificates] apiserver serving cert is signed for DNS names [thegopher kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local] and IPs [10.96.0.1 192.168.200.101]
[certificates] Generated apiserver-kubelet-client certificate and key.
[certificates] Generated sa key and public key.
[certificates] Generated front-proxy-ca certificate and key.
[certificates] Generated front-proxy-client certificate and key.
[certificates] Valid certificates and keys now exist in "/tmp/kubeadm-init-dryrun477531930"
[kubeconfig] Wrote KubeConfig file to disk: "admin.conf"
[kubeconfig] Wrote KubeConfig file to disk: "kubelet.conf"
[kubeconfig] Wrote KubeConfig file to disk: "controller-manager.conf"
[kubeconfig] Wrote KubeConfig file to disk: "scheduler.conf"
[dryrun] Wrote certificates, kubeconfig files and control plane manifests to "/tmp/kubeadm-init-dryrun477531930"
[dryrun] Won't print certificates or kubeconfig files due to the sensitive nature of them
[dryrun] Please go and examine the "/tmp/kubeadm-init-dryrun477531930" directory for details about what would be written
[dryrun] Would write file "/etc/kubernetes/manifests/kube-apiserver.yaml" with content:
	apiVersion: v1
	kind: Pod
	metadata:
	  annotations:
	    scheduler.alpha.kubernetes.io/critical-pod: ""
	  creationTimestamp: null
	  labels:
	    component: kube-apiserver
	    tier: control-plane
	  name: kube-apiserver
	  namespace: kube-system
	spec:
	  containers:
	  - command:
	    - kube-apiserver
	    - --allow-privileged=true
	    - --kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname
	    - --requestheader-extra-headers-prefix=X-Remote-Extra-
	    - --service-cluster-ip-range=10.96.0.0/12
	    - --tls-cert-file=/etc/kubernetes/pki/apiserver.crt
	    - --tls-private-key-file=/etc/kubernetes/pki/apiserver.key
	    - --proxy-client-cert-file=/etc/kubernetes/pki/front-proxy-client.crt
	    - --admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota
	    - --experimental-bootstrap-token-auth=true
	    - --client-ca-file=/etc/kubernetes/pki/ca.crt
	    - --kubelet-client-key=/etc/kubernetes/pki/apiserver-kubelet-client.key
	    - --secure-port=6443
	    - --proxy-client-key-file=/etc/kubernetes/pki/front-proxy-client.key
	    - --insecure-port=0
	    - --requestheader-username-headers=X-Remote-User
	    - --requestheader-group-headers=X-Remote-Group
	    - --requestheader-allowed-names=front-proxy-client
	    - --advertise-address=192.168.200.101
	    - --service-account-key-file=/etc/kubernetes/pki/sa.pub
	    - --kubelet-client-certificate=/etc/kubernetes/pki/apiserver-kubelet-client.crt
	    - --requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt
	    - --authorization-mode=Node,RBAC
	    - --etcd-servers=http://127.0.0.1:2379
	    image: gcr.io/google_containers/kube-apiserver-amd64:v1.7.4
	    livenessProbe:
	      failureThreshold: 8
	      httpGet:
	        host: 127.0.0.1
	        path: /healthz
	        port: 6443
	        scheme: HTTPS
	      initialDelaySeconds: 15
	      timeoutSeconds: 15
	    name: kube-apiserver
	    resources:
	      requests:
	        cpu: 250m
	    volumeMounts:
	    - mountPath: /etc/kubernetes/pki
	      name: k8s-certs
	      readOnly: true
	    - mountPath: /etc/ssl/certs
	      name: ca-certs
	      readOnly: true
	    - mountPath: /etc/pki
	      name: ca-certs-etc-pki
	      readOnly: true
	  hostNetwork: true
	  volumes:
	  - hostPath:
	      path: /etc/kubernetes/pki
	    name: k8s-certs
	  - hostPath:
	      path: /etc/ssl/certs
	    name: ca-certs
	  - hostPath:
	      path: /etc/pki
	    name: ca-certs-etc-pki
	status: {}
[dryrun] Would write file "/etc/kubernetes/manifests/kube-controller-manager.yaml" with content:
	apiVersion: v1
	kind: Pod
	metadata:
	  annotations:
	    scheduler.alpha.kubernetes.io/critical-pod: ""
	  creationTimestamp: null
	  labels:
	    component: kube-controller-manager
	    tier: control-plane
	  name: kube-controller-manager
	  namespace: kube-system
	spec:
	  containers:
	  - command:
	    - kube-controller-manager
	    - --address=127.0.0.1
	    - --kubeconfig=/etc/kubernetes/controller-manager.conf
	    - --cluster-signing-cert-file=/etc/kubernetes/pki/ca.crt
	    - --cluster-signing-key-file=/etc/kubernetes/pki/ca.key
	    - --leader-elect=true
	    - --use-service-account-credentials=true
	    - --controllers=*,bootstrapsigner,tokencleaner
	    - --root-ca-file=/etc/kubernetes/pki/ca.crt
	    - --service-account-private-key-file=/etc/kubernetes/pki/sa.key
	    image: gcr.io/google_containers/kube-controller-manager-amd64:v1.7.4
	    livenessProbe:
	      failureThreshold: 8
	      httpGet:
	        host: 127.0.0.1
	        path: /healthz
	        port: 10252
	        scheme: HTTP
	      initialDelaySeconds: 15
	      timeoutSeconds: 15
	    name: kube-controller-manager
	    resources:
	      requests:
	        cpu: 200m
	    volumeMounts:
	    - mountPath: /etc/kubernetes/pki
	      name: k8s-certs
	      readOnly: true
	    - mountPath: /etc/ssl/certs
	      name: ca-certs
	      readOnly: true
	    - mountPath: /etc/kubernetes/controller-manager.conf
	      name: kubeconfig
	      readOnly: true
	    - mountPath: /etc/pki
	      name: ca-certs-etc-pki
	      readOnly: true
	  hostNetwork: true
	  volumes:
	  - hostPath:
	      path: /etc/kubernetes/pki
	    name: k8s-certs
	  - hostPath:
	      path: /etc/ssl/certs
	    name: ca-certs
	  - hostPath:
	      path: /etc/kubernetes/controller-manager.conf
	    name: kubeconfig
	  - hostPath:
	      path: /etc/pki
	    name: ca-certs-etc-pki
	status: {}
[dryrun] Would write file "/etc/kubernetes/manifests/kube-scheduler.yaml" with content:
	apiVersion: v1
	kind: Pod
	metadata:
	  annotations:
	    scheduler.alpha.kubernetes.io/critical-pod: ""
	  creationTimestamp: null
	  labels:
	    component: kube-scheduler
	    tier: control-plane
	  name: kube-scheduler
	  namespace: kube-system
	spec:
	  containers:
	  - command:
	    - kube-scheduler
	    - --leader-elect=true
	    - --kubeconfig=/etc/kubernetes/scheduler.conf
	    - --address=127.0.0.1
	    image: gcr.io/google_containers/kube-scheduler-amd64:v1.7.4
	    livenessProbe:
	      failureThreshold: 8
	      httpGet:
	        host: 127.0.0.1
	        path: /healthz
	        port: 10251
	        scheme: HTTP
	      initialDelaySeconds: 15
	      timeoutSeconds: 15
	    name: kube-scheduler
	    resources:
	      requests:
	        cpu: 100m
	    volumeMounts:
	    - mountPath: /etc/kubernetes/scheduler.conf
	      name: kubeconfig
	      readOnly: true
	  hostNetwork: true
	  volumes:
	  - hostPath:
	      path: /etc/kubernetes/scheduler.conf
	    name: kubeconfig
	status: {}
[markmaster] Will mark node thegopher as master by adding a label and a taint
[dryrun] Would perform action GET on resource "nodes" in API group "core/v1"
[dryrun] Resource name: "thegopher"
[dryrun] Would perform action PATCH on resource "nodes" in API group "core/v1"
[dryrun] Resource name: "thegopher"
[dryrun] Attached patch:
	{"metadata":{"labels":{"node-role.kubernetes.io/master":""}},"spec":{"taints":[{"effect":"NoSchedule","key":"node-role.kubernetes.io/master","timeAdded":null}]}}
[markmaster] Master thegopher tainted and labelled with key/value: node-role.kubernetes.io/master=""
[token] Using token: 96efd6.98bbb2f4603c026b
[dryrun] Would perform action GET on resource "secrets" in API group "core/v1"
[dryrun] Resource name: "bootstrap-token-96efd6"
[dryrun] Would perform action CREATE on resource "secrets" in API group "core/v1"
[dryrun] Attached object:
	apiVersion: v1
	data:
	  description: VGhlIGRlZmF1bHQgYm9vdHN0cmFwIHRva2VuIGdlbmVyYXRlZCBieSAna3ViZWFkbSBpbml0Jy4=
	  expiration: MjAxNy0wOC0yM1QyMzoxOTozNCswMzowMA==
	  token-id: OTZlZmQ2
	  token-secret: OThiYmIyZjQ2MDNjMDI2Yg==
	  usage-bootstrap-authentication: dHJ1ZQ==
	  usage-bootstrap-signing: dHJ1ZQ==
	kind: Secret
	metadata:
	  creationTimestamp: null
	  name: bootstrap-token-96efd6
	type: bootstrap.kubernetes.io/token
[bootstraptoken] Configured RBAC rules to allow Node Bootstrap tokens to post CSRs in order for nodes to get long term certificate credentials
[dryrun] Would perform action CREATE on resource "clusterrolebindings" in API group "rbac.authorization.k8s.io/v1beta1"
[dryrun] Attached object:
	apiVersion: rbac.authorization.k8s.io/v1beta1
	kind: ClusterRoleBinding
	metadata:
	  creationTimestamp: null
	  name: kubeadm:kubelet-bootstrap
	roleRef:
	  apiGroup: rbac.authorization.k8s.io
	  kind: ClusterRole
	  name: system:node-bootstrapper
	subjects:
	- kind: Group
	  name: system:bootstrappers
[bootstraptoken] Configured RBAC rules to allow the csrapprover controller automatically approve CSRs from a Node Bootstrap Token
[dryrun] Would perform action CREATE on resource "clusterroles" in API group "rbac.authorization.k8s.io/v1beta1"
[dryrun] Attached object:
	apiVersion: rbac.authorization.k8s.io/v1beta1
	kind: ClusterRole
	metadata:
	  creationTimestamp: null
	  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
	rules:
	- apiGroups:
	  - certificates.k8s.io
	  resources:
	  - certificatesigningrequests/nodeclient
	  verbs:
	  - create
[dryrun] Would perform action CREATE on resource "clusterrolebindings" in API group "rbac.authorization.k8s.io/v1beta1"
[dryrun] Attached object:
	apiVersion: rbac.authorization.k8s.io/v1beta1
	kind: ClusterRoleBinding
	metadata:
	  creationTimestamp: null
	  name: kubeadm:node-autoapprove-bootstrap
	roleRef:
	  apiGroup: rbac.authorization.k8s.io
	  kind: ClusterRole
	  name: system:certificates.k8s.io:certificatesigningrequests:nodeclient
	subjects:
	- kind: Group
	  name: system:bootstrappers
[bootstraptoken] Creating the "cluster-info" ConfigMap in the "kube-public" namespace
[dryrun] Would perform action CREATE on resource "configmaps" in API group "core/v1"
[dryrun] Attached object:
	apiVersion: v1
	data:
	  kubeconfig: |
	    apiVersion: v1
	    clusters:
	    - cluster:
	        certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFM01EZ3lNakl3TVRrek1Gb1hEVEkzTURneU1ESXdNVGt6TUZvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBTFk0CnZWZ1FSN3pva3VzbWVvQ3JwZ1lFdEFHSldhSWVVUXE0ZE8wcVA4TDFKQk10ZTdHcXVHeXlWdVlyejBBeXdGdkMKaEh3Tm1pbmpIWFdNYkgrQVdIUXJOZmtZMmRBdnVuL0NYZWd6RlRZZG56M1JzYU5EaW0wazVXaVhEamQwM21YVApicGpvMGxpT2ZtY0xlOHpYUXZNaHpmN2FMV24wOVJoN05Ld0M0eW84cis5MDNHNjVxRW56cnUybmJKTEJ1TFk0CkFsL3UxTElVSGV4dmExZjgzampOQ1NmQXJScGh1d0oyS1NTWXhoaEJpNHBJMzd0ZEFpN3diTUF0cG4zdU9rVEQKU0dtdGpkbFZoUlAzV1dHQzNQTjF3M1JRakpmTW5weFFZbFFmalU2UE9Pbzg4ODBwN3dnUXFDUU11bjU5UWlBWgpwNkI1c3lrUitMemhoZVpkMWtjQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFHaTVrcUJzMTdOMU5pRWx2RGJaWGFSeXk5anUKR3ZuRjRjSnczQ0dPR2hpdHgySmdxRkt5WXRIdlJUSFNYRXpBNTlteEs2RlJWUWpBZmJMdjhSZUNKUjYrSzdRdQo0U21uTVVxVXRTZFUzaHozVXZlMjVOTHVwMnhsYVpZbzVwdVRrOWhZdUszd09MbWgxZTFoRzcyUFpoZE5yOGd5Ck5lTFN3bjI4OEVUSlNCcWpob0FkV2w0YzZtcnpwWll4ekNrcEpUSDFPWnBCQzFUYmY3QW5HenVwRzB1Q1RSYWsKWTBCSERyL01uVGJKKzM5NEJyMXBId0NtQ3ZrWUY0RjVEeW9UTFQ0UFhGTnJSV3UweU9rMXdDdEFKbEs3eFlUOAp5Z015cUlRSG4rNjYrUGlsSUprcU81ODRoVm5ENURva1dLcEdISFlYNmNpRGYwU1hYZUI1d09YQ0xjaz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
	        server: https://192.168.200.101:6443
	      name: ""
	    contexts: []
	    current-context: ""
	    kind: Config
	    preferences: {}
	    users: []
	kind: ConfigMap
	metadata:
	  creationTimestamp: null
	  name: cluster-info
	  namespace: kube-public
[dryrun] Would perform action CREATE on resource "roles" in API group "rbac.authorization.k8s.io/v1beta1"
[dryrun] Attached object:
	apiVersion: rbac.authorization.k8s.io/v1beta1
	kind: Role
	metadata:
	  creationTimestamp: null
	  name: kubeadm:bootstrap-signer-clusterinfo
	  namespace: kube-public
	rules:
	- apiGroups:
	  - ""
	  resourceNames:
	  - cluster-info
	  resources:
	  - configmaps
	  verbs:
	  - get
[dryrun] Would perform action CREATE on resource "rolebindings" in API group "rbac.authorization.k8s.io/v1beta1"
[dryrun] Attached object:
	apiVersion: rbac.authorization.k8s.io/v1beta1
	kind: RoleBinding
	metadata:
	  creationTimestamp: null
	  name: kubeadm:bootstrap-signer-clusterinfo
	  namespace: kube-public
	roleRef:
	  apiGroup: rbac.authorization.k8s.io
	  kind: Role
	  name: kubeadm:bootstrap-signer-clusterinfo
	subjects:
	- kind: User
	  name: system:anonymous
[uploadconfig] Storing the configuration used in ConfigMap "kubeadm-config" in the "kube-system" Namespace
[dryrun] Would perform action CREATE on resource "configmaps" in API group "core/v1"
[dryrun] Attached object:
	apiVersion: v1
	data:
	  MasterConfiguration: |
	    api:
	      advertiseAddress: 192.168.200.101
	      bindPort: 6443
	    apiServerCertSANs: []
	    apiServerExtraArgs: null
	    authorizationModes:
	    - Node
	    - RBAC
	    certificatesDir: /etc/kubernetes/pki
	    cloudProvider: ""
	    controllerManagerExtraArgs: null
	    etcd:
	      caFile: ""
	      certFile: ""
	      dataDir: /var/lib/etcd
	      endpoints: []
	      extraArgs: null
	      image: ""
	      keyFile: ""
	    featureFlags: null
	    imageRepository: gcr.io/google_containers
	    kubernetesVersion: v1.7.4
	    networking:
	      dnsDomain: cluster.local
	      podSubnet: ""
	      serviceSubnet: 10.96.0.0/12
	    nodeName: thegopher
	    schedulerExtraArgs: null
	    token: 96efd6.98bbb2f4603c026b
	    tokenTTL: 86400000000000
	    unifiedControlPlaneImage: ""
	kind: ConfigMap
	metadata:
	  creationTimestamp: null
	  name: kubeadm-config
	  namespace: kube-system
[dryrun] Would perform action GET on resource "clusterrolebindings" in API group "rbac.authorization.k8s.io/v1beta1"
[dryrun] Resource name: "system:node"
[dryrun] Would perform action CREATE on resource "serviceaccounts" in API group "core/v1"
[dryrun] Attached object:
	apiVersion: v1
	kind: ServiceAccount
	metadata:
	  creationTimestamp: null
	  name: kube-dns
	  namespace: kube-system
[dryrun] Would perform action GET on resource "services" in API group "core/v1"
[dryrun] Resource name: "kubernetes"
[dryrun] Would perform action CREATE on resource "deployments" in API group "extensions/v1beta1"
[dryrun] Attached object:
	apiVersion: extensions/v1beta1
	kind: Deployment
	metadata:
	  creationTimestamp: null
	  labels:
	    k8s-app: kube-dns
	  name: kube-dns
	  namespace: kube-system
	spec:
	  selector:
	    matchLabels:
	      k8s-app: kube-dns
	  strategy:
	    rollingUpdate:
	      maxSurge: 10%
	      maxUnavailable: 0
	  template:
	    metadata:
	      creationTimestamp: null
	      labels:
	        k8s-app: kube-dns
	    spec:
	      affinity:
	        nodeAffinity:
	          requiredDuringSchedulingIgnoredDuringExecution:
	            nodeSelectorTerms:
	            - matchExpressions:
	              - key: beta.kubernetes.io/arch
	                operator: In
	                values:
	                - amd64
	      containers:
	      - args:
	        - --domain=cluster.local.
	        - --dns-port=10053
	        - --config-dir=/kube-dns-config
	        - --v=2
	        env:
	        - name: PROMETHEUS_PORT
	          value: "10055"
	        image: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.4
	        imagePullPolicy: IfNotPresent
	        livenessProbe:
	          failureThreshold: 5
	          httpGet:
	            path: /healthcheck/kubedns
	            port: 10054
	            scheme: HTTP
	          initialDelaySeconds: 60
	          successThreshold: 1
	          timeoutSeconds: 5
	        name: kubedns
	        ports:
	        - containerPort: 10053
	          name: dns-local
	          protocol: UDP
	        - containerPort: 10053
	          name: dns-tcp-local
	          protocol: TCP
	        - containerPort: 10055
	          name: metrics
	          protocol: TCP
	        readinessProbe:
	          httpGet:
	            path: /readiness
	            port: 8081
	            scheme: HTTP
	          initialDelaySeconds: 3
	          timeoutSeconds: 5
	        resources:
	          limits:
	            memory: 170Mi
	          requests:
	            cpu: 100m
	            memory: 70Mi
	        volumeMounts:
	        - mountPath: /kube-dns-config
	          name: kube-dns-config
	      - args:
	        - -v=2
	        - -logtostderr
	        - -configDir=/etc/k8s/dns/dnsmasq-nanny
	        - -restartDnsmasq=true
	        - --
	        - -k
	        - --cache-size=1000
	        - --log-facility=-
	        - --server=/cluster.local/127.0.0.1#10053
	        - --server=/in-addr.arpa/127.0.0.1#10053
	        - --server=/ip6.arpa/127.0.0.1#10053
	        image: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.4
	        imagePullPolicy: IfNotPresent
	        livenessProbe:
	          failureThreshold: 5
	          httpGet:
	            path: /healthcheck/dnsmasq
	            port: 10054
	            scheme: HTTP
	          initialDelaySeconds: 60
	          successThreshold: 1
	          timeoutSeconds: 5
	        name: dnsmasq
	        ports:
	        - containerPort: 53
	          name: dns
	          protocol: UDP
	        - containerPort: 53
	          name: dns-tcp
	          protocol: TCP
	        resources:
	          requests:
	            cpu: 150m
	            memory: 20Mi
	        volumeMounts:
	        - mountPath: /etc/k8s/dns/dnsmasq-nanny
	          name: kube-dns-config
	      - args:
	        - --v=2
	        - --logtostderr
	        - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
	        - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
	        image: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.4
	        imagePullPolicy: IfNotPresent
	        livenessProbe:
	          failureThreshold: 5
	          httpGet:
	            path: /metrics
	            port: 10054
	            scheme: HTTP
	          initialDelaySeconds: 60
	          successThreshold: 1
	          timeoutSeconds: 5
	        name: sidecar
	        ports:
	        - containerPort: 10054
	          name: metrics
	          protocol: TCP
	        resources:
	          requests:
	            cpu: 10m
	            memory: 20Mi
	      dnsPolicy: Default
	      serviceAccountName: kube-dns
	      tolerations:
	      - key: CriticalAddonsOnly
	        operator: Exists
	      - effect: NoSchedule
	        key: node-role.kubernetes.io/master
	      volumes:
	      - configMap:
	          name: kube-dns
	          optional: true
	        name: kube-dns-config
	status: {}
[dryrun] Would perform action CREATE on resource "services" in API group "core/v1"
[dryrun] Attached object:
	apiVersion: v1
	kind: Service
	metadata:
	  creationTimestamp: null
	  labels:
	    k8s-app: kube-dns
	    kubernetes.io/cluster-service: "true"
	    kubernetes.io/name: KubeDNS
	  name: kube-dns
	  namespace: kube-system
	  resourceVersion: "0"
	spec:
	  clusterIP: 10.96.0.10
	  ports:
	  - name: dns
	    port: 53
	    protocol: UDP
	    targetPort: 53
	  - name: dns-tcp
	    port: 53
	    protocol: TCP
	    targetPort: 53
	  selector:
	    k8s-app: kube-dns
	status:
	  loadBalancer: {}
[addons] Applied essential addon: kube-dns
[dryrun] Would perform action CREATE on resource "serviceaccounts" in API group "core/v1"
[dryrun] Attached object:
	apiVersion: v1
	kind: ServiceAccount
	metadata:
	  creationTimestamp: null
	  name: kube-proxy
	  namespace: kube-system
[dryrun] Would perform action CREATE on resource "configmaps" in API group "core/v1"
[dryrun] Attached object:
	apiVersion: v1
	data:
	  kubeconfig.conf: |
	    apiVersion: v1
	    kind: Config
	    clusters:
	    - cluster:
	        certificate-authority: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
	        server: https://192.168.200.101:6443
	      name: default
	    contexts:
	    - context:
	        cluster: default
	        namespace: default
	        user: default
	      name: default
	    current-context: default
	    users:
	    - name: default
	      user:
	        tokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
	kind: ConfigMap
	metadata:
	  creationTimestamp: null
	  labels:
	    app: kube-proxy
	  name: kube-proxy
	  namespace: kube-system
[dryrun] Would perform action CREATE on resource "daemonsets" in API group "extensions/v1beta1"
[dryrun] Attached object:
	apiVersion: extensions/v1beta1
	kind: DaemonSet
	metadata:
	  creationTimestamp: null
	  labels:
	    k8s-app: kube-proxy
	  name: kube-proxy
	  namespace: kube-system
	spec:
	  selector:
	    matchLabels:
	      k8s-app: kube-proxy
	  template:
	    metadata:
	      creationTimestamp: null
	      labels:
	        k8s-app: kube-proxy
	    spec:
	      containers:
	      - command:
	        - /usr/local/bin/kube-proxy
	        - --kubeconfig=/var/lib/kube-proxy/kubeconfig.conf
	        image: gcr.io/google_containers/kube-proxy-amd64:v1.7.4
	        imagePullPolicy: IfNotPresent
	        name: kube-proxy
	        resources: {}
	        securityContext:
	          privileged: true
	        volumeMounts:
	        - mountPath: /var/lib/kube-proxy
	          name: kube-proxy
	        - mountPath: /run/xtables.lock
	          name: xtables-lock
	      hostNetwork: true
	      serviceAccountName: kube-proxy
	      tolerations:
	      - effect: NoSchedule
	        key: node-role.kubernetes.io/master
	      - effect: NoSchedule
	        key: node.cloudprovider.kubernetes.io/uninitialized
	        value: "true"
	      volumes:
	      - configMap:
	          name: kube-proxy
	        name: kube-proxy
	      - hostPath:
	          path: /run/xtables.lock
	        name: xtables-lock
	  updateStrategy:
	    type: RollingUpdate
	status:
	  currentNumberScheduled: 0
	  desiredNumberScheduled: 0
	  numberMisscheduled: 0
	  numberReady: 0
[dryrun] Would perform action CREATE on resource "clusterrolebindings" in API group "rbac.authorization.k8s.io/v1beta1"
[dryrun] Attached object:
	apiVersion: rbac.authorization.k8s.io/v1beta1
	kind: ClusterRoleBinding
	metadata:
	  creationTimestamp: null
	  name: kubeadm:node-proxier
	roleRef:
	  apiGroup: rbac.authorization.k8s.io
	  kind: ClusterRole
	  name: system:node-proxier
	subjects:
	- kind: ServiceAccount
	  name: kube-proxy
	  namespace: kube-system
[addons] Applied essential addon: kube-proxy

Your Kubernetes master has initialized successfully!

To start using your cluster, you need to run (as a regular user):

  mkdir -p $HOME/.kube
  sudo cp -i /tmp/kubeadm-init-dryrun477531930/admin.conf $HOME/.kube/config
  sudo chown $(id -u):$(id -g) $HOME/.kube/config

You should now deploy a pod network to the cluster.
Run "kubectl apply -f [podnetwork].yaml" with one of the options listed at:
  http://kubernetes.io/docs/admin/addons/

You can now join any number of machines by running the following on each node
as root:

  kubeadm join --token 96efd6.98bbb2f4603c026b 192.168.200.101:6443 --discovery-token-ca-cert-hash sha256:ccb794198ae65cb3c9e997be510c18023e0e9e064225a588997b9e6c64ebf9f1

```
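
For context on the `[dryrun] Would perform action ...` lines above: the dry-run client wraps a faked clientset and logs every API action instead of executing it, and the attached objects now carry `apiVersion`/`kind` because they are encoded with a versioned codec rather than plain YAML marshaling (the fix mentioned in the description). Below is a rough, self-contained sketch of that idea using client-go's fake clientset and testing reactors; it assumes the pre-context-argument client-go API of this era and is not the exact kubeadm implementation:

```go
package main

import (
	"fmt"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	fakeclientset "k8s.io/client-go/kubernetes/fake"
	clientsetscheme "k8s.io/client-go/kubernetes/scheme"
	core "k8s.io/client-go/testing"
)

func main() {
	client := fakeclientset.NewSimpleClientset()

	// Intercept every CREATE: log what would happen instead of persisting it.
	client.Fake.PrependReactor("create", "*", func(action core.Action) (bool, runtime.Object, error) {
		gvr := action.GetResource()
		fmt.Printf("[dryrun] Would perform action CREATE on resource %q in API group %q\n",
			gvr.Resource, gvr.GroupVersion().String())

		obj := action.(core.CreateAction).GetObject()
		// Encode via a versioned codec so apiVersion/kind are populated;
		// plain yaml.Marshal on a typed object leaves TypeMeta empty.
		info, ok := runtime.SerializerInfoForMediaType(clientsetscheme.Codecs.SupportedMediaTypes(), "application/yaml")
		if ok {
			encoder := clientsetscheme.Codecs.EncoderForVersion(info.Serializer, gvr.GroupVersion())
			if b, err := runtime.Encode(encoder, obj); err == nil {
				fmt.Printf("[dryrun] Attached object:\n%s", b)
			}
		}
		return true, obj, nil
	})

	// Any CREATE issued through this clientset is now only logged.
	client.CoreV1().ConfigMaps(metav1.NamespaceSystem).Create(&v1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "kubeadm-config"},
		Data:       map[string]string{"MasterConfiguration": "..."},
	})
}
```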

**Release note**:

```release-note
kubeadm: Implement a `--dry-run` mode and flag for `kubeadm`
```
@kubernetes/sig-cluster-lifecycle-pr-reviews @ncdc @sttts
Kubernetes Submit Queue, committed by GitHub on 2017-08-25 14:01:27 -07:00
14 changed files with 318 additions and 61 deletions

View File

@@ -45,6 +45,7 @@ go_library(
"//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//cmd/kubeadm/app/util/config:go_default_library",
"//cmd/kubeadm/app/util/dryrun:go_default_library",
"//cmd/kubeadm/app/util/kubeconfig:go_default_library",
"//cmd/kubeadm/app/util/pubkeypin:go_default_library",
"//cmd/kubeadm/app/util/token:go_default_library",

View File

@@ -139,6 +139,8 @@ func NewCmdConfigUploadFromFlags(out io.Writer, kubeConfigFile *string) *cobra.C
Using from-flags, you can upload configuration to the ConfigMap in the cluster using the same flags you'd give to kubeadm init.
If you initialized your cluster using a v1.7.x or lower kubeadm client and set some flag; you need to run this command with the
same flags before upgrading to v1.8 using 'kubeadm upgrade'.
The configuration is located in the %q namespace in the %q ConfigMap
`), metav1.NamespaceSystem, constants.MasterConfigurationConfigMap),
Run: func(cmd *cobra.Command, args []string) {
var err error

View File

@@ -21,6 +21,7 @@ import (
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"text/template"
"time"
@@ -53,6 +54,7 @@ import (
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
dryrunutil "k8s.io/kubernetes/cmd/kubeadm/app/util/dryrun"
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
"k8s.io/kubernetes/cmd/kubeadm/app/util/pubkeypin"
"k8s.io/kubernetes/pkg/api"
@@ -264,33 +266,67 @@ func (i *Init) Run(out io.Writer) error {
return fmt.Errorf("couldn't parse kubernetes version %q: %v", i.cfg.KubernetesVersion, err)
}
// Get directories to write files to; can be faked if we're dry-running
realCertsDir := i.cfg.CertificatesDir
certsDirToWriteTo, kubeConfigDir, manifestDir, err := getDirectoriesToUse(i.dryRun, i.cfg.CertificatesDir)
if err != nil {
return err
}
// certsDirToWriteTo is gonna equal cfg.CertificatesDir in the normal case, but gonna be a temp directory if dryrunning
i.cfg.CertificatesDir = certsDirToWriteTo
adminKubeConfigPath := filepath.Join(kubeConfigDir, kubeadmconstants.AdminKubeConfigFileName)
// PHASE 1: Generate certificates
if err := certsphase.CreatePKIAssets(i.cfg); err != nil {
return err
}
// PHASE 2: Generate kubeconfig files for the admin and the kubelet
if err := kubeconfigphase.CreateInitKubeConfigFiles(kubeadmconstants.KubernetesDir, i.cfg); err != nil {
if err := kubeconfigphase.CreateInitKubeConfigFiles(kubeConfigDir, i.cfg); err != nil {
return err
}
// Temporarily set cfg.CertificatesDir to the "real value" when writing controlplane manifests
// This is needed for writing the right kind of manifests
i.cfg.CertificatesDir = realCertsDir
// PHASE 3: Bootstrap the control plane
manifestPath := kubeadmconstants.GetStaticPodDirectory()
if err := controlplanephase.CreateInitStaticPodManifestFiles(manifestPath, i.cfg); err != nil {
if err := controlplanephase.CreateInitStaticPodManifestFiles(manifestDir, i.cfg); err != nil {
return err
}
// Add etcd static pod spec only if external etcd is not configured
if len(i.cfg.Etcd.Endpoints) == 0 {
if err := etcdphase.CreateLocalEtcdStaticPodManifestFile(manifestPath, i.cfg); err != nil {
if err := etcdphase.CreateLocalEtcdStaticPodManifestFile(manifestDir, i.cfg); err != nil {
return err
}
}
client, err := createClientsetAndOptionallyWaitForReady(i.cfg, i.dryRun)
// Revert the earlier CertificatesDir assignment to the directory that can be written to
i.cfg.CertificatesDir = certsDirToWriteTo
// If we're dry-running, print the generated manifests
if err := printFilesIfDryRunning(i.dryRun, manifestDir); err != nil {
return err
}
// Create a kubernetes client and wait for the API server to be healthy (if not dryrunning)
client, err := createClient(i.cfg, i.dryRun)
if err != nil {
return err
}
// waiter holds the apiclient.Waiter implementation of choice, responsible for querying the API server in various ways and waiting for conditions to be fulfilled
waiter := getWaiter(i.dryRun, client)
fmt.Printf("[init] Waiting for the kubelet to boot up the control plane as Static Pods from directory %q\n", kubeadmconstants.GetStaticPodDirectory())
fmt.Println("[init] This process often takes about a minute to perform or longer if the control plane images have to be pulled...")
// TODO: Adjust this timeout or start polling the kubelet API
// TODO: Make this timeout more realistic when we do create some more complex logic about the interaction with the kubelet
if err := waiter.WaitForAPI(); err != nil {
return err
}
// PHASE 4: Mark the master with the right label/taint
if err := markmasterphase.MarkMaster(client, i.cfg.NodeName); err != nil {
return err
@@ -316,7 +352,7 @@ func (i *Init) Run(out io.Writer) error {
}
// Create the cluster-info ConfigMap with the associated RBAC rules
if err := clusterinfophase.CreateBootstrapConfigMapIfNotExists(client, kubeadmconstants.GetAdminKubeConfigPath()); err != nil {
if err := clusterinfophase.CreateBootstrapConfigMapIfNotExists(client, adminKubeConfigPath); err != nil {
return err
}
if err := clusterinfophase.CreateClusterInfoRBACRules(client); err != nil {
@@ -347,11 +383,17 @@ func (i *Init) Run(out io.Writer) error {
// Temporary control plane is up, now we create our self hosted control
// plane components and remove the static manifests:
fmt.Println("[self-hosted] Creating self-hosted control plane...")
if err := selfhostingphase.CreateSelfHostedControlPlane(i.cfg, client); err != nil {
if err := selfhostingphase.CreateSelfHostedControlPlane(manifestDir, kubeConfigDir, i.cfg, client, waiter); err != nil {
return err
}
}
// Exit earlier if we're dryrunning
if i.dryRun {
fmt.Println("[dryrun] Finished dry-running successfully; above are the resources that would be created.")
return nil
}
// Load the CA certificate from so we can pin its public key
caCert, err := pkiutil.TryLoadCertFromDisk(i.cfg.CertificatesDir, kubeadmconstants.CACertAndKeyBaseName)
@@ -362,8 +404,7 @@ func (i *Init) Run(out io.Writer) error {
}
ctx := map[string]string{
"KubeConfigPath": kubeadmconstants.GetAdminKubeConfigPath(),
"KubeConfigName": kubeadmconstants.AdminKubeConfigFileName,
"KubeConfigPath": adminKubeConfigPath,
"Token": i.cfg.Token,
"CAPubKeyPin": pubkeypin.Hash(caCert),
"MasterHostPort": masterHostPort,
@@ -375,24 +416,59 @@ func (i *Init) Run(out io.Writer) error {
return initDoneTempl.Execute(out, ctx)
}
func createClientsetAndOptionallyWaitForReady(cfg *kubeadmapi.MasterConfiguration, dryRun bool) (clientset.Interface, error) {
// createClient creates a clientset.Interface object
func createClient(cfg *kubeadmapi.MasterConfiguration, dryRun bool) (clientset.Interface, error) {
if dryRun {
// If we're dry-running; we should create a faked client that answers some GETs in order to be able to do the full init flow and just logs the rest of requests
dryRunGetter := apiclient.NewInitDryRunGetter(cfg.NodeName, cfg.Networking.ServiceSubnet)
return apiclient.NewDryRunClient(dryRunGetter, os.Stdout), nil
}
// If we're acting for real,we should create a connection to the API server and wait for it to come up
client, err := kubeconfigutil.ClientSetFromFile(kubeadmconstants.GetAdminKubeConfigPath())
if err != nil {
return nil, err
// If we're acting for real, we should create a connection to the API server and wait for it to come up
return kubeconfigutil.ClientSetFromFile(kubeadmconstants.GetAdminKubeConfigPath())
}
// getDirectoriesToUse returns the (in order) certificates, kubeconfig and Static Pod manifest directories, followed by a possible error
// This behaves differently when dry-running vs the normal flow
func getDirectoriesToUse(dryRun bool, defaultPkiDir string) (string, string, string, error) {
if dryRun {
dryRunDir, err := ioutil.TempDir("", "kubeadm-init-dryrun")
if err != nil {
return "", "", "", fmt.Errorf("couldn't create a temporary directory: %v", err)
}
// Use the same temp dir for all
return dryRunDir, dryRunDir, dryRunDir, nil
}
fmt.Printf("[init] Waiting for the kubelet to boot up the control plane as Static Pods from directory %q\n", kubeadmconstants.GetStaticPodDirectory())
// TODO: Adjust this timeout or start polling the kubelet API
// TODO: Make this timeout more realistic when we do create some more complex logic about the interaction with the kubelet
if err := apiclient.WaitForAPI(client, 30*time.Minute); err != nil {
return nil, err
}
return client, nil
return defaultPkiDir, kubeadmconstants.KubernetesDir, kubeadmconstants.GetStaticPodDirectory(), nil
}
// printFilesIfDryRunning prints the Static Pod manifests to stdout and informs about the temporary directory to go and lookup
func printFilesIfDryRunning(dryRun bool, manifestDir string) error {
if !dryRun {
return nil
}
fmt.Printf("[dryrun] Wrote certificates, kubeconfig files and control plane manifests to %q\n", manifestDir)
fmt.Println("[dryrun] Won't print certificates or kubeconfig files due to the sensitive nature of them")
fmt.Printf("[dryrun] Please go and examine the %q directory for details about what would be written\n", manifestDir)
// Print the contents of the upgraded manifests and pretend like they were in /etc/kubernetes/manifests
files := []dryrunutil.FileToPrint{}
for _, component := range kubeadmconstants.MasterComponents {
realPath := kubeadmconstants.GetStaticPodFilepath(component, manifestDir)
outputPath := kubeadmconstants.GetStaticPodFilepath(component, kubeadmconstants.GetStaticPodDirectory())
files = append(files, dryrunutil.NewFileToPrint(realPath, outputPath))
}
return dryrunutil.PrintDryRunFiles(files, os.Stdout)
}
// getWaiter gets the right waiter implementation
func getWaiter(dryRun bool, client clientset.Interface) apiclient.Waiter {
if dryRun {
return dryrunutil.NewWaiter()
}
// TODO: Adjust this timeout slightly?
return apiclient.NewKubeWaiter(client, 30*time.Minute, os.Stdout)
}

View File

@@ -39,6 +39,7 @@ go_library(
"//cmd/kubeadm/app/phases/uploadconfig:go_default_library",
"//cmd/kubeadm/app/preflight:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//cmd/kubeadm/app/util/config:go_default_library",
"//cmd/kubeadm/app/util/kubeconfig:go_default_library",
"//pkg/api:go_default_library",

View File

@@ -17,16 +17,20 @@ limitations under the License.
package phases
import (
"os"
"strings"
"time"
"github.com/spf13/cobra"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
cmdutil "k8s.io/kubernetes/cmd/kubeadm/app/cmd/util"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/features"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/selfhosting"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
configutil "k8s.io/kubernetes/cmd/kubeadm/app/util/config"
kubeconfigutil "k8s.io/kubernetes/cmd/kubeadm/app/util/kubeconfig"
"k8s.io/kubernetes/pkg/api"
@@ -78,7 +82,8 @@ func getSelfhostingSubCommand() *cobra.Command {
kubeadmutil.CheckErr(err)
// Converts the Static Pod-hosted control plane into a self-hosted one
err = selfhosting.CreateSelfHostedControlPlane(internalcfg, client)
waiter := apiclient.NewKubeWaiter(client, 2*time.Minute, os.Stdout)
err = selfhosting.CreateSelfHostedControlPlane(constants.GetStaticPodDirectory(), constants.KubernetesDir, internalcfg, client, waiter)
kubeadmutil.CheckErr(err)
},
}

View File

@@ -54,7 +54,10 @@ const (
// 8. In order to avoid race conditions, we have to make sure that static pod is deleted correctly before we continue
// Otherwise, there is a race condition when we proceed without kubelet having restarted the API server correctly and the next .Create call flakes
// 9. Do that for the kube-apiserver, kube-controller-manager and kube-scheduler in a loop
func CreateSelfHostedControlPlane(cfg *kubeadmapi.MasterConfiguration, client clientset.Interface) error {
func CreateSelfHostedControlPlane(manifestsDir, kubeConfigDir string, cfg *kubeadmapi.MasterConfiguration, client clientset.Interface, waiter apiclient.Waiter) error {
// Adjust the timeout slightly to something self-hosting specific
waiter.SetTimeout(selfHostingWaitTimeout)
// Here the map of different mutators to use for the control plane's podspec is stored
mutators := getDefaultMutators()
@@ -66,7 +69,7 @@ func CreateSelfHostedControlPlane(cfg *kubeadmapi.MasterConfiguration, client cl
if err := uploadTLSSecrets(client, cfg.CertificatesDir); err != nil {
return err
}
if err := uploadKubeConfigSecrets(client); err != nil {
if err := uploadKubeConfigSecrets(client, kubeConfigDir); err != nil {
return err
}
// Add the store-certs-in-secrets-specific mutators here so that the self-hosted component starts using them
@@ -77,7 +80,7 @@ func CreateSelfHostedControlPlane(cfg *kubeadmapi.MasterConfiguration, client cl
for _, componentName := range kubeadmconstants.MasterComponents {
start := time.Now()
manifestPath := kubeadmconstants.GetStaticPodFilepath(componentName, kubeadmconstants.GetStaticPodDirectory())
manifestPath := kubeadmconstants.GetStaticPodFilepath(componentName, manifestsDir)
// Since we want this function to be idempotent; just continue and try the next component if this file doesn't exist
if _, err := os.Stat(manifestPath); err != nil {
@@ -102,7 +105,7 @@ func CreateSelfHostedControlPlane(cfg *kubeadmapi.MasterConfiguration, client cl
}
// Wait for the self-hosted component to come up
if err := apiclient.WaitForPodsWithLabel(client, selfHostingWaitTimeout, os.Stdout, buildSelfHostedWorkloadLabelQuery(componentName)); err != nil {
if err := waiter.WaitForPodsWithLabel(buildSelfHostedWorkloadLabelQuery(componentName)); err != nil {
return err
}
@@ -115,12 +118,12 @@ func CreateSelfHostedControlPlane(cfg *kubeadmapi.MasterConfiguration, client cl
// remove the Static Pod (or the mirror Pod respectively). This implicitely also tests that the API server endpoint is healthy,
// because this blocks until the API server returns a 404 Not Found when getting the Static Pod
staticPodName := fmt.Sprintf("%s-%s", componentName, cfg.NodeName)
if err := apiclient.WaitForStaticPodToDisappear(client, selfHostingWaitTimeout, staticPodName); err != nil {
if err := waiter.WaitForPodToDisappear(staticPodName); err != nil {
return err
}
// Just as an extra safety check; make sure the API server is returning ok at the /healthz endpoint (although we know it could return a GET answer for a Pod above)
if err := apiclient.WaitForAPI(client, selfHostingWaitTimeout); err != nil {
if err := waiter.WaitForAPI(); err != nil {
return err
}

View File

@@ -19,7 +19,7 @@ package selfhosting
import (
"fmt"
"io/ioutil"
"path"
"path/filepath"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -184,8 +184,8 @@ func uploadTLSSecrets(client clientset.Interface, certDir string) error {
for _, tlsKeyPair := range getTLSKeyPairs() {
secret, err := createTLSSecretFromFiles(
tlsKeyPair.name,
path.Join(certDir, tlsKeyPair.cert),
path.Join(certDir, tlsKeyPair.key),
filepath.Join(certDir, tlsKeyPair.cert),
filepath.Join(certDir, tlsKeyPair.key),
)
if err != nil {
return err
@@ -200,13 +200,13 @@ func uploadTLSSecrets(client clientset.Interface, certDir string) error {
return nil
}
func uploadKubeConfigSecrets(client clientset.Interface) error {
func uploadKubeConfigSecrets(client clientset.Interface, kubeConfigDir string) error {
files := []string{
kubeadmconstants.SchedulerKubeConfigFileName,
kubeadmconstants.ControllerManagerKubeConfigFileName,
}
for _, file := range files {
kubeConfigPath := path.Join(kubeadmconstants.KubernetesDir, file)
kubeConfigPath := filepath.Join(kubeConfigDir, file)
secret, err := createOpaqueSecretFromFile(file, kubeConfigPath)
if err != nil {
return err
@@ -257,7 +257,7 @@ func createOpaqueSecretFromFile(secretName, file string) (*v1.Secret, error) {
},
Type: v1.SecretTypeOpaque,
Data: map[string][]byte{
path.Base(file): fileBytes,
filepath.Base(file): fileBytes,
},
}, nil
}

View File

@@ -51,6 +51,7 @@ filegroup(
":package-srcs",
"//cmd/kubeadm/app/util/apiclient:all-srcs",
"//cmd/kubeadm/app/util/config:all-srcs",
"//cmd/kubeadm/app/util/dryrun:all-srcs",
"//cmd/kubeadm/app/util/kubeconfig:all-srcs",
"//cmd/kubeadm/app/util/pubkeypin:all-srcs",
"//cmd/kubeadm/app/util/staticpod:all-srcs",

View File

@@ -18,13 +18,13 @@ go_library(
deps = [
"//cmd/kubeadm/app/constants:go_default_library",
"//pkg/registry/core/service/ipallocator:go_default_library",
"//vendor/github.com/ghodss/yaml:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/api/extensions/v1beta1:go_default_library",
"//vendor/k8s.io/api/rbac/v1beta1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/intstr:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/dynamic:go_default_library",

View File

@@ -23,10 +23,11 @@ import (
"io"
"strings"
"github.com/ghodss/yaml"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
clientset "k8s.io/client-go/kubernetes"
fakeclientset "k8s.io/client-go/kubernetes/fake"
clientsetscheme "k8s.io/client-go/kubernetes/scheme"
core "k8s.io/client-go/testing"
)
@@ -37,12 +38,18 @@ type DryRunGetter interface {
}
// MarshalFunc takes care of converting any object to a byte array for displaying the object to the user
type MarshalFunc func(runtime.Object) ([]byte, error)
type MarshalFunc func(runtime.Object, schema.GroupVersion) ([]byte, error)
// DefaultMarshalFunc is the default MarshalFunc used; uses YAML to print objects to the user
func DefaultMarshalFunc(obj runtime.Object) ([]byte, error) {
b, err := yaml.Marshal(obj)
return b, err
func DefaultMarshalFunc(obj runtime.Object, gv schema.GroupVersion) ([]byte, error) {
mediaType := "application/yaml"
info, ok := runtime.SerializerInfoForMediaType(clientsetscheme.Codecs.SupportedMediaTypes(), mediaType)
if !ok {
return []byte{}, fmt.Errorf("unsupported media type %q", mediaType)
}
encoder := clientsetscheme.Codecs.EncoderForVersion(info.Serializer, gv)
return runtime.Encode(encoder, obj)
}
// DryRunClientOptions specifies options to pass to NewDryRunClientWithOpts in order to get a dryrun clientset
@@ -115,10 +122,10 @@ func NewDryRunClientWithOpts(opts DryRunClientOptions) clientset.Interface {
if opts.PrintGETAndLIST {
// Print the marshalled object format with one tab indentation
objBytes, err := opts.MarshalFunc(obj)
objBytes, err := opts.MarshalFunc(obj, action.GetResource().GroupVersion())
if err == nil {
fmt.Println("[dryrun] Returning faked GET response:")
printBytesWithLinePrefix(opts.Writer, objBytes, "\t")
PrintBytesWithLinePrefix(opts.Writer, objBytes, "\t")
}
}
@@ -140,10 +147,10 @@ func NewDryRunClientWithOpts(opts DryRunClientOptions) clientset.Interface {
if opts.PrintGETAndLIST {
// Print the marshalled object format with one tab indentation
objBytes, err := opts.MarshalFunc(objs)
objBytes, err := opts.MarshalFunc(objs, action.GetResource().GroupVersion())
if err == nil {
fmt.Println("[dryrun] Returning faked LIST response:")
printBytesWithLinePrefix(opts.Writer, objBytes, "\t")
PrintBytesWithLinePrefix(opts.Writer, objBytes, "\t")
}
}
@@ -214,10 +221,10 @@ func logDryRunAction(action core.Action, w io.Writer, marshalFunc MarshalFunc) {
objAction, ok := action.(actionWithObject)
if ok && objAction.GetObject() != nil {
// Print the marshalled object with a tab indentation
objBytes, err := marshalFunc(objAction.GetObject())
objBytes, err := marshalFunc(objAction.GetObject(), action.GetResource().GroupVersion())
if err == nil {
fmt.Println("[dryrun] Attached object:")
printBytesWithLinePrefix(w, objBytes, "\t")
PrintBytesWithLinePrefix(w, objBytes, "\t")
}
}
@@ -228,8 +235,8 @@ func logDryRunAction(action core.Action, w io.Writer, marshalFunc MarshalFunc) {
}
}
// printBytesWithLinePrefix prints objBytes to writer w with linePrefix in the beginning of every line
func printBytesWithLinePrefix(w io.Writer, objBytes []byte, linePrefix string) {
// PrintBytesWithLinePrefix prints objBytes to writer w with linePrefix in the beginning of every line
func PrintBytesWithLinePrefix(w io.Writer, objBytes []byte, linePrefix string) {
scanner := bufio.NewScanner(bytes.NewReader(objBytes))
for scanner.Scan() {
fmt.Fprintf(w, "%s%s\n", linePrefix, scanner.Text())

View File

@@ -60,6 +60,8 @@ func TestLogDryRunAction(t *testing.T) {
},
}),
expectedBytes: []byte(`[dryrun] Would perform action CREATE on resource "services" in API group "core/v1"
apiVersion: v1
kind: Service
metadata:
creationTimestamp: null
name: foo

View File

@@ -30,12 +30,40 @@ import (
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
)
// Waiter is an interface for waiting for criterias in Kubernetes to happen
type Waiter interface {
// WaitForAPI waits for the API Server's /healthz endpoint to become "ok"
WaitForAPI() error
// WaitForPodsWithLabel waits for Pods in the kube-system namespace to become Ready
WaitForPodsWithLabel(kvLabel string) error
// WaitForPodToDisappear waits for the given Pod in the kube-system namespace to be deleted
WaitForPodToDisappear(staticPodName string) error
// SetTimeout adjusts the timeout to the specified duration
SetTimeout(timeout time.Duration)
}
// KubeWaiter is an implementation of Waiter that is backed by a Kubernetes client
type KubeWaiter struct {
client clientset.Interface
timeout time.Duration
writer io.Writer
}
// NewKubeWaiter returns a new Waiter object that talks to the given Kubernetes cluster
func NewKubeWaiter(client clientset.Interface, timeout time.Duration, writer io.Writer) Waiter {
return &KubeWaiter{
client: client,
timeout: timeout,
writer: writer,
}
}
// WaitForAPI waits for the API Server's /healthz endpoint to report "ok"
func WaitForAPI(client clientset.Interface, timeout time.Duration) error {
func (w *KubeWaiter) WaitForAPI() error {
start := time.Now()
return wait.PollImmediate(constants.APICallRetryInterval, timeout, func() (bool, error) {
return wait.PollImmediate(constants.APICallRetryInterval, w.timeout, func() (bool, error) {
healthStatus := 0
client.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus)
w.client.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus)
if healthStatus != http.StatusOK {
return false, nil
}
@@ -47,19 +75,19 @@ func WaitForAPI(client clientset.Interface, timeout time.Duration) error {
// WaitForPodsWithLabel will lookup pods with the given label and wait until they are all
// reporting status as running.
func WaitForPodsWithLabel(client clientset.Interface, timeout time.Duration, out io.Writer, labelKeyValPair string) error {
func (w *KubeWaiter) WaitForPodsWithLabel(kvLabel string) error {
lastKnownPodNumber := -1
return wait.PollImmediate(constants.APICallRetryInterval, timeout, func() (bool, error) {
listOpts := metav1.ListOptions{LabelSelector: labelKeyValPair}
pods, err := client.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts)
return wait.PollImmediate(constants.APICallRetryInterval, w.timeout, func() (bool, error) {
listOpts := metav1.ListOptions{LabelSelector: kvLabel}
pods, err := w.client.CoreV1().Pods(metav1.NamespaceSystem).List(listOpts)
if err != nil {
fmt.Fprintf(out, "[apiclient] Error getting Pods with label selector %q [%v]\n", labelKeyValPair, err)
fmt.Fprintf(w.writer, "[apiclient] Error getting Pods with label selector %q [%v]\n", kvLabel, err)
return false, nil
}
if lastKnownPodNumber != len(pods.Items) {
fmt.Fprintf(out, "[apiclient] Found %d Pods for label selector %s\n", len(pods.Items), labelKeyValPair)
fmt.Fprintf(w.writer, "[apiclient] Found %d Pods for label selector %s\n", len(pods.Items), kvLabel)
lastKnownPodNumber = len(pods.Items)
}
@@ -77,10 +105,10 @@ func WaitForPodsWithLabel(client clientset.Interface, timeout time.Duration, out
})
}
// WaitForStaticPodToDisappear blocks until it timeouts or gets a "NotFound" response from the API Server when getting the Static Pod in question
func WaitForStaticPodToDisappear(client clientset.Interface, timeout time.Duration, podName string) error {
return wait.PollImmediate(constants.APICallRetryInterval, timeout, func() (bool, error) {
_, err := client.CoreV1().Pods(metav1.NamespaceSystem).Get(podName, metav1.GetOptions{})
// WaitForPodToDisappear blocks until it timeouts or gets a "NotFound" response from the API Server when getting the Static Pod in question
func (w *KubeWaiter) WaitForPodToDisappear(podName string) error {
return wait.PollImmediate(constants.APICallRetryInterval, w.timeout, func() (bool, error) {
_, err := w.client.CoreV1().Pods(metav1.NamespaceSystem).Get(podName, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
fmt.Printf("[apiclient] The Static Pod %q is now removed\n", podName)
return true, nil
@@ -89,6 +117,11 @@ func WaitForStaticPodToDisappear(client clientset.Interface, timeout time.Durati
})
}
// SetTimeout adjusts the timeout to the specified duration
func (w *KubeWaiter) SetTimeout(timeout time.Duration) {
w.timeout = timeout
}
// TryRunCommand runs a function a maximum of failureThreshold times, and retries on error. If failureThreshold is hit; the last error is returned
func TryRunCommand(f func() error, failureThreshold uint8) error {
var numFailures uint8

View File

@@ -0,0 +1,26 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["dryrun.go"],
visibility = ["//visibility:public"],
deps = [
"//cmd/kubeadm/app/util/apiclient:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,100 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dryrun
import (
"fmt"
"io"
"io/ioutil"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/errors"
"k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient"
)
// FileToPrint represents a temporary file on disk that might want to be aliased when printing
// Useful for things like loading a file from /tmp/ but saying to the user "Would write file foo to /etc/kubernetes/..."
type FileToPrint struct {
RealPath string
PrintPath string
}
// NewFileToPrint makes a new instance of FileToPrint with the specified arguments
func NewFileToPrint(realPath, printPath string) FileToPrint {
return FileToPrint{
RealPath: realPath,
PrintPath: printPath,
}
}
// PrintDryRunFiles prints the contents of the FileToPrints given to it to the writer w
func PrintDryRunFiles(files []FileToPrint, w io.Writer) error {
errs := []error{}
for _, file := range files {
if len(file.RealPath) == 0 {
continue
}
fileBytes, err := ioutil.ReadFile(file.RealPath)
if err != nil {
errs = append(errs, err)
continue
}
// Make it possible to fake the path of the file; i.e. you may want to tell the user
// "Here is what would be written to /etc/kubernetes/admin.conf", although you wrote it to /tmp/kubeadm-dryrun/admin.conf and are loading it from there
// Fall back to the "real" path if PrintPath is not set
outputFilePath := file.PrintPath
if len(outputFilePath) == 0 {
outputFilePath = file.RealPath
}
fmt.Fprintf(w, "[dryrun] Would write file %q with content:\n", outputFilePath)
apiclient.PrintBytesWithLinePrefix(w, fileBytes, "\t")
}
return errors.NewAggregate(errs)
}
// Waiter is an implementation of apiclient.Waiter that should be used for dry-running
type Waiter struct{}
// NewWaiter returns a new Waiter object that talks to the given Kubernetes cluster
func NewWaiter() apiclient.Waiter {
return &Waiter{}
}
// WaitForAPI just returns a dummy nil, to indicate that the program should just proceed
func (w *Waiter) WaitForAPI() error {
fmt.Println("[dryrun] Would wait for the API Server's /healthz endpoint to return 'ok'")
return nil
}
// WaitForPodsWithLabel just returns a dummy nil, to indicate that the program should just proceed
func (w *Waiter) WaitForPodsWithLabel(kvLabel string) error {
fmt.Printf("[dryrun] Would wait for the Pods with the label %q in the %s namespace to become Running\n", kvLabel, metav1.NamespaceSystem)
return nil
}
// WaitForPodToDisappear just returns a dummy nil, to indicate that the program should just proceed
func (w *Waiter) WaitForPodToDisappear(podName string) error {
fmt.Printf("[dryrun] Would wait for the %q Pod in the %s namespace to be deleted\n", podName, metav1.NamespaceSystem)
return nil
}
// SetTimeout is a no-op; we don't wait in this implementation
func (w *Waiter) SetTimeout(_ time.Duration) {}