From c7189fd5557907ee5d481b66606672788895d81b Mon Sep 17 00:00:00 2001
From: Zihong Zheng
Date: Fri, 24 Feb 2017 16:42:16 -0800
Subject: [PATCH 1/5] Supports 'ensure exist' class addon in addon-manager

---
 cluster/addons/addon-manager/kube-addons.sh | 79 +++++++++++----------
 1 file changed, 43 insertions(+), 36 deletions(-)

diff --git a/cluster/addons/addon-manager/kube-addons.sh b/cluster/addons/addon-manager/kube-addons.sh
index 63db5d5e0aa..008aa766282 100755
--- a/cluster/addons/addon-manager/kube-addons.sh
+++ b/cluster/addons/addon-manager/kube-addons.sh
@@ -37,6 +37,16 @@ ADDON_PATH=${ADDON_PATH:-/etc/kubernetes/addons}
 
 SYSTEM_NAMESPACE=kube-system
 
+# Addons can use this label with two modes:
+# - ADDON_MANAGER_LABEL=Reconcile
+# - ADDON_MANAGER_LABEL=EnsureExists
+ADDON_MANAGER_LABEL="addonmanager.kubernetes.io/mode"
+# This label is deprecated (it only affects Addon Manager). In a future
+# release, addon-manager may stop respecting it. Addons with
+# CLUSTER_SERVICE_LABEL=true and without ADDON_MANAGER_LABEL=EnsureExists
+# will be reconciled for now.
+CLUSTER_SERVICE_LABEL="kubernetes.io/cluster-service"
+
 # Remember that you can't log from functions that print some output (because
 # logs are also printed on stdout).
 # $1 level
@@ -70,28 +80,6 @@ function log() {
   esac
 }
 
-# $1 command to execute.
-# $2 count of tries to execute the command.
-# $3 delay in seconds between two consecutive tries
-function run_until_success() {
-  local -r command=$1
-  local tries=$2
-  local -r delay=$3
-  local -r command_name=$1
-  while [ ${tries} -gt 0 ]; do
-    log DBG "executing: '$command'"
-    # let's give the command as an argument to bash -c, so that we can use
-    # && and || inside the command itself
-    /bin/bash -c "${command}" && \
-      log DB3 "== Successfully executed ${command_name} at $(date -Is) ==" && \
-      return 0
-    let tries=tries-1
-    log WRN "== Failed to execute ${command_name} at $(date -Is). ${tries} tries remaining. =="
-    sleep ${delay}
-  done
-  return 1
-}
-
 # $1 filename of addon to start.
 # $2 count of tries to start the addon.
 # $3 delay in seconds between two consecutive tries
@@ -133,7 +121,7 @@ function annotate_addons() {
 
   # Annotating objects that already have this annotation will fail.
   # Only try once for now.
-  ${KUBECTL} ${KUBECTL_OPTS} annotate ${obj_type} --namespace=${SYSTEM_NAMESPACE} -l kubernetes.io/cluster-service=true \
+  ${KUBECTL} ${KUBECTL_OPTS} annotate ${obj_type} --namespace=${SYSTEM_NAMESPACE} -l ${CLUSTER_SERVICE_LABEL}=true \
     kubectl.kubernetes.io/last-applied-configuration='' --overwrite=false
 
   if [[ $? -eq 0 ]]; then
@@ -144,19 +132,34 @@
 }
 
 # $1 enable --prune or not.
-# $2 additional option for command.
-function update_addons() {
+function reconcile_addons() {
   local -r enable_prune=$1;
-  local -r additional_opt=$2;
 
-  run_until_success "${KUBECTL} ${KUBECTL_OPTS} apply --namespace=${SYSTEM_NAMESPACE} -f ${ADDON_PATH} \
-    --prune=${enable_prune} -l kubernetes.io/cluster-service=true --recursive ${additional_opt}" 3 5
+  # TODO: Remove the first command in a future release.
+  # Adding this for backward compatibility. Old addons that have CLUSTER_SERVICE_LABEL=true
+  # and don't have ADDON_MANAGER_LABEL=EnsureExists will still be reconciled.
+  # Filter out the `configured` messages to avoid noisy logging.
+  # `created`, `pruned` and errors will still be logged.
+  log INFO "== Reconciling with deprecated label =="
+  ${KUBECTL} ${KUBECTL_OPTS} apply --namespace=${SYSTEM_NAMESPACE} -f ${ADDON_PATH} \
+    -l ${CLUSTER_SERVICE_LABEL}=true,${ADDON_MANAGER_LABEL}!=EnsureExists \
+    --prune=${enable_prune} --recursive | grep -v configured
 
-  if [[ $? -eq 0 ]]; then
-    log INFO "== Kubernetes addon update completed successfully at $(date -Is) =="
-  else
-    log WRN "== Kubernetes addon update completed with errors at $(date -Is) =="
-  fi
+  log INFO "== Reconciling with addon-manager label =="
+  ${KUBECTL} ${KUBECTL_OPTS} apply --namespace=${SYSTEM_NAMESPACE} -f ${ADDON_PATH} \
+    -l ${CLUSTER_SERVICE_LABEL}!=true,${ADDON_MANAGER_LABEL}=Reconcile \
+    --prune=${enable_prune} --recursive | grep -v configured
+
+  log INFO "== Kubernetes addon reconcile completed at $(date -Is) =="
+}
+
+function ensure_addons() {
+  # Creating objects that already exist will fail.
+  # Filter out the `AlreadyExists` messages to avoid noisy logging.
+  ${KUBECTL} ${KUBECTL_OPTS} create --namespace=${SYSTEM_NAMESPACE} -f ${ADDON_PATH} \
+    -l ${ADDON_MANAGER_LABEL}=EnsureExists --recursive 2>&1 | grep -v AlreadyExists
+
+  log INFO "== Kubernetes addon ensure completed at $(date -Is) =="
 }
 
 # The business logic for whether a given object should be created
@@ -188,9 +191,11 @@ for obj in $(find /etc/kubernetes/admission-controls \( -name \*.yaml -o -name \*.json \) -print); do
   log INFO "++ obj ${obj} is created ++"
 done
 
+# TODO: The annotate and spin up parts should be removed after 1.6 is released.
+
 # Fake the "kubectl.kubernetes.io/last-applied-configuration" annotation on old resources
 # in order to clean them up by `kubectl apply --prune`.
-# RCs have to be annotated for 1.4->1.5 upgrade, because we are migrating from RCs to deployments for all default addons.
+# RCs have to be annotated for the 1.4->1.5+ upgrade, because we migrated from RCs to deployments for all default addons in 1.5.
 # Other types of resources will also need this fake annotation if their names are changed,
 # otherwise they would be leaked during upgrade.
log INFO "== Annotating the old addon resources at $(date -Is) =="
annotate_addons ReplicationController
annotate_addons Deployment

@@ -202,7 +207,8 @@
 # The new Deployments will not fight for pods created by old RCs with the same label, because of the additional `pod-template-hash` label.
 # Apply will fail if some fields were modified in a way that is not allowed; in that case, bump up the addon version and name (i.e. handle it externally).
 log INFO "== Executing apply to spin up new addon resources at $(date -Is) =="
-update_addons false
+reconcile_addons false
+ensure_addons
 
 # Wait for new addons to be spun up before deleting old resources
 log INFO "== Wait for addons to be spun up at $(date -Is) =="
@@ -215,7 +221,8 @@
 log INFO "== Entering periodical apply loop at $(date -Is) =="
 while true; do
   start_sec=$(date +"%s")
   # Only print stderr to keep the log readable
-  update_addons true ">/dev/null"
+  reconcile_addons true
+  ensure_addons
   end_sec=$(date +"%s")
   len_sec=$((${end_sec}-${start_sec}))
   # subtract the time passed from the sleep time

From 72b35ce7d7d14e995d3b67941c849aeb2760bf92 Mon Sep 17 00:00:00 2001
From: Zihong Zheng
Date: Fri, 24 Feb 2017 16:42:41 -0800
Subject: [PATCH 2/5] Updates READMEs regarding the new behavior of addon-manager

---
 cluster/addons/README.md               | 63 +++++++-----------
 cluster/addons/addon-manager/README.md | 34 +++++++++++---
 2 files changed, 49 insertions(+), 48 deletions(-)

diff --git a/cluster/addons/README.md b/cluster/addons/README.md
index 52590882459..7c305cedcb3 100644
--- a/cluster/addons/README.md
+++ b/cluster/addons/README.md
@@ -1,53 +1,34 @@
 # Cluster add-ons
 
+## Overview
+
 Cluster add-ons are resources like Services and Deployments (with pods) that are
 shipped with the Kubernetes binaries and are considered an inherent part of the
-Kubernetes clusters. The add-ons are visible through the API (they can be listed using
-`kubectl`), but direct manipulation of these objects through Apiserver is discouraged
-because the system will bring them back to the original state, in particular:
-- If an add-on is deleted, it will be recreated automatically.
-- If an add-on is updated through Apiserver, it will be reconfigured to the state given by
-the supplied fields in the initial config.
+Kubernetes clusters.
 
-On the cluster, the add-ons are kept in `/etc/kubernetes/addons` on the master node, in
-yaml / json files. The addon manager periodically `kubectl apply`s the contents of this
-directory. Any legit modification would be reflected on the API objects accordingly.
-Particularly, rolling-update for deployments is now supported.
+There are currently two classes of add-ons:
+- Add-ons that will be reconciled.
+- Add-ons that will be created if they don't exist.
 
-Each add-on must specify the following label: `kubernetes.io/cluster-service: true`.
-Config files that do not define this label will be ignored. For those resources
-exist in `kube-system` namespace but not in `/etc/kubernetes/addons`, addon manager
-will attempt to remove them if they are attached with this label. Currently the other
-usage of `kubernetes.io/cluster-service` is for `kubectl cluster-info` command to recognize
-these cluster services.
+More details can be found in [addon-manager/README.md](addon-manager/README.md).
 
-The suggested naming for most types of resources is just `<basename>` (with no version
-number) because we do not expect the resource name to change. But resources like `Pod`
-, `ReplicationController` and `DaemonSet` are exceptional. As `Pod` updates may not change
-fields other than `containers[*].image` or `spec.activeDeadlineSeconds` and may not add or
-remove containers, it may not be sufficient during a major update. For `ReplicationController`,
-most of the modifications would be legit, but the underlying pods would not got re-created
-automatically. `DaemonSet` has similar problem as the `ReplicationController`. In these
-cases, the suggested naming is `<basename>-<version>`. When version changes, the system will
-delete the old one and create the new one (order not guaranteed).
+## Cooperating with Horizontal / Vertical Auto-Scaling ("Reconcile" class add-ons)
 
-# Add-on update procedure
+"Reconcile" class addons will be periodically reconciled to the original state given
+by the initial config. In order to keep Horizontal / Vertical Auto-scaling functional,
+the related fields in the config should be left unset. More specifically, leave
+`replicas` in `ReplicationController` / `Deployment` / `ReplicaSet` unset for Horizontal
+Scaling, and leave the container `resources` unset for Vertical Scaling. The periodic
+reconcile won't clobber these fields, so they can be managed by the Horizontal /
+Vertical Auto-scalers.
 
-To update add-ons, just update the contents of `/etc/kubernetes/addons`
-directory with the desired definition of add-ons. Then the system will take care
-of:
+## Add-on naming
 
-- Removing objects from the API server whose manifest was removed.
-- Creating objects from new manifests
-- Updating objects whose fields are legally changed.
-
-# Cooperating with Horizontal / Vertical Auto-Scaling
-
-As all cluster add-ons will be reconciled to the original state given by the initial config.
-In order to make Horizontal / Vertical Auto-scaling functional, the related fields in config should
-be left unset. More specifically, leave `replicas` in `ReplicationController` / `Deployment`
-/ `ReplicaSet` unset for Horizontal Scaling, and leave `resources` for container unset for Vertical
-Scaling. The periodical update won't include these specs, which will be managed by Horizontal / Vertical
- Auto-scaler.
+The suggested naming for most of the resources is `<basename>` (with no version number),
+though resources like `Pod`, `ReplicationController` and `DaemonSet` are exceptions.
+It would be hard to update a `Pod` because many of its fields are immutable. For
+`ReplicationController` and `DaemonSet`, an in-place update may not trigger the
+underlying pods to be re-created. You probably need to change their names during an
+update to trigger a complete deletion and creation.
 
 [![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/cluster/addons/README.md?pixel)]()
diff --git a/cluster/addons/addon-manager/README.md b/cluster/addons/addon-manager/README.md
index e2fb4413b77..b9da7039ba4 100644
--- a/cluster/addons/addon-manager/README.md
+++ b/cluster/addons/addon-manager/README.md
@@ -1,15 +1,35 @@
-### addon-manager
+### Addon-manager
 
-The `addon-manager` periodically `kubectl apply`s the Kubernetes manifest in the `/etc/kubernetes/addons` directory,
-and handles any added / updated / deleted addon.
+addon-manager manages two classes of addons with given template files (see the example
+manifests after these lists).
+- Addons with label `addonmanager.kubernetes.io/mode=Reconcile` will be periodically
+reconciled. Direct manipulation of these addons through the apiserver is discouraged
+because addon-manager will bring them back to the original state. In particular:
+  - Addon will be re-created if it is deleted.
+  - Addon will be periodically reconfigured to the state given by the supplied fields
+  in the template file.
+  - Addon will be deleted when its manifest file is deleted.
+- Addons with label `addonmanager.kubernetes.io/mode=EnsureExists` will be checked for
+existence only. Users can edit these addons as they want. In particular:
+  - Addon will only be created/re-created with the given template file when there is no
+  instance of the resource with that name.
+  - Addon will not be deleted when the manifest file is deleted.
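For illustration, here is a minimal sketch of the two modes (the `example-reconcile` and `example-ensure` ConfigMaps and the file name are hypothetical, not shipped addons):

```sh
# Declare one hypothetical addon per mode; only the labels matter here.
cat <<'EOF' >/etc/kubernetes/addons/example-addons.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-reconcile          # hypothetical; kept in sync with this file
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
data:
  managed.key: "manual edits here will be reverted"
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: example-ensure             # hypothetical; created only if missing
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: EnsureExists
data:
  tunable.key: "manual edits here will stick"
EOF

# Inspect which addons fall into each class, plus the deprecated-label addons
# that are still treated as "Reconcile" class for now.
kubectl get configmaps --namespace=kube-system -l addonmanager.kubernetes.io/mode=Reconcile
kubectl get configmaps --namespace=kube-system -l addonmanager.kubernetes.io/mode=EnsureExists
kubectl get all --namespace=kube-system \
  -l 'kubernetes.io/cluster-service=true,addonmanager.kubernetes.io/mode!=EnsureExists'
```

On the next reconcile pass, addon-manager would revert manual edits to `example-reconcile` but leave `example-ensure` untouched, which is the entire behavioral difference between the two labels.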
 
-It supports all types of resource.
-The requirement is to label them with `kubernetes.io/cluster-service: "true"`.
-
-The `addon-manager` is built for multiple architectures.
+Notes:
+- Label `kubernetes.io/cluster-service=true` is deprecated (it only affects Addon
+Manager). In a future release (after roughly one year), Addon Manager may stop
+respecting it. Addons that have this label but lack
+`addonmanager.kubernetes.io/mode=EnsureExists` will be treated as "Reconcile" class
+addons for now.
+- Resources under $ADDON_PATH (default `/etc/kubernetes/addons/`) need to have one of
+these two labels, and namespaced resources need to be in the `kube-system` namespace.
+Resources that do not satisfy both rules will be ignored.
+- The above label and namespace rules do not apply to `/opt/namespace.yaml` or to
+resources under `/etc/kubernetes/admission-controls/`. addon-manager will attempt to
+create them regardless during startup.
 
 #### How to release
 
+The `addon-manager` is built for multiple architectures.
+
 1. Change something in the source
 2. Bump `VERSION` in the `Makefile`
 3. Bump `KUBECTL_VERSION` in the `Makefile` if required

From 7eb9b81d67076b5f8855560bc081d1d36ba56bfb Mon Sep 17 00:00:00 2001
From: Zihong Zheng
Date: Fri, 24 Feb 2017 16:44:21 -0800
Subject: [PATCH 3/5] Updates test/e2e/addon_update.go to match addon-manager's new behavior

---
 test/e2e/addon_update.go   | 254 +++++++++++++++++++------------------
 test/e2e/framework/util.go |  58 +++++++--
 2 files changed, 179 insertions(+), 133 deletions(-)

diff --git a/test/e2e/addon_update.go b/test/e2e/addon_update.go
index 666a2217607..74cd41ad58b 100644
--- a/test/e2e/addon_update.go
+++ b/test/e2e/addon_update.go
@@ -26,6 +26,7 @@ import (
 	"golang.org/x/crypto/ssh"
 
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
 	"k8s.io/kubernetes/test/e2e/framework"
 
@@ -35,126 +36,156 @@ import (
 // TODO: it would probably be slightly better to build up the objects
 // in the code and then serialize to yaml.
-var addon_controller_v1 = `
+var reconcile_addon_controller = `
 apiVersion: v1
 kind: ReplicationController
 metadata:
-  name: addon-test-v1
+  name: addon-reconcile-test
   namespace: %s
   labels:
-    k8s-app: addon-test
-    version: v1
+    k8s-app: addon-reconcile-test
     kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
 spec:
   replicas: 2
   selector:
-    k8s-app: addon-test
-    version: v1
+    k8s-app: addon-reconcile-test
   template:
     metadata:
       labels:
-        k8s-app: addon-test
-        version: v1
-        kubernetes.io/cluster-service: "true"
+        k8s-app: addon-reconcile-test
     spec:
       containers:
      - image: gcr.io/google_containers/serve_hostname:v1.4
-        name: addon-test
+        name: addon-reconcile-test
        ports:
        - containerPort: 9376
          protocol: TCP
 `
 
-var addon_controller_v2 = `
+// Should update the "reconcile" class addon.
+var reconcile_addon_controller_updated = `
 apiVersion: v1
 kind: ReplicationController
 metadata:
-  name: addon-test-v2
+  name: addon-reconcile-test
   namespace: %s
   labels:
-    k8s-app: addon-test
-    version: v2
+    k8s-app: addon-reconcile-test
     kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+    newLabel: addon-reconcile-test
 spec:
   replicas: 2
   selector:
-    k8s-app: addon-test
-    version: v2
+    k8s-app: addon-reconcile-test
   template:
     metadata:
       labels:
-        k8s-app: addon-test
-        version: v2
-        kubernetes.io/cluster-service: "true"
+        k8s-app: addon-reconcile-test
     spec:
      containers:
      - image: gcr.io/google_containers/serve_hostname:v1.4
-        name: addon-test
+        name: addon-reconcile-test
        ports:
        - containerPort: 9376
          protocol: TCP
 `
 
-var addon_service_v1 = `
+var ensure_exists_addon_service = `
 apiVersion: v1
 kind: Service
 metadata:
-  name: addon-test
+  name: addon-ensure-exists-test
   namespace: %s
   labels:
-    k8s-app: addon-test
-    kubernetes.io/cluster-service: "true"
-    kubernetes.io/name: addon-test
+    k8s-app: addon-ensure-exists-test
+    addonmanager.kubernetes.io/mode: EnsureExists
 spec:
   ports:
  - port: 9376
    protocol: TCP
    targetPort: 9376
   selector:
-    k8s-app: addon-test
+    k8s-app: addon-ensure-exists-test
 `
 
-var addon_service_v2 = `
+// Should create but not update the "ensure exist" class addon.
+var ensure_exists_addon_service_updated = `
 apiVersion: v1
 kind: Service
 metadata:
-  name: addon-test-updated
+  name: addon-ensure-exists-test
   namespace: %s
   labels:
-    k8s-app: addon-test
-    kubernetes.io/cluster-service: "true"
-    kubernetes.io/name: addon-test
-    newLabel: newValue
+    k8s-app: addon-ensure-exists-test
+    addonmanager.kubernetes.io/mode: EnsureExists
+    newLabel: addon-ensure-exists-test
 spec:
   ports:
  - port: 9376
    protocol: TCP
    targetPort: 9376
   selector:
-    k8s-app: addon-test
+    k8s-app: addon-ensure-exists-test
 `
 
+var deprecated_label_addon_service = `
+apiVersion: v1
+kind: Service
+metadata:
+  name: addon-deprecated-label-test
+  namespace: %s
+  labels:
+    k8s-app: addon-deprecated-label-test
+    kubernetes.io/cluster-service: "true"
+spec:
+  ports:
+  - port: 9376
+    protocol: TCP
+    targetPort: 9376
+  selector:
+    k8s-app: addon-deprecated-label-test
+`
+
+// Should update the addon with label "kubernetes.io/cluster-service=true".
+var deprecated_label_addon_service_updated = `
+apiVersion: v1
+kind: Service
+metadata:
+  name: addon-deprecated-label-test
+  namespace: %s
+  labels:
+    k8s-app: addon-deprecated-label-test
+    kubernetes.io/cluster-service: "true"
+    newLabel: addon-deprecated-label-test
+spec:
+  ports:
+  - port: 9376
+    protocol: TCP
+    targetPort: 9376
+  selector:
+    k8s-app: addon-deprecated-label-test
+`
+
+// Should not create an addon without a valid label.
+var invalid_addon_controller = ` apiVersion: v1 kind: ReplicationController metadata: - name: invalid-addon-test-v1 + name: invalid-addon-test namespace: %s labels: k8s-app: invalid-addon-test - version: v1 + addonmanager.kubernetes.io/mode: NotMatch spec: replicas: 2 selector: k8s-app: invalid-addon-test - version: v1 template: metadata: labels: k8s-app: invalid-addon-test - version: v1 - kubernetes.io/cluster-service: "true" spec: containers: - image: gcr.io/google_containers/serve_hostname:v1.4 @@ -164,49 +195,10 @@ spec: protocol: TCP ` -// Wrong label case -var invalid_addon_service_v1 = ` -apiVersion: v1 -kind: Service -metadata: - name: ivalid-addon-test - namespace: %s - labels: - k8s-app: invalid-addon-test - kubernetes.io/name: invalid-addon-test -spec: - ports: - - port: 9377 - protocol: TCP - targetPort: 9376 - selector: - k8s-app: invalid-addon-test -` - -// Wrong namespace case -var invalid_addon_service_v2 = ` -apiVersion: v1 -kind: Service -metadata: - name: ivalid-addon-test-v2 - namespace: %s - labels: - k8s-app: invalid-addon-test-v2 - kubernetes.io/cluster-service: "true" -spec: - ports: - - port: 9377 - protocol: TCP - targetPort: 9376 - selector: - k8s-app: invalid-addon-test -` - const ( addonTestPollInterval = 3 * time.Second addonTestPollTimeout = 5 * time.Minute - defaultNsName = metav1.NamespaceDefault - addonNsName = "kube-system" + addonNsName = metav1.NamespaceSystem ) type stringPair struct { @@ -257,23 +249,23 @@ var _ = framework.KubeDescribe("Addon update", func() { defer sshExec(sshClient, fmt.Sprintf("rm -rf %s", temporaryRemotePathPrefix)) // ignore the result in cleanup sshExecAndVerify(sshClient, fmt.Sprintf("mkdir -p %s", temporaryRemotePath)) - rcv1 := "addon-controller-v1.yaml" - rcv2 := "addon-controller-v2.yaml" - rcInvalid := "invalid-addon-controller-v1.yaml" + rcAddonReconcile := "addon-reconcile-controller.yaml" + rcAddonReconcileUpdated := "addon-reconcile-controller-Updated.yaml" + rcInvalid := "invalid-addon-controller.yaml" - svcv1 := "addon-service-v1.yaml" - svcv2 := "addon-service-v2.yaml" - svcInvalid := "invalid-addon-service-v1.yaml" - svcInvalidv2 := "invalid-addon-service-v2.yaml" + svcAddonDeprecatedLabel := "addon-deprecated-label-service.yaml" + svcAddonDeprecatedLabelUpdated := "addon-deprecated-label-service-updated.yaml" + svcAddonEnsureExists := "addon-ensure-exists-service.yaml" + svcAddonEnsureExistsUpdated := "addon-ensure-exists-service-updated.yaml" var remoteFiles []stringPair = []stringPair{ - {fmt.Sprintf(addon_controller_v1, addonNsName), rcv1}, - {fmt.Sprintf(addon_controller_v2, addonNsName), rcv2}, - {fmt.Sprintf(addon_service_v1, addonNsName), svcv1}, - {fmt.Sprintf(addon_service_v2, addonNsName), svcv2}, - {fmt.Sprintf(invalid_addon_controller_v1, addonNsName), rcInvalid}, - {fmt.Sprintf(invalid_addon_service_v1, addonNsName), svcInvalid}, - {fmt.Sprintf(invalid_addon_service_v2, defaultNsName), svcInvalidv2}, + {fmt.Sprintf(reconcile_addon_controller, addonNsName), rcAddonReconcile}, + {fmt.Sprintf(reconcile_addon_controller_updated, addonNsName), rcAddonReconcileUpdated}, + {fmt.Sprintf(deprecated_label_addon_service, addonNsName), svcAddonDeprecatedLabel}, + {fmt.Sprintf(deprecated_label_addon_service_updated, addonNsName), svcAddonDeprecatedLabelUpdated}, + {fmt.Sprintf(ensure_exists_addon_service, addonNsName), svcAddonEnsureExists}, + {fmt.Sprintf(ensure_exists_addon_service_updated, addonNsName), svcAddonEnsureExistsUpdated}, + {fmt.Sprintf(invalid_addon_controller, addonNsName), rcInvalid}, } for _, p 
:= range remoteFiles { @@ -292,51 +284,54 @@ var _ = framework.KubeDescribe("Addon update", func() { defer sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix)) // ignore result in cleanup sshExecAndVerify(sshClient, fmt.Sprintf("sudo mkdir -p %s", destinationDir)) - By("copy invalid manifests to the destination dir (without kubernetes.io/cluster-service label)") + By("copy invalid manifests to the destination dir") sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcInvalid, destinationDir, rcInvalid)) - sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcInvalid, destinationDir, svcInvalid)) // we will verify at the end of the test that the objects weren't created from the invalid manifests By("copy new manifests") - sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcv1, destinationDir, rcv1)) - sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcv1, destinationDir, svcv1)) + sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcAddonReconcile, destinationDir, rcAddonReconcile)) + sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonDeprecatedLabel, destinationDir, svcAddonDeprecatedLabel)) + sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonEnsureExists, destinationDir, svcAddonEnsureExists)) + // Delete the "ensure exist class" addon at the end. + defer func() { + framework.Logf("Cleaning up ensure exist class addon.") + Expect(f.ClientSet.Core().Services(addonNsName).Delete("addon-ensure-exists-test", nil)).NotTo(HaveOccurred()) + }() - waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-test", true) - waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-test-v1", true) + waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-reconcile-test", true) + waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-deprecated-label-test", true) + waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-ensure-exists-test", true) + // Replace the manifests with new contents. By("update manifests") - sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcv2, destinationDir, rcv2)) - sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcv2, destinationDir, svcv2)) - sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcv1)) - sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcv1)) - /** - * Note that we have a small race condition here - the kube-addon-updater - * May notice that a new rc/service file appeared, while the old one will still be there. 
-		 * But it is ok - as long as we don't have rolling update, the result will be the same
-		 */
+		sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcAddonReconcileUpdated, destinationDir, rcAddonReconcile))
+		sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonDeprecatedLabelUpdated, destinationDir, svcAddonDeprecatedLabel))
+		sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcAddonEnsureExistsUpdated, destinationDir, svcAddonEnsureExists))
 
-		waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-test-updated", true)
-		waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-test-v2", true)
-
-		waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-test", false)
-		waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-test-v1", false)
+		// Wait for the updated addons to carry the newly added label.
+		reconcileSelector := labels.SelectorFromSet(labels.Set(map[string]string{"newLabel": "addon-reconcile-test"}))
+		waitForReplicationControllerwithSelectorInAddonTest(f.ClientSet, addonNsName, true, reconcileSelector)
+		deprecatedLabelSelector := labels.SelectorFromSet(labels.Set(map[string]string{"newLabel": "addon-deprecated-label-test"}))
+		waitForServicewithSelectorInAddonTest(f.ClientSet, addonNsName, true, deprecatedLabelSelector)
+		// The "ensure exist class" addon should not be updated.
+		ensureExistSelector := labels.SelectorFromSet(labels.Set(map[string]string{"newLabel": "addon-ensure-exists-test"}))
+		waitForServicewithSelectorInAddonTest(f.ClientSet, addonNsName, false, ensureExistSelector)
 
 		By("remove manifests")
-		sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcv2))
-		sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcv2))
+		sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcAddonReconcile))
+		sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcAddonDeprecatedLabel))
+		sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcAddonEnsureExists))
 
-		waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-test-updated", false)
-		waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-test-v2", false)
+		waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-reconcile-test", false)
+		waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-deprecated-label-test", false)
+		// The "ensure exist class" addon will not be deleted when the manifest is removed.
+		waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-ensure-exists-test", true)
 
-		By("verify invalid API addons weren't created")
-		_, err = f.ClientSet.Core().ReplicationControllers(addonNsName).Get("invalid-addon-test-v1", metav1.GetOptions{})
-		Expect(err).To(HaveOccurred())
-		_, err = f.ClientSet.Core().Services(addonNsName).Get("ivalid-addon-test", metav1.GetOptions{})
-		Expect(err).To(HaveOccurred())
-		_, err = f.ClientSet.Core().Services(defaultNsName).Get("ivalid-addon-test-v2", metav1.GetOptions{})
+		By("verify invalid addons weren't created")
+		_, err = f.ClientSet.Core().ReplicationControllers(addonNsName).Get("invalid-addon-test", metav1.GetOptions{})
 		Expect(err).To(HaveOccurred())
 
-		// invalid addons will be deleted by the deferred function
+		// Invalid addon manifests and the "ensure exist class" addon will be deleted by the deferred function.
 	})
 })
 
@@ -348,6 +343,15 @@ func waitForReplicationControllerInAddonTest(c clientset.Interface, addonNamespa
 	framework.ExpectNoError(framework.WaitForReplicationController(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout))
 }
 
+func waitForServicewithSelectorInAddonTest(c clientset.Interface, addonNamespace string, exist bool, selector labels.Selector) {
+	framework.ExpectNoError(framework.WaitForServiceWithSelector(c, addonNamespace, selector, exist, addonTestPollInterval, addonTestPollTimeout))
+}
+
+func waitForReplicationControllerwithSelectorInAddonTest(c clientset.Interface, addonNamespace string, exist bool, selector labels.Selector) {
+	framework.ExpectNoError(framework.WaitForReplicationControllerwithSelector(c, addonNamespace, selector, exist, addonTestPollInterval,
+		addonTestPollTimeout))
+}
+
 // TODO use the framework.SSH code, either adding an SCP to it or copying files
 // differently.
 func getMasterSSHClient() (*ssh.Client, error) {
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index 42384bee790..884c18647dd 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -1456,17 +1456,11 @@ func WaitForService(c clientset.Interface, namespace, name string, exist bool, i
 		_, err := c.Core().Services(namespace).Get(name, metav1.GetOptions{})
 		switch {
 		case err == nil:
-			if !exist {
-				return false, nil
-			}
 			Logf("Service %s in namespace %s found.", name, namespace)
-			return true, nil
+			return exist, nil
 		case apierrs.IsNotFound(err):
-			if exist {
-				return false, nil
-			}
 			Logf("Service %s in namespace %s disappeared.", name, namespace)
-			return true, nil
+			return !exist, nil
 		default:
 			Logf("Get service %s in namespace %s failed: %v", name, namespace, err)
 			return false, nil
@@ -1479,6 +1473,30 @@ func WaitForService(c clientset.Interface, namespace, name string, exist bool, i
 	return nil
 }
 
+// WaitForServiceWithSelector waits until any service with the given selector appears (exist == true), or disappears (exist == false).
+func WaitForServiceWithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval,
+	timeout time.Duration) error {
+	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
+		services, err := c.Core().Services(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
+		switch {
+		case err != nil:
+			Logf("List service with %s in namespace %s failed: %v", selector.String(), namespace, err)
+			return false, nil
+		case len(services.Items) != 0:
+			Logf("Service with %s in namespace %s found.", selector.String(), namespace)
+			return exist, nil
+		default:
+			Logf("Service with %s in namespace %s disappeared.", selector.String(), namespace)
+			return !exist, nil
+		}
+	})
+	if err != nil {
+		stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
+		return fmt.Errorf("error waiting for service with %s in namespace %s %s: %v", selector.String(), namespace, stateMsg[exist], err)
+	}
+	return nil
+}
+
 // WaitForServiceEndpointsNum waits until the number of endpoints that implement the service equals expectNum.
 func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
 	return wait.Poll(interval, timeout, func() (bool, error) {
@@ -1524,6 +1542,30 @@ func WaitForReplicationController(c clientset.Interface, namespace, name string,
 	return nil
 }
 
+// WaitForReplicationControllerwithSelector waits until any RC with the given selector appears (exist == true), or disappears (exist == false).
+func WaitForReplicationControllerwithSelector(c clientset.Interface, namespace string, selector labels.Selector, exist bool, interval,
+	timeout time.Duration) error {
+	err := wait.PollImmediate(interval, timeout, func() (bool, error) {
+		rcs, err := c.Core().ReplicationControllers(namespace).List(metav1.ListOptions{LabelSelector: selector.String()})
+		switch {
+		case err != nil:
+			Logf("List ReplicationController with %s in namespace %s failed: %v", selector.String(), namespace, err)
+			return false, nil
+		case len(rcs.Items) != 0:
+			Logf("ReplicationController with %s in namespace %s found.", selector.String(), namespace)
+			return exist, nil
+		default:
+			Logf("ReplicationController with %s in namespace %s disappeared.", selector.String(), namespace)
+			return !exist, nil
+		}
+	})
+	if err != nil {
+		stateMsg := map[bool]string{true: "to appear", false: "to disappear"}
+		return fmt.Errorf("error waiting for ReplicationControllers with %s in namespace %s %s: %v", selector.String(), namespace, stateMsg[exist], err)
+	}
+	return nil
+}
+
 func WaitForEndpoint(c clientset.Interface, ns, name string) error {
 	for t := time.Now(); time.Since(t) < EndpointRegisterTimeout; time.Sleep(Poll) {
 		endpoint, err := c.Core().Endpoints(ns).Get(name, metav1.GetOptions{})

From 64ba52ae71d4363a45c2cf8e44f3ca9e85a6b345 Mon Sep 17 00:00:00 2001
From: Zihong Zheng
Date: Fri, 24 Feb 2017 16:47:29 -0800
Subject: [PATCH 4/5] Bumps addon-manager to v6.4-alpha.3 and updates template files

---
 cluster/addons/addon-manager/CHANGELOG.md                  | 3 +++
 cluster/addons/addon-manager/Makefile                      | 5 ++++-
 .../hyperkube/static-pods/addon-manager-multinode.json     | 2 +-
 .../hyperkube/static-pods/addon-manager-singlenode.json    | 2 +-
 cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml  | 2 +-
 test/kubemark/resources/manifests/kube-addon-manager.yaml  | 2 +-
 6 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/cluster/addons/addon-manager/CHANGELOG.md b/cluster/addons/addon-manager/CHANGELOG.md
index 2068f730cb5..762527ff7f5 100644
--- a/cluster/addons/addon-manager/CHANGELOG.md
+++ b/cluster/addons/addon-manager/CHANGELOG.md
@@ -1,3 +1,6 @@
+### Version 6.4-alpha.3 (Fri February 24 2017 Zihong Zheng)
+ - Support 'ensure exist' class addons and use an addon-manager-specific label.
+
 ### Version 6.4-alpha.2 (Wed February 16 2017 Zihong Zheng)
  - Update kubectl to v1.6.0-alpha.2 to use HPA in autoscaling/v1 instead of extensions/v1beta1.
 
diff --git a/cluster/addons/addon-manager/Makefile b/cluster/addons/addon-manager/Makefile
index ad94806ca78..dcb1b46f8b6 100644
--- a/cluster/addons/addon-manager/Makefile
+++ b/cluster/addons/addon-manager/Makefile
@@ -15,7 +15,10 @@ IMAGE=gcr.io/google-containers/kube-addon-manager
 ARCH?=amd64
 TEMP_DIR:=$(shell mktemp -d)
-VERSION=v6.4-alpha.2
+VERSION=v6.4-alpha.3
+# TODO: The current Addon Manager is built with kubectl on head
+# (GitCommit:"17375fc59fff39135af63bd1750bb07c36ef873b").
+# Should use the next released kubectl once available.
 KUBECTL_VERSION?=v1.6.0-alpha.2
 
 ifeq ($(ARCH),amd64)
diff --git a/cluster/images/hyperkube/static-pods/addon-manager-multinode.json b/cluster/images/hyperkube/static-pods/addon-manager-multinode.json
index bd1b9dda206..b67ec04f1a4 100644
--- a/cluster/images/hyperkube/static-pods/addon-manager-multinode.json
+++ b/cluster/images/hyperkube/static-pods/addon-manager-multinode.json
@@ -11,7 +11,7 @@
     "containers": [
       {
         "name": "kube-addon-manager",
-        "image": "REGISTRY/kube-addon-manager-ARCH:v6.4-alpha.2",
+        "image": "REGISTRY/kube-addon-manager-ARCH:v6.4-alpha.3",
         "resources": {
           "requests": {
             "cpu": "5m",
diff --git a/cluster/images/hyperkube/static-pods/addon-manager-singlenode.json b/cluster/images/hyperkube/static-pods/addon-manager-singlenode.json
index a3eeffc0218..07118106f88 100644
--- a/cluster/images/hyperkube/static-pods/addon-manager-singlenode.json
+++ b/cluster/images/hyperkube/static-pods/addon-manager-singlenode.json
@@ -11,7 +11,7 @@
     "containers": [
       {
         "name": "kube-addon-manager",
-        "image": "REGISTRY/kube-addon-manager-ARCH:v6.4-alpha.2",
+        "image": "REGISTRY/kube-addon-manager-ARCH:v6.4-alpha.3",
         "resources": {
           "requests": {
             "cpu": "5m",
diff --git a/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml b/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml
index 2710fbde44e..0ae35c95f1f 100644
--- a/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml
+++ b/cluster/saltbase/salt/kube-addons/kube-addon-manager.yaml
@@ -13,7 +13,7 @@ spec:
     # - cluster/images/hyperkube/static-pods/addon-manager-singlenode.json
     # - cluster/images/hyperkube/static-pods/addon-manager-multinode.json
     # - test/kubemark/resources/manifests/kube-addon-manager.yaml
-    image: gcr.io/google-containers/kube-addon-manager:v6.4-alpha.2
+    image: gcr.io/google-containers/kube-addon-manager:v6.4-alpha.3
     command:
     - /bin/bash
     - -c
diff --git a/test/kubemark/resources/manifests/kube-addon-manager.yaml b/test/kubemark/resources/manifests/kube-addon-manager.yaml
index 764b75796d9..7e9e456781f 100644
--- a/test/kubemark/resources/manifests/kube-addon-manager.yaml
+++ b/test/kubemark/resources/manifests/kube-addon-manager.yaml
@@ -9,7 +9,7 @@ spec:
   hostNetwork: true
   containers:
   - name: kube-addon-manager
-    image: {{kube_docker_registry}}/kube-addon-manager:v6.4-alpha.2
+    image: {{kube_docker_registry}}/kube-addon-manager:v6.4-alpha.3
     command:
     - /bin/bash
     - -c

From d4fa046d567698e01295fbf0a43d8eb7a453702b Mon Sep 17 00:00:00 2001
From: Zihong Zheng
Date: Fri, 24 Feb 2017 16:53:12 -0800
Subject: [PATCH 5/5] Adds the new addon-manager labels on cluster addon templates

---
 .../addons/calico-policy-controller/calico-etcd-service.yaml   | 1 +
 .../calico-policy-controller/calico-etcd-statefulset.yaml      | 1 +
 .../calico-policy-controller/calico-policy-controller.yaml     | 1 +
 .../cluster-loadbalancing/glbc/default-svc-controller.yaml     | 1 +
 cluster/addons/cluster-loadbalancing/glbc/default-svc.yaml     | 1 +
 .../addons/cluster-monitoring/google/heapster-controller.yaml  | 1 +
 cluster/addons/cluster-monitoring/google/heapster-service.yaml | 1 +
 .../googleinfluxdb/heapster-controller-combined.yaml           | 1 +
 .../addons/cluster-monitoring/influxdb/grafana-service.yaml    | 1 +
 .../cluster-monitoring/influxdb/heapster-controller.yaml       | 1 +
 .../addons/cluster-monitoring/influxdb/heapster-service.yaml   | 1 +
 .../influxdb/influxdb-grafana-controller.yaml                  | 1 +
 .../addons/cluster-monitoring/influxdb/influxdb-service.yaml   | 1 +
 .../cluster-monitoring/standalone/heapster-controller.yaml     | 1 +
.../addons/cluster-monitoring/standalone/heapster-service.yaml | 1 + cluster/addons/dashboard/dashboard-controller.yaml | 1 + cluster/addons/dashboard/dashboard-service.yaml | 1 + .../dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml | 1 + cluster/addons/dns/kubedns-controller.yaml.base | 1 + cluster/addons/dns/kubedns-controller.yaml.in | 1 + cluster/addons/dns/kubedns-controller.yaml.sed | 1 + cluster/addons/dns/kubedns-sa.yaml | 1 + cluster/addons/dns/kubedns-svc.yaml.base | 1 + cluster/addons/dns/kubedns-svc.yaml.in | 1 + cluster/addons/dns/kubedns-svc.yaml.sed | 1 + cluster/addons/e2e-rbac-bindings/kubelet-binding.yaml | 1 + cluster/addons/e2e-rbac-bindings/random-addon-grabbag.yaml | 1 + cluster/addons/fluentd-elasticsearch/es-controller.yaml | 1 + cluster/addons/fluentd-elasticsearch/es-service.yaml | 1 + cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml | 1 + cluster/addons/fluentd-elasticsearch/kibana-controller.yaml | 1 + cluster/addons/fluentd-elasticsearch/kibana-service.yaml | 1 + cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml | 1 + cluster/addons/node-problem-detector/npd.yaml | 3 +++ .../addons/node-problem-detector/standalone/npd-binding.yaml | 1 + cluster/addons/rbac/apiserver-node-proxy-binding.yaml | 1 + cluster/addons/rbac/node-proxy-role.yaml | 1 + cluster/addons/registry/registry-pv.yaml.in | 1 + cluster/addons/registry/registry-pvc.yaml.in | 1 + cluster/addons/registry/registry-rc.yaml | 1 + cluster/addons/registry/registry-svc.yaml | 1 + cluster/addons/storage-class/aws/default.yaml | 1 + cluster/addons/storage-class/azure/default.yaml | 1 + cluster/addons/storage-class/gce/default.yaml | 1 + cluster/addons/storage-class/openstack/default.yaml | 1 + 45 files changed, 47 insertions(+) diff --git a/cluster/addons/calico-policy-controller/calico-etcd-service.yaml b/cluster/addons/calico-policy-controller/calico-etcd-service.yaml index 7b9e6e18ba0..ad9a95e4690 100644 --- a/cluster/addons/calico-policy-controller/calico-etcd-service.yaml +++ b/cluster/addons/calico-policy-controller/calico-etcd-service.yaml @@ -4,6 +4,7 @@ metadata: labels: k8s-app: calico-etcd kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile name: calico-etcd namespace: kube-system spec: diff --git a/cluster/addons/calico-policy-controller/calico-etcd-statefulset.yaml b/cluster/addons/calico-policy-controller/calico-etcd-statefulset.yaml index 77fd12e522e..61e996ce195 100644 --- a/cluster/addons/calico-policy-controller/calico-etcd-statefulset.yaml +++ b/cluster/addons/calico-policy-controller/calico-etcd-statefulset.yaml @@ -5,6 +5,7 @@ metadata: namespace: kube-system labels: kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile k8s-app: calico-etcd spec: serviceName: calico-etcd diff --git a/cluster/addons/calico-policy-controller/calico-policy-controller.yaml b/cluster/addons/calico-policy-controller/calico-policy-controller.yaml index 68d93f87e64..899cadeabf1 100644 --- a/cluster/addons/calico-policy-controller/calico-policy-controller.yaml +++ b/cluster/addons/calico-policy-controller/calico-policy-controller.yaml @@ -6,6 +6,7 @@ metadata: labels: k8s-app: calico-policy kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile spec: replicas: 1 selector: diff --git a/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml b/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml index 7bea6cf1f7f..f20af59d623 100644 --- 
a/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml +++ b/cluster/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml @@ -7,6 +7,7 @@ metadata: k8s-app: glbc kubernetes.io/name: "GLBC" kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile spec: replicas: 1 selector: diff --git a/cluster/addons/cluster-loadbalancing/glbc/default-svc.yaml b/cluster/addons/cluster-loadbalancing/glbc/default-svc.yaml index 1bd59320db3..84b8881dfaa 100644 --- a/cluster/addons/cluster-loadbalancing/glbc/default-svc.yaml +++ b/cluster/addons/cluster-loadbalancing/glbc/default-svc.yaml @@ -8,6 +8,7 @@ metadata: labels: k8s-app: glbc kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/name: "GLBCDefaultBackend" spec: # The default backend must be of type NodePort. diff --git a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml index 0a1b02f6fbb..f1e15e3df67 100644 --- a/cluster/addons/cluster-monitoring/google/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/google/heapster-controller.yaml @@ -19,6 +19,7 @@ metadata: labels: k8s-app: heapster kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile version: v1.3.0-beta.0 spec: replicas: 1 diff --git a/cluster/addons/cluster-monitoring/google/heapster-service.yaml b/cluster/addons/cluster-monitoring/google/heapster-service.yaml index 31e8b96006d..aab19cbb064 100644 --- a/cluster/addons/cluster-monitoring/google/heapster-service.yaml +++ b/cluster/addons/cluster-monitoring/google/heapster-service.yaml @@ -5,6 +5,7 @@ metadata: namespace: kube-system labels: kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/name: "Heapster" spec: ports: diff --git a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml index cfff801866a..ee71914ef68 100644 --- a/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml +++ b/cluster/addons/cluster-monitoring/googleinfluxdb/heapster-controller-combined.yaml @@ -19,6 +19,7 @@ metadata: labels: k8s-app: heapster kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile version: v1.3.0-beta.0 spec: replicas: 1 diff --git a/cluster/addons/cluster-monitoring/influxdb/grafana-service.yaml b/cluster/addons/cluster-monitoring/influxdb/grafana-service.yaml index 9140e8b0c2c..50e6997b2f7 100644 --- a/cluster/addons/cluster-monitoring/influxdb/grafana-service.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/grafana-service.yaml @@ -5,6 +5,7 @@ metadata: namespace: kube-system labels: kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/name: "Grafana" spec: # On production clusters, consider setting up auth for grafana, and diff --git a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml index 49c34b72226..f7c15d7d141 100644 --- a/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/heapster-controller.yaml @@ -19,6 +19,7 @@ metadata: labels: k8s-app: heapster kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile version: v1.3.0-beta.0 spec: replicas: 1 diff --git 
a/cluster/addons/cluster-monitoring/influxdb/heapster-service.yaml b/cluster/addons/cluster-monitoring/influxdb/heapster-service.yaml index e406d69c44c..ed8ac37499e 100644 --- a/cluster/addons/cluster-monitoring/influxdb/heapster-service.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/heapster-service.yaml @@ -5,6 +5,7 @@ metadata: namespace: kube-system labels: kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/name: "Heapster" spec: ports: diff --git a/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml b/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml index 680378e7591..d7ee7496784 100644 --- a/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/influxdb-grafana-controller.yaml @@ -7,6 +7,7 @@ metadata: k8s-app: influxGrafana version: v4 kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile spec: replicas: 1 selector: diff --git a/cluster/addons/cluster-monitoring/influxdb/influxdb-service.yaml b/cluster/addons/cluster-monitoring/influxdb/influxdb-service.yaml index 066e052476e..8e9a1044277 100644 --- a/cluster/addons/cluster-monitoring/influxdb/influxdb-service.yaml +++ b/cluster/addons/cluster-monitoring/influxdb/influxdb-service.yaml @@ -5,6 +5,7 @@ metadata: namespace: kube-system labels: kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/name: "InfluxDB" spec: ports: diff --git a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml index fb07acd2da4..0d63527ae5d 100644 --- a/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml +++ b/cluster/addons/cluster-monitoring/standalone/heapster-controller.yaml @@ -17,6 +17,7 @@ metadata: labels: k8s-app: heapster kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile version: v1.3.0-beta.0 spec: replicas: 1 diff --git a/cluster/addons/cluster-monitoring/standalone/heapster-service.yaml b/cluster/addons/cluster-monitoring/standalone/heapster-service.yaml index 31e8b96006d..aab19cbb064 100644 --- a/cluster/addons/cluster-monitoring/standalone/heapster-service.yaml +++ b/cluster/addons/cluster-monitoring/standalone/heapster-service.yaml @@ -5,6 +5,7 @@ metadata: namespace: kube-system labels: kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/name: "Heapster" spec: ports: diff --git a/cluster/addons/dashboard/dashboard-controller.yaml b/cluster/addons/dashboard/dashboard-controller.yaml index f74fddb85a5..bb21ffe9ce9 100644 --- a/cluster/addons/dashboard/dashboard-controller.yaml +++ b/cluster/addons/dashboard/dashboard-controller.yaml @@ -6,6 +6,7 @@ metadata: labels: k8s-app: kubernetes-dashboard kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile spec: selector: matchLabels: diff --git a/cluster/addons/dashboard/dashboard-service.yaml b/cluster/addons/dashboard/dashboard-service.yaml index 195b503de10..831248a97d7 100644 --- a/cluster/addons/dashboard/dashboard-service.yaml +++ b/cluster/addons/dashboard/dashboard-service.yaml @@ -6,6 +6,7 @@ metadata: labels: k8s-app: kubernetes-dashboard kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile spec: selector: k8s-app: kubernetes-dashboard diff --git 
a/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml b/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml index e2dd03e6159..bac10dca91b 100644 --- a/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml +++ b/cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml @@ -20,6 +20,7 @@ metadata: labels: k8s-app: kube-dns-autoscaler kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile spec: template: metadata: diff --git a/cluster/addons/dns/kubedns-controller.yaml.base b/cluster/addons/dns/kubedns-controller.yaml.base index a420a416c6a..23e5b584e63 100644 --- a/cluster/addons/dns/kubedns-controller.yaml.base +++ b/cluster/addons/dns/kubedns-controller.yaml.base @@ -25,6 +25,7 @@ metadata: labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile spec: # replicas: not specified here: # 1. In order to make Addon Manager do not reconcile this replicas parameter. diff --git a/cluster/addons/dns/kubedns-controller.yaml.in b/cluster/addons/dns/kubedns-controller.yaml.in index 706baac3822..bcf1878bba6 100644 --- a/cluster/addons/dns/kubedns-controller.yaml.in +++ b/cluster/addons/dns/kubedns-controller.yaml.in @@ -25,6 +25,7 @@ metadata: labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile spec: # replicas: not specified here: # 1. In order to make Addon Manager do not reconcile this replicas parameter. diff --git a/cluster/addons/dns/kubedns-controller.yaml.sed b/cluster/addons/dns/kubedns-controller.yaml.sed index 150a384fa6d..2a5928a45d9 100644 --- a/cluster/addons/dns/kubedns-controller.yaml.sed +++ b/cluster/addons/dns/kubedns-controller.yaml.sed @@ -25,6 +25,7 @@ metadata: labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile spec: # replicas: not specified here: # 1. In order to make Addon Manager do not reconcile this replicas parameter. 
diff --git a/cluster/addons/dns/kubedns-sa.yaml b/cluster/addons/dns/kubedns-sa.yaml index 892351aa0c7..b7524758e59 100644 --- a/cluster/addons/dns/kubedns-sa.yaml +++ b/cluster/addons/dns/kubedns-sa.yaml @@ -4,3 +4,4 @@ metadata: name: kube-dns labels: kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile diff --git a/cluster/addons/dns/kubedns-svc.yaml.base b/cluster/addons/dns/kubedns-svc.yaml.base index 90a65677a56..cdeeedf96cf 100644 --- a/cluster/addons/dns/kubedns-svc.yaml.base +++ b/cluster/addons/dns/kubedns-svc.yaml.base @@ -22,6 +22,7 @@ metadata: labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/name: "KubeDNS" spec: selector: diff --git a/cluster/addons/dns/kubedns-svc.yaml.in b/cluster/addons/dns/kubedns-svc.yaml.in index c8c59134e73..ed82d6155b5 100644 --- a/cluster/addons/dns/kubedns-svc.yaml.in +++ b/cluster/addons/dns/kubedns-svc.yaml.in @@ -22,6 +22,7 @@ metadata: labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/name: "KubeDNS" spec: selector: diff --git a/cluster/addons/dns/kubedns-svc.yaml.sed b/cluster/addons/dns/kubedns-svc.yaml.sed index 0127ecf5d12..bac6d97f678 100644 --- a/cluster/addons/dns/kubedns-svc.yaml.sed +++ b/cluster/addons/dns/kubedns-svc.yaml.sed @@ -22,6 +22,7 @@ metadata: labels: k8s-app: kube-dns kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/name: "KubeDNS" spec: selector: diff --git a/cluster/addons/e2e-rbac-bindings/kubelet-binding.yaml b/cluster/addons/e2e-rbac-bindings/kubelet-binding.yaml index 066e28b101d..fd8624951c1 100644 --- a/cluster/addons/e2e-rbac-bindings/kubelet-binding.yaml +++ b/cluster/addons/e2e-rbac-bindings/kubelet-binding.yaml @@ -9,6 +9,7 @@ metadata: name: kubelet-cluster-admin labels: kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/cluster/addons/e2e-rbac-bindings/random-addon-grabbag.yaml b/cluster/addons/e2e-rbac-bindings/random-addon-grabbag.yaml index f27c2222497..ec180e212b4 100644 --- a/cluster/addons/e2e-rbac-bindings/random-addon-grabbag.yaml +++ b/cluster/addons/e2e-rbac-bindings/random-addon-grabbag.yaml @@ -9,6 +9,7 @@ metadata: name: todo-remove-grabbag-cluster-admin labels: kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/cluster/addons/fluentd-elasticsearch/es-controller.yaml b/cluster/addons/fluentd-elasticsearch/es-controller.yaml index b6f24cde43d..75ff11205b4 100644 --- a/cluster/addons/fluentd-elasticsearch/es-controller.yaml +++ b/cluster/addons/fluentd-elasticsearch/es-controller.yaml @@ -7,6 +7,7 @@ metadata: k8s-app: elasticsearch-logging version: v1 kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile spec: replicas: 2 selector: diff --git a/cluster/addons/fluentd-elasticsearch/es-service.yaml b/cluster/addons/fluentd-elasticsearch/es-service.yaml index abf1fd3f684..ede9306bb89 100644 --- a/cluster/addons/fluentd-elasticsearch/es-service.yaml +++ b/cluster/addons/fluentd-elasticsearch/es-service.yaml @@ -6,6 +6,7 @@ metadata: labels: k8s-app: elasticsearch-logging kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/name: "Elasticsearch" spec: ports: diff --git 
a/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml b/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml index a844acc2e83..fb8afe95f65 100644 --- a/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml +++ b/cluster/addons/fluentd-elasticsearch/fluentd-es-ds.yaml @@ -6,6 +6,7 @@ metadata: labels: k8s-app: fluentd-es kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile version: v1.22 spec: template: diff --git a/cluster/addons/fluentd-elasticsearch/kibana-controller.yaml b/cluster/addons/fluentd-elasticsearch/kibana-controller.yaml index 06fd7bc947f..bc7725686d0 100644 --- a/cluster/addons/fluentd-elasticsearch/kibana-controller.yaml +++ b/cluster/addons/fluentd-elasticsearch/kibana-controller.yaml @@ -6,6 +6,7 @@ metadata: labels: k8s-app: kibana-logging kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile spec: replicas: 1 selector: diff --git a/cluster/addons/fluentd-elasticsearch/kibana-service.yaml b/cluster/addons/fluentd-elasticsearch/kibana-service.yaml index 43efada2c50..d241ef728de 100644 --- a/cluster/addons/fluentd-elasticsearch/kibana-service.yaml +++ b/cluster/addons/fluentd-elasticsearch/kibana-service.yaml @@ -6,6 +6,7 @@ metadata: labels: k8s-app: kibana-logging kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/name: "Kibana" spec: ports: diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml index 6161da92e6f..03a4c268a2f 100644 --- a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml +++ b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml @@ -7,6 +7,7 @@ metadata: labels: k8s-app: fluentd-gcp kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile version: v1.38 spec: template: diff --git a/cluster/addons/node-problem-detector/npd.yaml b/cluster/addons/node-problem-detector/npd.yaml index ec140e18259..bc16e15ef09 100644 --- a/cluster/addons/node-problem-detector/npd.yaml +++ b/cluster/addons/node-problem-detector/npd.yaml @@ -5,6 +5,7 @@ metadata: namespace: kube-system labels: kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile --- apiVersion: rbac.authorization.k8s.io/v1alpha1 kind: ClusterRoleBinding @@ -12,6 +13,7 @@ metadata: name: npd-binding labels: kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole @@ -30,6 +32,7 @@ metadata: k8s-app: node-problem-detector version: v0.3.0-alpha.1 kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile spec: template: metadata: diff --git a/cluster/addons/node-problem-detector/standalone/npd-binding.yaml b/cluster/addons/node-problem-detector/standalone/npd-binding.yaml index 9f6a934d83c..48432afea21 100644 --- a/cluster/addons/node-problem-detector/standalone/npd-binding.yaml +++ b/cluster/addons/node-problem-detector/standalone/npd-binding.yaml @@ -4,6 +4,7 @@ metadata: name: npd-binding labels: kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/cluster/addons/rbac/apiserver-node-proxy-binding.yaml b/cluster/addons/rbac/apiserver-node-proxy-binding.yaml index 46103d006ea..8bfe366edb5 100644 --- a/cluster/addons/rbac/apiserver-node-proxy-binding.yaml +++ b/cluster/addons/rbac/apiserver-node-proxy-binding.yaml @@ -4,6 +4,7 @@ metadata: name: apiserver-node-proxy labels: 
kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole diff --git a/cluster/addons/rbac/node-proxy-role.yaml b/cluster/addons/rbac/node-proxy-role.yaml index d3684c99335..03a7f944c33 100644 --- a/cluster/addons/rbac/node-proxy-role.yaml +++ b/cluster/addons/rbac/node-proxy-role.yaml @@ -4,6 +4,7 @@ metadata: name: node-proxy labels: kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile rules: - apiGroups: - "" diff --git a/cluster/addons/registry/registry-pv.yaml.in b/cluster/addons/registry/registry-pv.yaml.in index b6fd2b35295..196efa928da 100644 --- a/cluster/addons/registry/registry-pv.yaml.in +++ b/cluster/addons/registry/registry-pv.yaml.in @@ -4,6 +4,7 @@ metadata: name: kube-system-kube-registry-pv labels: kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile spec: {% if pillar.get('cluster_registry_disk_type', '') == 'gce' %} capacity: diff --git a/cluster/addons/registry/registry-pvc.yaml.in b/cluster/addons/registry/registry-pvc.yaml.in index 92bfc69864c..35c78717713 100644 --- a/cluster/addons/registry/registry-pvc.yaml.in +++ b/cluster/addons/registry/registry-pvc.yaml.in @@ -5,6 +5,7 @@ metadata: namespace: kube-system labels: kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile spec: accessModes: - ReadWriteOnce diff --git a/cluster/addons/registry/registry-rc.yaml b/cluster/addons/registry/registry-rc.yaml index 29942c4538f..ab0afd27609 100644 --- a/cluster/addons/registry/registry-rc.yaml +++ b/cluster/addons/registry/registry-rc.yaml @@ -7,6 +7,7 @@ metadata: k8s-app: kube-registry version: v0 kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile spec: replicas: 1 selector: diff --git a/cluster/addons/registry/registry-svc.yaml b/cluster/addons/registry/registry-svc.yaml index b9f1cc40b99..db71c91ef78 100644 --- a/cluster/addons/registry/registry-svc.yaml +++ b/cluster/addons/registry/registry-svc.yaml @@ -6,6 +6,7 @@ metadata: labels: k8s-app: kube-registry kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile kubernetes.io/name: "KubeRegistry" spec: selector: diff --git a/cluster/addons/storage-class/aws/default.yaml b/cluster/addons/storage-class/aws/default.yaml index 319fe55cbc4..4ec88ff25d6 100644 --- a/cluster/addons/storage-class/aws/default.yaml +++ b/cluster/addons/storage-class/aws/default.yaml @@ -6,6 +6,7 @@ metadata: storageclass.beta.kubernetes.io/is-default-class: "true" labels: kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: EnsureExists provisioner: kubernetes.io/aws-ebs parameters: type: gp2 diff --git a/cluster/addons/storage-class/azure/default.yaml b/cluster/addons/storage-class/azure/default.yaml index fe208c40e0d..bcc7a33a72a 100644 --- a/cluster/addons/storage-class/azure/default.yaml +++ b/cluster/addons/storage-class/azure/default.yaml @@ -6,4 +6,5 @@ metadata: storageclass.beta.kubernetes.io/is-default-class: "true" labels: kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: EnsureExists provisioner: kubernetes.io/azure-disk diff --git a/cluster/addons/storage-class/gce/default.yaml b/cluster/addons/storage-class/gce/default.yaml index e1b89675f16..f728de1fb5a 100644 --- a/cluster/addons/storage-class/gce/default.yaml +++ b/cluster/addons/storage-class/gce/default.yaml @@ -6,6 +6,7 @@ metadata: storageclass.beta.kubernetes.io/is-default-class: "true" labels: 
kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: EnsureExists provisioner: kubernetes.io/gce-pd parameters: type: pd-standard diff --git a/cluster/addons/storage-class/openstack/default.yaml b/cluster/addons/storage-class/openstack/default.yaml index 6e4f771b7bd..06edf513455 100644 --- a/cluster/addons/storage-class/openstack/default.yaml +++ b/cluster/addons/storage-class/openstack/default.yaml @@ -6,4 +6,5 @@ metadata: storageclass.beta.kubernetes.io/is-default-class: "true" labels: kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: EnsureExists provisioner: kubernetes.io/cinder
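
As a final, hedged aside (an editor's illustration, not part of the patch series): after these template changes, every manifest under the addon path should carry one of the two mode labels, since unlabeled resources in `kube-system` will be ignored by addon-manager. A rough sanity check in the script's own `find` idiom might look like:

```sh
#!/bin/bash
# Warn about addon manifests that do not declare an addon-manager mode label.
# Note: this deliberately ignores the deprecated kubernetes.io/cluster-service
# fallback, which addon-manager still honors for now.
ADDON_PATH=${ADDON_PATH:-/etc/kubernetes/addons}
find "${ADDON_PATH}" \( -name '*.yaml' -o -name '*.json' \) -print | while read -r obj; do
  if ! grep -q 'addonmanager.kubernetes.io/mode' "${obj}"; then
    echo "WRN: ${obj} lacks the addonmanager.kubernetes.io/mode label"
  fi
done
```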