diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh
index 0aadc4b243d..e1cf909c3a4 100755
--- a/cluster/gce/config-default.sh
+++ b/cluster/gce/config-default.sh
@@ -285,3 +285,10 @@ fi
 if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
   PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_MAX_SIZE"
 fi
+
+# Fluentd requirements
+FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-300Mi}"
+FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-100m}"
+FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-200Mi}"
+# Adding to PROVIDER_VARS, since this is GCP-specific.
+PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST"
diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh
index 2746f06eb29..2b41a4c1936 100755
--- a/cluster/gce/config-test.sh
+++ b/cluster/gce/config-test.sh
@@ -334,3 +334,10 @@ fi
 if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
   PROVIDER_VARS="${PROVIDER_VARS:-} LOGROTATE_MAX_SIZE"
 fi
+
+# Fluentd requirements
+FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-300Mi}"
+FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-100m}"
+FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-200Mi}"
+# Adding to PROVIDER_VARS, since this is GCP-specific.
+PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST"
diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh
index 34d3f936caa..e6fbfa582ee 100644
--- a/cluster/gce/gci/configure-helper.sh
+++ b/cluster/gce/gci/configure-helper.sh
@@ -1596,6 +1596,26 @@ function setup-addon-manifests {
   chmod 644 "${dst_dir}"/*
 }
 
+# Fluentd manifest is modified using kubectl, which may not be available at
+# this point. Run this as a background process.
+function wait-for-apiserver-and-update-fluentd {
+  until kubectl get nodes >/dev/null 2>&1
+  do
+    sleep 10
+  done
+  kubectl set resources --dry-run --local -f "${fluentd_gcp_yaml}" \
+    --limits=memory="${FLUENTD_GCP_MEMORY_LIMIT}" \
+    --requests=cpu="${FLUENTD_GCP_CPU_REQUEST}",memory="${FLUENTD_GCP_MEMORY_REQUEST}" \
+    -o yaml > "${fluentd_gcp_yaml}.tmp" \
+  && mv "${fluentd_gcp_yaml}.tmp" "${fluentd_gcp_yaml}"
+}
+
+# Trigger background process that will ultimately update fluentd resource
+# requirements.
+function start-fluentd-resource-update {
+  wait-for-apiserver-and-update-fluentd &
+}
+
 # Prepares the manifests of k8s addons, and starts the addon manager.
 # Vars assumed:
 #   CLUSTER_NAME
@@ -1679,6 +1699,8 @@ function start-kube-addons {
   if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
      [[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then
     setup-addon-manifests "addons" "fluentd-gcp"
+    local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
+    start-fluentd-resource-update
   fi
   if [[ "${ENABLE_CLUSTER_UI:-}" == "true" ]]; then
     setup-addon-manifests "addons" "dashboard"
diff --git a/cluster/gke/config-default.sh b/cluster/gke/config-default.sh
index bd7459325b1..b611b91ec8b 100644
--- a/cluster/gke/config-default.sh
+++ b/cluster/gke/config-default.sh
@@ -47,3 +47,10 @@ KUBE_DELETE_NETWORK=${KUBE_DELETE_NETWORK:-false}
 # authentication) in metadata should be treated as canonical, and therefore disk
 # copies ought to be recreated/clobbered.
 METADATA_CLOBBERS_CONFIG=true
+
+# Fluentd requirements
+FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-300Mi}"
+FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-100m}"
+FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-200Mi}"
+# Adding to PROVIDER_VARS, since this is GCP-specific.
+PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST"