diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-configmap-old.yaml b/cluster/addons/fluentd-gcp/fluentd-gcp-configmap-old.yaml
new file mode 100644
index 00000000000..6cb3051b50f
--- /dev/null
+++ b/cluster/addons/fluentd-gcp/fluentd-gcp-configmap-old.yaml
@@ -0,0 +1,437 @@
+# This ConfigMap is used to ingest logs against old resources like
+# "gke_container" and "gce_instance" when $LOGGING_STACKDRIVER_RESOURCE_TYPES is
+# set to "old".
+# When $LOGGING_STACKDRIVER_RESOURCE_TYPES is set to "new", the ConfigMap in
+# fluentd-gcp-configmap.yaml will be used for ingesting logs against new
+# resources like "k8s_container" and "k8s_node".
+kind: ConfigMap
+apiVersion: v1
+data:
+  containers.input.conf: |-
+    # This configuration file for Fluentd is used
+    # to watch changes to Docker log files that live in the
+    # directory /var/lib/docker/containers/ and are symbolically
+    # linked to from the /var/log/containers directory using names that capture the
+    # pod name and container name. These logs are then submitted to
+    # Google Cloud Logging, which assumes the installation of the cloud-logging plug-in.
+    #
+    # Example
+    # =======
+    # A line in the Docker log file might look like this JSON:
+    #
+    # {"log":"2014/09/25 21:15:03 Got request with path wombat\\n",
+    #  "stream":"stderr",
+    #   "time":"2014-09-25T21:15:03.499185026Z"}
+    #
+    # The original tag is derived from the log file's location.
+    # For example a Docker container's logs might be in the directory:
+    #   /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
+    # and in the file:
+    #   997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
+    # where 997599971ee6... is the Docker ID of the running container.
+    # The Kubernetes kubelet makes a symbolic link to this file on the host
+    # machine in the /var/log/containers directory which includes the pod name,
+    # the namespace name and the Kubernetes container name:
+    #   synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
+    #   ->
+    #   /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
+    # The /var/log directory on the host is mapped to the /var/log directory in the container
+    # running this instance of Fluentd and we end up collecting the file:
+    #   /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
+    # This results in the tag:
+    #   var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
+    # where 'synthetic-logger-0.25lps-pod' is the pod name, 'default' is the
+    # namespace name, 'synth-lgr' is the container name and '997599971ee6..' is
+    # the container ID.
+    # The record reformer is used to discard the var.log.containers prefix and
+    # the Docker container ID suffix and "kubernetes." is pre-pended giving the tag:
+    #   kubernetes.synthetic-logger-0.25lps-pod_default_synth-lgr
+    # The tag is then parsed by the google_cloud plugin and translated into
+    # metadata visible in the log viewer.
+
+    # Json Log Example:
+    # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
+    # CRI Log Example:
+    # 2016-02-17T00:04:05.931087621Z stdout F [info:2016-02-16T16:04:05.930-08:00] Some log text here
+    <source>
+      @type tail
+      path /var/log/containers/*.log
+      pos_file /var/log/gcp-containers.log.pos
+      # Tags at this point are in the format of:
+      # reform.var.log.containers.<POD_NAME>_<NAMESPACE_NAME>_<CONTAINER_NAME>-<CONTAINER_ID>.log
+      tag reform.*
+      read_from_head true
+      format multi_format
+      <pattern>
+        format json
+        time_key time
+        time_format %Y-%m-%dT%H:%M:%S.%NZ
+      </pattern>
+      <pattern>
+        format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
+        time_format %Y-%m-%dT%H:%M:%S.%N%:z
+      </pattern>
+    </source>
+
+    <filter reform.**>
+      @type parser
+      format /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
+      reserve_data true
+      suppress_parse_error_log true
+      emit_invalid_record_to_error false
+      key_name log
+    </filter>
+
+    <match reform.**>
+      @type record_reformer
+      enable_ruby true
+      # Tags at this point are in the format of:
+      # 'raw.kubernetes.<POD_NAME>_<NAMESPACE_NAME>_<CONTAINER_NAME>'.
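+      # E.g. for the example file above, tag_suffix[4] is
+      # 'synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6..log';
+      # split('-')[0..-2].join('-') drops the trailing container ID segment,
+      # so the tag becomes 'raw.kubernetes.synthetic-logger-0.25lps-pod_default_synth-lgr'.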
+      tag raw.kubernetes.${tag_suffix[4].split('-')[0..-2].join('-')}
+    </match>
+
+    # Detect exceptions in the log output and forward them as one log entry.
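+    # E.g. the individual lines of a multi-line Java stack trace printed by an
+    # application are merged into a single log entry rather than one per line.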
+    <match raw.kubernetes.**>
+      @type detect_exceptions
+
+      remove_tag_prefix raw
+      message log
+      stream stream
+      multiline_flush_interval 5
+      max_bytes 500000
+      max_lines 1000
+    </match>
+  system.input.conf: |-
+    # Example:
+    # Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
+    <source>
+      @type tail
+      format syslog
+      path /var/log/startupscript.log
+      pos_file /var/log/gcp-startupscript.log.pos
+      tag startupscript
+    </source>
+
+    # Examples:
+    # time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
+    # time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
+    # TODO(random-liu): Remove this after cri container runtime rolls out.
+    <source>
+      @type tail
+      format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/
+      path /var/log/docker.log
+      pos_file /var/log/gcp-docker.log.pos
+      tag docker
+    </source>
+
+    # Example:
+    # 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
+    <source>
+      @type tail
+      # Not parsing this, because it doesn't have anything particularly useful to
+      # parse out of it (like severities).
+      format none
+      path /var/log/etcd.log
+      pos_file /var/log/gcp-etcd.log.pos
+      tag etcd
+    </source>
+
+    # Multi-line parsing is required for all the kube logs because very large log
+    # statements, such as those that include entire object bodies, get split into
+    # multiple lines by glog.
+
+    # Example:
+    # I0204 07:32:30.020537    3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
+    <source>
+      @type tail
+      format multiline
+      multiline_flush_interval 5s
+      format_firstline /^\w\d{4}/
+      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
+      time_format %m%d %H:%M:%S.%N
+      path /var/log/kubelet.log
+      pos_file /var/log/gcp-kubelet.log.pos
+      tag kubelet
+    </source>
+
+    # Example:
+    # I1118 21:26:53.975789       6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
+    <source>
+      @type tail
+      format multiline
+      multiline_flush_interval 5s
+      format_firstline /^\w\d{4}/
+      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
+      time_format %m%d %H:%M:%S.%N
+      path /var/log/kube-proxy.log
+      pos_file /var/log/gcp-kube-proxy.log.pos
+      tag kube-proxy
+    </source>
+
+    # Example:
+    # I0204 07:00:19.604280       5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
+    <source>
+      @type tail
+      format multiline
+      multiline_flush_interval 5s
+      format_firstline /^\w\d{4}/
+      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
+      time_format %m%d %H:%M:%S.%N
+      path /var/log/kube-apiserver.log
+      pos_file /var/log/gcp-kube-apiserver.log.pos
+      tag kube-apiserver
+    </source>
+
+    # Example:
+    # I0204 06:55:31.872680       5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
+    <source>
+      @type tail
+      format multiline
+      multiline_flush_interval 5s
+      format_firstline /^\w\d{4}/
+      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
+      time_format %m%d %H:%M:%S.%N
+      path /var/log/kube-controller-manager.log
+      pos_file /var/log/gcp-kube-controller-manager.log.pos
+      tag kube-controller-manager
+    </source>
+
+    # Example:
+    # W0204 06:49:18.239674       7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
+    <source>
+      @type tail
+      format multiline
+      multiline_flush_interval 5s
+      format_firstline /^\w\d{4}/
+      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
+      time_format %m%d %H:%M:%S.%N
+      path /var/log/kube-scheduler.log
+      pos_file /var/log/gcp-kube-scheduler.log.pos
+      tag kube-scheduler
+    </source>
+
+    # Example:
+    # I1104 10:36:20.242766       5 rescheduler.go:73] Running Rescheduler
+    <source>
+      @type tail
+      format multiline
+      multiline_flush_interval 5s
+      format_firstline /^\w\d{4}/
+      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
+      time_format %m%d %H:%M:%S.%N
+      path /var/log/rescheduler.log
+      pos_file /var/log/gcp-rescheduler.log.pos
+      tag rescheduler
+    </source>
+
+    # Example:
+    # I0603 15:31:05.793605       6 cluster_manager.go:230] Reading config from path /etc/gce.conf
+    <source>
+      @type tail
+      format multiline
+      multiline_flush_interval 5s
+      format_firstline /^\w\d{4}/
+      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
+      time_format %m%d %H:%M:%S.%N
+      path /var/log/glbc.log
+      pos_file /var/log/gcp-glbc.log.pos
+      tag glbc
+    </source>
+
+    # Example:
+    # I0603 15:31:05.793605       6 cluster_manager.go:230] Reading config from path /etc/gce.conf
+    <source>
+      @type tail
+      format multiline
+      multiline_flush_interval 5s
+      format_firstline /^\w\d{4}/
+      format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
+      time_format %m%d %H:%M:%S.%N
+      path /var/log/cluster-autoscaler.log
+      pos_file /var/log/gcp-cluster-autoscaler.log.pos
+      tag cluster-autoscaler
+    </source>
+
+    # Logs from systemd-journal for interesting services.
+    # TODO(random-liu): Keep this for compatibility, remove this after
+    # cri container runtime rolls out.
+    <source>
+      @type systemd
+      filters [{ "_SYSTEMD_UNIT": "docker.service" }]
+      pos_file /var/log/gcp-journald-docker.pos
+      read_from_head true
+      tag docker
+    </source>
+
+    <source>
+      @type systemd
+      filters [{ "_SYSTEMD_UNIT": "{{ container_runtime }}.service" }]
+      pos_file /var/log/gcp-journald-container-runtime.pos
+      read_from_head true
+      tag container-runtime
+    </source>
+
+    <source>
+      @type systemd
+      filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
+      pos_file /var/log/gcp-journald-kubelet.pos
+      read_from_head true
+      tag kubelet
+    </source>
+
+    <source>
+      @type systemd
+      filters [{ "_SYSTEMD_UNIT": "node-problem-detector.service" }]
+      pos_file /var/log/gcp-journald-node-problem-detector.pos
+      read_from_head true
+      tag node-problem-detector
+    </source>
+
+    # BEGIN_NODE_JOURNAL
+    # Whether to include node-journal or not is determined when starting the
+    # cluster. It is not changed when the cluster is already running.
+    <source>
+      @type systemd
+      pos_file /var/log/gcp-journald.pos
+      read_from_head true
+      tag node-journal
+    </source>
+
+    <filter node-journal>
+      @type grep
+      <exclude>
+        key _SYSTEMD_UNIT
+        pattern ^(docker|{{ container_runtime }}|kubelet|node-problem-detector)\.service$
+      </exclude>
+    </filter>
+    # END_NODE_JOURNAL
+  monitoring.conf: |-
+    # This source is used to acquire the approximate process start timestamp,
+    # whose purpose is explained before the corresponding output plugin.
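+    # E.g. it emits a record such as {"process_start_timestamp":"1523628000"}
+    # (the value is a string; the filter below casts it to an integer).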
+    <source>
+      @type exec
+      command /bin/sh -c 'date +%s'
+      tag process_start
+      time_format %Y-%m-%d %H:%M:%S
+      keys process_start_timestamp
+    </source>
+
+    # This filter is used to convert the process start timestamp to an integer
+    # value for correct ingestion in the prometheus output plugin.
+    <filter process_start>
+      @type record_transformer
+      enable_ruby true
+      auto_typecast true
+      <record>
+        process_start_timestamp ${record["process_start_timestamp"].to_i}
+      </record>
+    </filter>
+  output.conf: |-
+    # This match is placed before the all-matching output to provide the metric
+    # exporter with a process start timestamp for correct exporting of
+    # cumulative metrics to Stackdriver.
+    <match process_start>
+      @type prometheus
+
+      <metric>
+        type gauge
+        name process_start_time_seconds
+        desc Timestamp of the process start in seconds
+        key process_start_timestamp
+      </metric>
+    </match>
+
+    # This filter makes it possible to count the number of log entries read by
+    # fluentd before they are processed by the output plugin. This in turn
+    # allows monitoring the number of log entries that were read but never
+    # sent, e.g. because the liveness probe removed the buffer.
+    <filter **>
+      @type prometheus
+      <metric>
+        type counter
+        name logging_entry_count
+        desc Total number of log entries generated by either application containers or system components
+      </metric>
+    </filter>
+
+    # TODO(instrumentation): Reconsider this workaround later.
+    # Trim the entries which exceed slightly less than 100KB, to avoid
+    # dropping them. This is necessary because Stackdriver only supports
+    # entries that are up to 100KB in size.
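+    # E.g. a 150000-character entry becomes '[Trimmed]', followed by its first
+    # 100001 characters ([0..100000]) and '...'.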
+    <filter kubernetes.**>
+      @type record_transformer
+      enable_ruby true
+      <record>
+        log ${record['log'].length > 100000 ? "[Trimmed]#{record['log'][0..100000]}..." : record['log']}
+      </record>
+    </filter>
+
+    # Do not collect fluentd's own logs to avoid infinite loops.
+    <match fluent.**>
+      @type null
+    </match>
+
+    # We use 2 output stanzas - one to handle the container logs and one to handle
+    # the node daemon logs, the latter of which explicitly sends its logs to the
+    # compute.googleapis.com service rather than container.googleapis.com to keep
+    # them separate since most users don't care about the node logs.
+    <match kubernetes.**>
+      @type google_cloud
+
+      # Try to detect JSON formatted log entries.
+      detect_json true
+      # Collect metrics in Prometheus registry about plugin activity.
+      enable_monitoring true
+      monitoring_type prometheus
+      # Allow log entries from multiple containers to be sent in the same request.
+      split_logs_by_tag false
+      # Set the buffer type to file to improve the reliability and reduce the memory consumption
+      buffer_type file
+      buffer_path /var/log/fluentd-buffers/kubernetes.containers.buffer
+      # Set queue_full action to block, because we want to pause gracefully
+      # when the load goes over the limits instead of throwing an exception
+      buffer_queue_full_action block
+      # Set the chunk limit conservatively to avoid exceeding the recommended
+      # chunk size of 5MB per write request.
+      buffer_chunk_limit 1M
+      # Cap the combined memory usage of this buffer and the one below to
+      # 1MiB/chunk * (6 + 2) chunks = 8 MiB
+      buffer_queue_limit 6
+      # Never wait more than 5 seconds before flushing logs in the non-error case.
+      flush_interval 5s
+      # Never wait longer than 30 seconds between retries.
+      max_retry_wait 30
+      # Disable the limit on the number of retries (retry forever).
+      disable_retry_limit
+      # Use multiple threads for processing.
+      num_threads 2
+      use_grpc true
+    </match>
+
+    # Keep a smaller buffer here since these logs are less important than the user's
+    # container logs.
+    <match **>
+      @type google_cloud
+
+      detect_json true
+      enable_monitoring true
+      monitoring_type prometheus
+      # Allow entries from multiple system logs to be sent in the same request.
+      split_logs_by_tag false
+      detect_subservice false
+      buffer_type file
+      buffer_path /var/log/fluentd-buffers/kubernetes.system.buffer
+      buffer_queue_full_action block
+      buffer_chunk_limit 1M
+      buffer_queue_limit 2
+      flush_interval 5s
+      max_retry_wait 30
+      disable_retry_limit
+      num_threads 2
+      use_grpc true
+    </match>
+metadata:
+  name: fluentd-gcp-config-old-v1.2.5
+  namespace: kube-system
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml b/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml
index 24bca69e276..d02fc8a3870 100644
--- a/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml
+++ b/cluster/addons/fluentd-gcp/fluentd-gcp-configmap.yaml
@@ -1,3 +1,9 @@
+# This ConfigMap is used to ingest logs against new resources like
+# "k8s_container" and "k8s_node" when $LOGGING_STACKDRIVER_RESOURCE_TYPES is set
+# to "new".
+# When $LOGGING_STACKDRIVER_RESOURCE_TYPES is set to "old", the ConfigMap in
+# fluentd-gcp-configmap-old.yaml will be used for ingesting logs against old
+# resources like "gke_container" and "gce_instance".
kind: ConfigMap
apiVersion: v1
data:
@@ -17,29 +23,34 @@ data:
     #  "stream":"stderr",
     #   "time":"2014-09-25T21:15:03.499185026Z"}
     #
-    # The record reformer is used to write the tag to focus on the pod name
-    # and the Kubernetes container name. For example a Docker container's logs
-    # might be in the directory:
+    # The original tag is derived from the log file's location.
+    # For example a Docker container's logs might be in the directory:
     #   /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
     # and in the file:
     #   997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
     # where 997599971ee6... is the Docker ID of the running container.
-    # The Kubernetes kubelet makes a symbolic link to this file on the host machine
-    # in the /var/log/containers directory which includes the pod name and the Kubernetes
-    # container name:
-    #   synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
+    # The Kubernetes kubelet makes a symbolic link to this file on the host
+    # machine in the /var/log/containers directory which includes the pod name,
+    # the namespace name and the Kubernetes container name:
+    #   synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
     #   ->
     #   /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
     # The /var/log directory on the host is mapped to the /var/log directory in the container
     # running this instance of Fluentd and we end up collecting the file:
-    #   /var/log/containers/synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
+    #   /var/log/containers/synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
     # This results in the tag:
-    #   var.log.containers.synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
-    # The record reformer is used is discard the var.log.containers prefix and
-    # the Docker container ID suffix and "kubernetes." is pre-pended giving the tag:
-    #   kubernetes.synthetic-logger-0.25lps-pod_default-synth-lgr
-    # Tag is then parsed by google_cloud plugin and translated to the metadata,
-    # visible in the log viewer
+    #   var.log.containers.synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
+    # where 'synthetic-logger-0.25lps-pod' is the pod name, 'default' is the
+    # namespace name, 'synth-lgr' is the container name and '997599971ee6..' is
+    # the container ID.
+    # The record reformer is used to extract pod_name, namespace_name and
+    # container_name from the tag and set them in a local_resource_id in the
+    # format of:
+    # 'k8s_container.<NAMESPACE_NAME>.<POD_NAME>.<CONTAINER_NAME>'.
+    # The reformer also changes the tags to 'stderr' or 'stdout' based on the
+    # value of 'stream'.
+    # local_resource_id is later used by google_cloud plugin to determine the
+    # monitored resource to ingest logs against.

     # Json Log Example:
     # {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
@@ -49,28 +60,22 @@ data:
     <source>
       @type tail
       path /var/log/containers/*.log
       pos_file /var/log/gcp-containers.log.pos
+      # Tags at this point are in the format of:
+      # reform.var.log.containers.<POD_NAME>_<NAMESPACE_NAME>_<CONTAINER_NAME>-<CONTAINER_ID>.log
       tag reform.*
       read_from_head true
-      format none
+      format multi_format
+      <pattern>
+        format json
+        time_key time
+        time_format %Y-%m-%dT%H:%M:%S.%NZ
+      </pattern>
+      <pattern>
+        format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
+        time_format %Y-%m-%dT%H:%M:%S.%N%:z
+      </pattern>
     </source>

-    <filter reform.**>
-      @type parser
-      key_name message
-      <parse>
-        @type multi_format
-        <pattern>
-          format json
-          time_key time
-          time_format %Y-%m-%dT%H:%M:%S.%NZ
-        </pattern>
-        <pattern>
-          format /^(?<time>.+) (?<stream>stdout|stderr) [^ ]* (?<log>.*)$/
-          time_format %Y-%m-%dT%H:%M:%S.%N%:z
-        </pattern>
-      </parse>
-    </filter>
-
     <filter reform.**>
       @type parser
       format /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
@@ -83,11 +88,23 @@ data:
     <match reform.**>
       @type record_reformer
       enable_ruby true
-      tag raw.kubernetes.${tag_suffix[4].split('-')[0..-2].join('-')}
+      <record>
+        # Extract local_resource_id from tag for 'k8s_container' monitored
+        # resource. The format is:
+        # 'k8s_container.<NAMESPACE_NAME>.<POD_NAME>.<CONTAINER_NAME>'.
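+        # E.g. for the tag suffix
+        # 'synthetic-logger-0.25lps-pod_default_synth-lgr-997599971ee6.log'
+        # (container ID shortened for illustration): rpartition('.')[0] strips
+        # '.log'; split('_') yields ['synthetic-logger-0.25lps-pod', 'default',
+        # 'synth-lgr-997599971ee6']; picking [1], [0] and [2] (with the trailing
+        # '-<CONTAINER_ID>' removed by rpartition('-')[0]) produces
+        # 'k8s_container.default.synthetic-logger-0.25lps-pod.synth-lgr'.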
+        "logging.googleapis.com/local_resource_id" ${"k8s_container.#{tag_suffix[4].rpartition('.')[0].split('_')[1]}.#{tag_suffix[4].rpartition('.')[0].split('_')[0]}.#{tag_suffix[4].rpartition('.')[0].split('_')[2].rpartition('-')[0]}"}
+        # Rename the field 'log' to a more generic field 'message'. This way the
+        # fluent-plugin-google-cloud knows to flatten the field as textPayload
+        # instead of jsonPayload after extracting 'time', 'severity' and
+        # 'stream' from the record.
+        message ${record['log']}
+      </record>
+      tag ${if record['stream'] == 'stderr' then 'raw.stderr' else 'raw.stdout' end}
+      remove_keys stream,log
     </match>

     # Detect exceptions in the log output and forward them as one log entry.
-    <match raw.kubernetes.**>
+    <match {raw.stderr,raw.stdout}>
       @type detect_exceptions

       remove_tag_prefix raw
@@ -350,23 +367,30 @@ data:
+    # This section is exclusive for k8s_container logs. Those come with
+    # 'stderr'/'stdout' tags.
     # TODO(instrumentation): Reconsider this workaround later.
     # Trim the entries which exceed slightly less than 100KB, to avoid
     # dropping them. It is a necessity, because Stackdriver only supports
     # entries that are up to 100KB in size.
-    <filter kubernetes.**>
+    <filter {stderr,stdout}>
       @type record_transformer
       enable_ruby true
       <record>
-        log ${record['log'].length > 100000 ? "[Trimmed]#{record['log'][0..100000]}..." : record['log']}
+        message ${record['message'].length > 100000 ? "[Trimmed]#{record['message'][0..100000]}..." : record['message']}
       </record>
     </filter>

-    # We use 2 output stanzas - one to handle the container logs and one to handle
-    # the node daemon logs, the latter of which explicitly sends its logs to the
-    # compute.googleapis.com service rather than container.googleapis.com to keep
-    # them separate since most users don't care about the node logs.
-    <match kubernetes.**>
+    # Do not collect fluentd's own logs to avoid infinite loops.
+    <match fluent.**>
+      @type null
+    </match>
+
+    # This section is exclusive for k8s_container logs. These logs come with
+    # 'stderr'/'stdout' tags.
+    # We use a separate output stanza for 'k8s_node' logs with a smaller buffer
+    # because node logs are less important than user's container logs.
+    <match {stderr,stdout}>
       @type google_cloud

       # Try to detect JSON formatted log entries.
@@ -397,15 +421,23 @@ data:
# Use multiple threads for processing.
num_threads 2
use_grpc true
-      labels {
-        # The logging backend will take responsibility for double writing to
-        # the necessary resource types when this label is set.
-        "logging.googleapis.com/k8s_compatibility": "true"
-      }
+      # Use Metadata Agent to get monitored resource.
+      enable_metadata_agent true
     </match>

-    # Keep a smaller buffer here since these logs are less important than the user's
-    # container logs.
+    # Attach local_resource_id for 'k8s_node' monitored resource.
+    <filter **>
+      @type record_transformer
+      enable_ruby true
+      <record>
+        "logging.googleapis.com/local_resource_id" ${"k8s_node.#{ENV['NODE_NAME']}"}
+      </record>
+    </filter>
+
+    # This section is exclusive for 'k8s_node' logs. These logs come with tags
+    # that are neither 'stderr' nor 'stdout'.
+    # We use a separate output stanza for 'k8s_container' logs with a larger
+    # buffer because user's container logs are more important than node logs.
     <match **>
       @type google_cloud
@@ -425,14 +457,11 @@ data:
disable_retry_limit
num_threads 2
use_grpc true
-      labels {
-        # The logging backend will take responsibility for double writing to
-        # the necessary resource types when this label is set.
-        "logging.googleapis.com/k8s_compatibility": "true"
-      }
+      # Use Metadata Agent to get monitored resource.
+      enable_metadata_agent true
     </match>
metadata:
- name: fluentd-gcp-config-v1.2.4
+ name: fluentd-gcp-config-v1.2.5
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile
diff --git a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml
index 2dbdadb47ec..1cb61a318be 100644
--- a/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml
+++ b/cluster/addons/fluentd-gcp/fluentd-gcp-ds.yaml
@@ -37,6 +37,14 @@ spec:
readOnly: true
- name: config-volume
mountPath: /etc/google-fluentd/config.d
+        env:
+        - name: STACKDRIVER_METADATA_AGENT_URL
+          value: {{ stackdriver_metadata_agent_url }}
+        - name: NODE_NAME
+          valueFrom:
+            fieldRef:
+              apiVersion: v1
+              fieldPath: spec.nodeName
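+        # NODE_NAME is read via ENV['NODE_NAME'] in the fluentd output.conf to
+        # build the 'k8s_node' local_resource_id for node-level logs.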
        # Liveness probe is aimed to help in situations where fluentd
        # silently hangs for no apparent reasons until manual restart.
        # The idea of this probe is that if fluentd is not queueing or
@@ -108,4 +116,4 @@ spec:
path: /var/lib/docker/containers
- name: config-volume
configMap:
- name: fluentd-gcp-config-v1.2.4
+ name: {{ fluentd_gcp_configmap_name }}-v1.2.5
diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh
index e9717a27a87..5700ccf32a0 100755
--- a/cluster/gce/config-default.sh
+++ b/cluster/gce/config-default.sh
@@ -383,7 +383,7 @@ if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
fi
# Fluentd requirements
-FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.28-1}"
+FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.30-1-k8s}"
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"
@@ -395,7 +395,7 @@ HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
-PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER"
+PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES STACKDRIVER_METADATA_AGENT_URL"
# Fluentd configuration for node-journal
ENABLE_NODE_JOURNAL="${ENABLE_NODE_JOURNAL:-false}"
diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh
index 2870234f50b..6d66b320d46 100755
--- a/cluster/gce/config-test.sh
+++ b/cluster/gce/config-test.sh
@@ -395,7 +395,7 @@ if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
fi
# Fluentd requirements
-FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.28-1}"
+FLUENTD_GCP_VERSION="${FLUENTD_GCP_VERSION:-0.2-1.5.30-1-k8s}"
FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"
@@ -407,7 +407,7 @@ HEAPSTER_GCP_BASE_CPU="${HEAPSTER_GCP_BASE_CPU:-80m}"
HEAPSTER_GCP_CPU_PER_NODE="${HEAPSTER_GCP_CPU_PER_NODE:-0.5}"
# Adding to PROVIDER_VARS, since this is GCP-specific.
-PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER"
+PROVIDER_VARS="${PROVIDER_VARS:-} FLUENTD_GCP_VERSION FLUENTD_GCP_MEMORY_LIMIT FLUENTD_GCP_CPU_REQUEST FLUENTD_GCP_MEMORY_REQUEST HEAPSTER_GCP_BASE_MEMORY HEAPSTER_GCP_MEMORY_PER_NODE HEAPSTER_GCP_BASE_CPU HEAPSTER_GCP_CPU_PER_NODE CUSTOM_KUBE_DASHBOARD_BANNER LOGGING_STACKDRIVER_RESOURCE_TYPES STACKDRIVER_METADATA_AGENT_URL"
# Fluentd configuration for node-journal
ENABLE_NODE_JOURNAL="${ENABLE_NODE_JOURNAL:-false}"
diff --git a/cluster/gce/gci/configure-helper.sh b/cluster/gce/gci/configure-helper.sh
index 0000f325992..a47eec002a2 100644
--- a/cluster/gce/gci/configure-helper.sh
+++ b/cluster/gce/gci/configure-helper.sh
@@ -2051,6 +2051,36 @@ function setup-coredns-manifest {
sed -i -e "s@{{ *pillar\['service_cluster_ip_range'\] *}}@${SERVICE_CLUSTER_IP_RANGE}@g" "${coredns_file}"
}
+# Sets up the Fluentd ConfigMap and DaemonSet manifests for k8s addons.
+function setup-fluentd {
+ local -r dst_dir="$1"
+ local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
+ # Ingest logs against new resources like "k8s_container" and "k8s_node" if
+ # LOGGING_STACKDRIVER_RESOURCE_TYPES is "new".
+ # Ingest logs against old resources like "gke_container" and "gce_instance" if
+ # LOGGING_STACKDRIVER_RESOURCE_TYPES is "old".
+ if [[ "${LOGGING_STACKDRIVER_RESOURCE_TYPES:-old}" == "new" ]]; then
+ local -r fluentd_gcp_configmap_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-configmap.yaml"
+ fluentd_gcp_configmap_name="fluentd-gcp-config"
+ else
+ local -r fluentd_gcp_configmap_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-configmap-old.yaml"
+ fluentd_gcp_configmap_name="fluentd-gcp-config-old"
+ fi
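+  # E.g. with LOGGING_STACKDRIVER_RESOURCE_TYPES unset or set to "old", the
+  # "{{ fluentd_gcp_configmap_name }}" placeholder becomes
+  # "fluentd-gcp-config-old", so the DaemonSet references the ConfigMap
+  # fluentd-gcp-config-old-v1.2.5 defined in fluentd-gcp-configmap-old.yaml.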
+ sed -i -e "s@{{ fluentd_gcp_configmap_name }}@${fluentd_gcp_configmap_name}@g" "${fluentd_gcp_yaml}"
+ fluentd_gcp_version="${FLUENTD_GCP_VERSION:-0.2-1.5.30-1-k8s}"
+ sed -i -e "s@{{ fluentd_gcp_version }}@${fluentd_gcp_version}@g" "${fluentd_gcp_yaml}"
+ if [[ "${STACKDRIVER_METADATA_AGENT_URL:-}" != "" ]]; then
+ metadata_agent_url="${STACKDRIVER_METADATA_AGENT_URL}"
+ else
+ metadata_agent_url="http://${HOSTNAME}:8799"
+ fi
+ sed -i -e "s@{{ stackdriver_metadata_agent_url }}@${metadata_agent_url}@g" "${fluentd_gcp_yaml}"
+ update-prometheus-to-sd-parameters ${fluentd_gcp_yaml}
+ start-fluentd-resource-update ${fluentd_gcp_yaml}
+ update-container-runtime ${fluentd_gcp_configmap_yaml}
+ update-node-journal ${fluentd_gcp_configmap_yaml}
+}
+
# Sets up the manifests of kube-dns for k8s addons.
function setup-kube-dns-manifest {
local -r kubedns_file="${dst_dir}/dns/kube-dns.yaml"
@@ -2191,17 +2221,10 @@ EOF
if [[ "${ENABLE_NODE_LOGGING:-}" == "true" ]] && \
[[ "${LOGGING_DESTINATION:-}" == "gcp" ]]; then
setup-addon-manifests "addons" "fluentd-gcp"
+ setup-fluentd ${dst_dir}
local -r event_exporter_yaml="${dst_dir}/fluentd-gcp/event-exporter.yaml"
- local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
- local -r fluentd_gcp_configmap_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-configmap.yaml"
update-event-exporter ${event_exporter_yaml}
- fluentd_gcp_version="${FLUENTD_GCP_VERSION:-0.2-1.5.28-1}"
- sed -i -e "s@{{ fluentd_gcp_version }}@${fluentd_gcp_version}@g" "${fluentd_gcp_yaml}"
update-prometheus-to-sd-parameters ${event_exporter_yaml}
- update-prometheus-to-sd-parameters ${fluentd_gcp_yaml}
- start-fluentd-resource-update ${fluentd_gcp_yaml}
- update-container-runtime ${fluentd_gcp_configmap_yaml}
- update-node-journal ${fluentd_gcp_configmap_yaml}
fi
if [[ "${ENABLE_CLUSTER_UI:-}" == "true" ]]; then
setup-addon-manifests "addons" "dashboard"