Merge pull request #26783 from a-robinson/newlogs

Automatic merge from submit-queue

Add collection of the new glbc and cluster-autoscaler logs

I've incremented the version numbers by 2 to avoid conflicting with #26652. I'll make sure the potential conflict between the images gets resolved reasonably.
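For reference, here is a minimal sketch (not part of this change) of how the glog-style `format1` regex used by the new glbc and cluster-autoscaler sources splits a log line. Python's `re` is used purely for illustration, with `(?P<name>...)` in place of the `(?<name>...)` named groups in the Fluentd (Ruby) config; the sample line is the one quoted in the config comments.

```python
import re

# Same pattern as the new format1 directives, translated to Python named-group syntax.
GLOG_LINE = re.compile(
    r"^(?P<severity>\w)(?P<time>\d{4} [^\s]*)\s+(?P<pid>\d+)"
    r"\s+(?P<source>[^ \]]+)\] (?P<message>.*)"
)

sample = ("I0603 15:31:05.793605       6 "
          "cluster_manager.go:230] Reading config from path /etc/gce.conf")

m = GLOG_LINE.match(sample)
print(m.group("severity"))  # I
print(m.group("time"))      # 0603 15:31:05.793605
print(m.group("pid"))       # 6
print(m.group("source"))    # cluster_manager.go:230
print(m.group("message"))   # Reading config from path /etc/gce.conf
```

The `format_firstline /^\w\d{4}/` directive only marks where a new glog record starts, so continuation lines such as stack traces get folded into the preceding record rather than parsed separately.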

cc @piosz @bprashanth @aledbf
k8s-merge-robot 2016-06-04 07:35:33 -07:00
commit 714db74611
6 changed files with 61 additions and 8 deletions

View File

@@ -15,7 +15,7 @@
 .PHONY: build push
 IMAGE = fluentd-elasticsearch
-TAG = 1.15
+TAG = 1.17
 build:
 	docker build -t gcr.io/google_containers/$(IMAGE):$(TAG) .

View File

@@ -124,7 +124,7 @@
 format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
 time_format %Y-%m-%d %H:%M:%S
 path /var/log/salt/minion
-pos_file /var/log/gcp-salt.pos
+pos_file /var/log/es-salt.pos
 tag salt
 </source>
@@ -222,6 +222,34 @@
 type kubernetes_metadata
 </filter>
+# Example:
+# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
+<source>
+type tail
+format multiline
+multiline_flush_interval 5s
+format_firstline /^\w\d{4}/
+format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
+time_format %m%d %H:%M:%S.%N
+path /var/log/glbc.log
+pos_file /var/log/es-glbc.log.pos
+tag glbc
+</source>
+# Example:
+# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
+<source>
+type tail
+format multiline
+multiline_flush_interval 5s
+format_firstline /^\w\d{4}/
+format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
+time_format %m%d %H:%M:%S.%N
+path /var/log/cluster-autoscaler.log
+pos_file /var/log/es-cluster-autoscaler.log.pos
+tag cluster-autoscaler
+</source>
 <match **>
 type elasticsearch
 log_level info

View File

@@ -28,7 +28,7 @@
 .PHONY: kbuild kpush
-TAG = 1.18
+TAG = 1.20
 # Rules for building the test image for deployment to Dockerhub with user kubernetes.

View File

@@ -13,9 +13,6 @@
 # "stream":"stderr",
 # "time":"2014-09-25T21:15:03.499185026Z"}
 #
-# Currently, the log information is ingested as plain text rather than JSON.
-# TODO: When Cloud Logging supports it, ingest as JSON.
-#
 # The record reformer is used to write the tag to focus on the pod name
 # and the Kubernetes container name. For example a Docker container's logs
 # might be in the directory:
@@ -171,6 +168,34 @@
 tag kube-scheduler
 </source>
+# Example:
+# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
+<source>
+type tail
+format multiline
+multiline_flush_interval 5s
+format_firstline /^\w\d{4}/
+format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
+time_format %m%d %H:%M:%S.%N
+path /var/log/glbc.log
+pos_file /var/log/gcp-glbc.log.pos
+tag glbc
+</source>
+# Example:
+# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
+<source>
+type tail
+format multiline
+multiline_flush_interval 5s
+format_firstline /^\w\d{4}/
+format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
+time_format %m%d %H:%M:%S.%N
+path /var/log/cluster-autoscaler.log
+pos_file /var/log/gcp-cluster-autoscaler.log.pos
+tag cluster-autoscaler
+</source>
 # We use 2 output stanzas - one to handle the container logs and one to handle
 # the node daemon logs, the latter of which explicitly sends its logs to the
 # compute.googleapis.com service rather than container.googleapis.com to keep

View File

@@ -8,7 +8,7 @@ metadata:
 spec:
   containers:
   - name: fluentd-elasticsearch
-    image: gcr.io/google_containers/fluentd-elasticsearch:1.15
+    image: gcr.io/google_containers/fluentd-elasticsearch:1.17
     resources:
       limits:
         memory: 200Mi

View File

@@ -9,7 +9,7 @@ spec:
   dnsPolicy: Default
   containers:
   - name: fluentd-cloud-logging
-    image: gcr.io/google_containers/fluentd-gcp:1.18
+    image: gcr.io/google_containers/fluentd-gcp:1.20
     resources:
       limits:
         memory: 200Mi