Mirror of https://github.com/k3s-io/kubernetes.git
Merge pull request #58090 from serathius/pass-location-to-event-exporter
Automatic merge from submit-queue (batch tested with PRs 60054, 60202, 60219, 58090, 60275). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Pass location parameter to event exporter.

**What this PR does / why we need it**: This PR makes event-exporter export the cluster location together with events.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:

**Special notes for your reviewer**:

**Release note**:

```release-note
NONE
```
Commit a85f7d9fff
```diff
@@ -49,7 +49,8 @@ spec:
       - name: event-exporter
         image: k8s.gcr.io/event-exporter:v0.1.8
         command:
-        - '/event-exporter'
+        - /event-exporter
+        - -sink-opts="-location={{ event_exporter_zone }}"
         # BEGIN_PROMETHEUS_TO_SD
       - name: prometheus-to-sd-exporter
         image: k8s.gcr.io/prometheus-to-sd:v0.2.4
```
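For orientation, the rendered argument passes the location inside a single `-sink-opts` string. A minimal Go sketch, not the event-exporter's actual flag handling, of how such a value could be split and the `-location` entry read back out (the zone value is illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// extractLocation splits a sink-opts value such as "-location=us-central1-f"
// into whitespace-separated tokens and returns the -location value, if any.
// This is an illustration only, not the exporter's real parsing code.
func extractLocation(sinkOpts string) (string, bool) {
	for _, tok := range strings.Fields(sinkOpts) {
		if strings.HasPrefix(tok, "-location=") {
			return strings.TrimPrefix(tok, "-location="), true
		}
	}
	return "", false
}

func main() {
	loc, ok := extractLocation("-location=us-central1-f")
	fmt.Println(loc, ok) // us-central1-f true
}
```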
```diff
@@ -2080,6 +2080,11 @@ function update-prometheus-to-sd-parameters {
   fi
 }
 
+# Updates parameters in yaml file for event-exporter configuration
+function update-event-exporter {
+  sed -i -e "s@{{ *event_exporter_zone *}}@${ZONE:-}@g" "$1"
+}
+
 # Sets up the manifests of coreDNS for k8s addons.
 function setup-coredns-manifest {
   local -r coredns_file="${dst_dir}/dns/coredns.yaml"
```
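The sed expression above replaces every `{{ event_exporter_zone }}` placeholder, with any spacing inside the braces, by the value of `$ZONE`. A minimal Go sketch of the same substitution (the zone value is illustrative):

```go
package main

import (
	"fmt"
	"regexp"
)

// placeholder mirrors the sed pattern `{{ *event_exporter_zone *}}`:
// the literal name with any number of spaces after "{{" and before "}}".
var placeholder = regexp.MustCompile(`\{\{ *event_exporter_zone *\}\}`)

func substituteZone(manifest, zone string) string {
	return placeholder.ReplaceAllString(manifest, zone)
}

func main() {
	line := `- -sink-opts="-location={{ event_exporter_zone }}"`
	fmt.Println(substituteZone(line, "us-central1-f"))
	// Output: - -sink-opts="-location=us-central1-f"
}
```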
```diff
@@ -2229,6 +2234,7 @@ EOF
     local -r event_exporter_yaml="${dst_dir}/fluentd-gcp/event-exporter.yaml"
     local -r fluentd_gcp_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-ds.yaml"
     local -r fluentd_gcp_configmap_yaml="${dst_dir}/fluentd-gcp/fluentd-gcp-configmap.yaml"
+    update-event-exporter ${event_exporter_yaml}
     update-prometheus-to-sd-parameters ${event_exporter_yaml}
     update-prometheus-to-sd-parameters ${fluentd_gcp_yaml}
     start-fluentd-resource-update ${fluentd_gcp_yaml}
```
```diff
@@ -621,6 +621,7 @@ CONTAINER_RUNTIME_ENDPOINT: $(yaml-quote ${CONTAINER_RUNTIME_ENDPOINT:-})
 CONTAINER_RUNTIME_NAME: $(yaml-quote ${CONTAINER_RUNTIME_NAME:-})
 NODE_LOCAL_SSDS_EXT: $(yaml-quote ${NODE_LOCAL_SSDS_EXT:-})
 LOAD_IMAGE_COMMAND: $(yaml-quote ${LOAD_IMAGE_COMMAND:-})
+ZONE: $(yaml-quote ${ZONE})
 EOF
   if [ -n "${KUBELET_PORT:-}" ]; then
     cat >>$file <<EOF
```
```diff
@@ -861,7 +862,6 @@ EOF
     # Node-only env vars.
     cat >>$file <<EOF
 KUBERNETES_MASTER: $(yaml-quote "false")
-ZONE: $(yaml-quote ${ZONE})
 EXTRA_DOCKER_OPTS: $(yaml-quote ${EXTRA_DOCKER_OPTS:-})
 EOF
     if [ -n "${KUBEPROXY_TEST_ARGS:-}" ]; then
```
```diff
@@ -328,6 +328,8 @@ func (p *sdLogProvider) tryGetName(sdLogEntry sd.LogEntry) (string, bool) {
 
 func convertLogEntry(sdLogEntry sd.LogEntry) (entry utils.LogEntry, err error) {
 	entry = utils.LogEntry{LogName: sdLogEntry.LogName}
+	entry.Location = sdLogEntry.Resource.Labels["location"]
+
 	if sdLogEntry.TextPayload != "" {
 		entry.TextPayload = sdLogEntry.TextPayload
 		return
```
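The location is taken from the `location` label of the entry's monitored resource rather than from the payload. A minimal, self-contained sketch with stand-in types (the real code uses the Stackdriver client's `sd.LogEntry` and the test utils' `utils.LogEntry`; the struct shapes and values below are assumptions that mirror only the fields touched here):

```go
package main

import "fmt"

// Stand-in for the fields of a Stackdriver log entry that the diff touches.
type sdLogEntry struct {
	LogName     string
	TextPayload string
	Resource    struct {
		Labels map[string]string
	}
}

// Stand-in for the test utils' LogEntry after this PR.
type logEntry struct {
	LogName     string
	TextPayload string
	Location    string
}

func convertLogEntry(in sdLogEntry) logEntry {
	out := logEntry{LogName: in.LogName}
	// The cluster location travels on the monitored resource, not in the payload.
	out.Location = in.Resource.Labels["location"]
	if in.TextPayload != "" {
		out.TextPayload = in.TextPayload
	}
	return out
}

func main() {
	var in sdLogEntry
	in.LogName = "events"
	in.Resource.Labels = map[string]string{"location": "us-central1-f"}
	out := convertLogEntry(in)
	fmt.Println(out.Location) // us-central1-f
}
```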
```diff
@@ -32,6 +32,7 @@ var (
 type LogEntry struct {
 	LogName     string
 	TextPayload string
+	Location    string
 	JSONPayload map[string]interface{}
 }
 
```
```diff
@@ -48,6 +48,9 @@ func UntilFirstEntryFromLog(log string) IngestionPred {
 	return func(_ string, entries []LogEntry) (bool, error) {
 		for _, e := range entries {
 			if e.LogName == log {
+				if e.Location != framework.TestContext.CloudConfig.Zone {
+					return false, fmt.Errorf("Bad location in logs '%s' != '%d'", e.Location, framework.TestContext.CloudConfig.Zone)
+				}
 				return true, nil
 			}
 		}
```
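On the test side, once the first entry from the expected log shows up, its Location must match the zone the cluster runs in, otherwise ingestion fails immediately. A minimal sketch with assumed local types and values (the real predicate reads the zone from `framework.TestContext.CloudConfig.Zone`); since the zone is a string, the sketch formats both sides with `%q`:

```go
package main

import "fmt"

type logEntry struct {
	LogName  string
	Location string
}

// ingestionPred mirrors the shape of the test utils' IngestionPred.
type ingestionPred func(logName string, entries []logEntry) (bool, error)

// untilFirstEntryFromLogInZone succeeds on the first entry from log,
// but fails fast if that entry carries the wrong location.
func untilFirstEntryFromLogInZone(log, zone string) ingestionPred {
	return func(_ string, entries []logEntry) (bool, error) {
		for _, e := range entries {
			if e.LogName == log {
				if e.Location != zone {
					return false, fmt.Errorf("bad location in logs %q != %q", e.Location, zone)
				}
				return true, nil
			}
		}
		return false, nil
	}
}

func main() {
	pred := untilFirstEntryFromLogInZone("events", "us-central1-f")
	ok, err := pred("", []logEntry{{LogName: "events", Location: "us-central1-f"}})
	fmt.Println(ok, err) // true <nil>
}
```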