Merge pull request #59657 from x13n/manual-fluentd-gcp-scaler

Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Enable scaling fluentd-gcp resources using a ScalingPolicy.

See https://github.com/justinsb/scaler for more details about the ScalingPolicy resource.

**What this PR does / why we need it**:
This adds a way to override fluentd-gcp resources in a running cluster. Resource syncing for fluentd-gcp is decoupled from the addon manager.
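For example, once the cluster is up, an operator could override the defaults by applying a policy along these lines (an illustrative sketch: the resource values are arbitrary, and the object name and schema mirror the bring-up script further down):

```sh
kubectl apply -f - <<EOF
apiVersion: scalingpolicy.kope.io/v1alpha1
kind: ScalingPolicy
metadata:
  name: fluentd-gcp-scaling-policy
  namespace: kube-system
spec:
  containers:
  - name: fluentd-gcp
    resources:
      requests:
      - resource: cpu
        base: 200m
      limits:
      - resource: memory
        base: 500Mi
EOF
```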

**Special notes for your reviewer**:

**Release note**:
```release-note
fluentd-gcp resources can be modified via a ScalingPolicy
```

cc @kawych @justinsb
Commit d3bacb914c, authored by Kubernetes Submit Queue on 2018-02-15 03:42:14 -08:00 and committed by GitHub.
7 changed files with 134 additions and 38 deletions

The fluentd-gcp DaemonSet manifest drops its hardcoded `resources` block:

```diff
@@ -32,12 +32,6 @@ spec:
         env:
         - name: FLUENTD_ARGS
           value: --no-supervisor -q
-        resources:
-          limits:
-            memory: 300Mi
-          requests:
-            cpu: 100m
-            memory: 200Mi
         volumeMounts:
         - name: varlog
           mountPath: /var/log
```

A new Deployment runs the scaler in `kube-system`:

```diff
@@ -0,0 +1,34 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: fluentd-gcp-scaler
+  namespace: kube-system
+  labels:
+    k8s-app: fluentd-gcp-scaler
+    version: v0.1.0
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  selector:
+    matchLabels:
+      k8s-app: fluentd-gcp-scaler
+  template:
+    metadata:
+      labels:
+        k8s-app: fluentd-gcp-scaler
+    spec:
+      serviceAccountName: fluentd-gcp-scaler
+      containers:
+      - name: fluentd-gcp-scaler
+        image: gcr.io/google-containers/fluentd-gcp-scaler:0.1
+        command:
+        - /scaler.sh
+        - --ds-name=fluentd-gcp-v2.0.13
+        - --scaling-policy=fluentd-gcp-scaling-policy
+        env:
+        # Defaults, used if no overrides are found in fluentd-gcp-scaling-policy
+        - name: CPU_REQUEST
+          value: 100m
+        - name: MEMORY_REQUEST
+          value: 200Mi
+        - name: MEMORY_LIMIT
+          value: 300Mi
```
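A quick way to see the scaler's effect (a sketch, not part of this PR; the jsonpath assumes fluentd-gcp is the first container in the DaemonSet shown above):

```sh
kubectl -n kube-system get ds fluentd-gcp-v2.0.13 \
  -o jsonpath='{.spec.template.spec.containers[0].resources}'
```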

A new CustomResourceDefinition registers the ScalingPolicy resource:

```diff
@@ -0,0 +1,13 @@
+apiVersion: apiextensions.k8s.io/v1beta1
+kind: CustomResourceDefinition
+metadata:
+  name: scalingpolicies.scalingpolicy.kope.io
+  labels:
+    addonmanager.kubernetes.io/mode: Reconcile
+spec:
+  group: scalingpolicy.kope.io
+  version: v1alpha1
+  names:
+    kind: ScalingPolicy
+    plural: scalingpolicies
+  scope: Namespaced
```
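Once the CRD is registered, the new resource type is queryable with plain kubectl; the bring-up change further down uses exactly this as its readiness check:

```sh
kubectl get scalingpolicies.scalingpolicy.kope.io --all-namespaces
```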

New RBAC objects give the scaler's service account just enough access: get/patch on DaemonSets and get on scaling policies:

```diff
@@ -0,0 +1,48 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  name: fluentd-gcp-scaler
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: system:fluentd-gcp-scaler
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+rules:
+- apiGroups:
+  - "extensions"
+  resources:
+  - daemonsets
+  verbs:
+  - get
+  - patch
+- apiGroups:
+  - "scalingpolicy.kope.io"
+  resources:
+  - scalingpolicies
+  verbs:
+  - get
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: fluentd-gcp-scaler-binding
+  namespace: kube-system
+  labels:
+    kubernetes.io/cluster-service: "true"
+    addonmanager.kubernetes.io/mode: Reconcile
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: system:fluentd-gcp-scaler
+subjects:
+- kind: ServiceAccount
+  name: fluentd-gcp-scaler
+  namespace: kube-system
```
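These grants can be sanity-checked by impersonating the service account with standard kubectl (not part of this PR):

```sh
kubectl auth can-i patch daemonsets -n kube-system \
  --as=system:serviceaccount:kube-system:fluentd-gcp-scaler
kubectl auth can-i get scalingpolicies.scalingpolicy.kope.io -n kube-system \
  --as=system:serviceaccount:kube-system:fluentd-gcp-scaler
```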

A cluster config file no longer hardcodes the fluentd resource defaults:

```diff
@@ -354,9 +354,9 @@ if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
 fi
 
 # Fluentd requirements
-FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-300Mi}"
-FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-100m}"
-FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-200Mi}"
+FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
+FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
+FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"
 
 # Heapster requirements
 HEAPSTER_GCP_BASE_MEMORY="${HEAPSTER_GCP_BASE_MEMORY:-140Mi}"
```
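With the baked-in defaults removed, overrides flow in from the environment at bring-up time, e.g. (illustrative values, assuming the standard GCE entry point):

```sh
FLUENTD_GCP_MEMORY_LIMIT=500Mi FLUENTD_GCP_CPU_REQUEST=200m \
  ./cluster/kube-up.sh
```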

A second config file receives the identical change:

```diff
@@ -389,9 +389,9 @@ if [[ -n "${LOGROTATE_MAX_SIZE:-}" ]]; then
 fi
 
 # Fluentd requirements
-FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-300Mi}"
-FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-100m}"
-FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-200Mi}"
+FLUENTD_GCP_MEMORY_LIMIT="${FLUENTD_GCP_MEMORY_LIMIT:-}"
+FLUENTD_GCP_CPU_REQUEST="${FLUENTD_GCP_CPU_REQUEST:-}"
+FLUENTD_GCP_MEMORY_REQUEST="${FLUENTD_GCP_MEMORY_REQUEST:-}"
 
 # Heapster requirements
 HEAPSTER_GCP_BASE_MEMORY="${HEAPSTER_GCP_BASE_MEMORY:-140Mi}"
```

Finally, cluster bring-up applies a ScalingPolicy instead of rewriting the fluentd manifest with `kubectl set resources`:

```diff
@@ -1982,50 +1982,57 @@ function copy-manifests {
   chmod 644 "${dst_dir}"/*
 }
 
-# Fluentd manifest is modified using kubectl, which may not be available at
-# this point. Run this as a background process.
+# Fluentd resources are modified using ScalingPolicy CR, which may not be
+# available at this point. Run this as a background process.
 function wait-for-apiserver-and-update-fluentd {
-  local -r fluentd_gcp_yaml="${1}"
-
-  local modifying_flags=""
+  local any_overrides=false
   if [[ -n "${FLUENTD_GCP_MEMORY_LIMIT:-}" ]]; then
-    modifying_flags="${modifying_flags} --limits=memory=${FLUENTD_GCP_MEMORY_LIMIT}"
+    any_overrides=true
   fi
-  local request_resources=""
   if [[ -n "${FLUENTD_GCP_CPU_REQUEST:-}" ]]; then
-    request_resources="cpu=${FLUENTD_GCP_CPU_REQUEST}"
+    any_overrides=true
   fi
   if [[ -n "${FLUENTD_GCP_MEMORY_REQUEST:-}" ]]; then
-    if [[ -n "${request_resources}" ]]; then
-      request_resources="${request_resources},"
-    fi
-    request_resources="memory=${FLUENTD_GCP_MEMORY_REQUEST}"
+    any_overrides=true
   fi
-  if [[ -n "${request_resources}" ]]; then
-    modifying_flags="${modifying_flags} --requests=${request_resources}"
+  if ! $any_overrides; then
+    # Nothing to do here.
+    exit
   fi
 
-  until kubectl get nodes
+  # Wait until ScalingPolicy CRD is in place.
+  until kubectl get scalingpolicies.scalingpolicy.kope.io
   do
     sleep 10
   done
 
-  local -r temp_fluentd_gcp_yaml="${fluentd_gcp_yaml}.tmp"
-  if kubectl set resources --dry-run --local -f ${fluentd_gcp_yaml} ${modifying_flags} \
-      --containers=fluentd-gcp -o yaml > ${temp_fluentd_gcp_yaml}; then
-    mv ${temp_fluentd_gcp_yaml} ${fluentd_gcp_yaml}
-  else
-    (echo "Failed to update fluentd resources. Used manifest:" && cat ${temp_fluentd_gcp_yaml}) >&2
-    rm ${temp_fluentd_gcp_yaml}
-  fi
+  # Single-shot, not managed by addon manager. Can be later modified or removed
+  # at will.
+  cat <<EOF | kubectl apply -f -
+apiVersion: scalingpolicy.kope.io/v1alpha1
+kind: ScalingPolicy
+metadata:
+  name: fluentd-gcp-scaling-policy
+  namespace: kube-system
+spec:
+  containers:
+  - name: fluentd-gcp
+    resources:
+      requests:
+      - resource: cpu
+        base: ${FLUENTD_GCP_CPU_REQUEST:-}
+      - resource: memory
+        base: ${FLUENTD_GCP_MEMORY_REQUEST:-}
+      limits:
+      - resource: memory
+        base: ${FLUENTD_GCP_MEMORY_LIMIT:-}
+EOF
 }
 
 # Trigger background process that will ultimately update fluentd resource
 # requirements.
 function start-fluentd-resource-update {
-  local -r fluentd_gcp_yaml="${1}"
-  wait-for-apiserver-and-update-fluentd ${fluentd_gcp_yaml} &
+  wait-for-apiserver-and-update-fluentd &
 }
 
 # Update {{ container-runtime }} with actual container runtime name.
```
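Because the policy is applied once and is not reconciled by the addon manager, it can be inspected or edited after bring-up, e.g. (sketch; names taken from the script above):

```sh
kubectl -n kube-system get scalingpolicies.scalingpolicy.kope.io \
  fluentd-gcp-scaling-policy -o yaml
```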