Merge branch 'master' of https://github.com/kubernetes/kubernetes into az-ratelimit
commit 9e4f1441b4
@@ -501,9 +501,11 @@ function kube::build::ensure_data_container() {
   # If the data container exists AND exited successfully, we can use it.
   # Otherwise nuke it and start over.
   local ret=0
-  local code=$(docker inspect \
+  local code=0
+
+  code=$(docker inspect \
       -f '{{.State.ExitCode}}' \
-      "${KUBE_DATA_CONTAINER_NAME}" 2>/dev/null || ret=$?)
+      "${KUBE_DATA_CONTAINER_NAME}" 2>/dev/null) || ret=$?
   if [[ "${ret}" == 0 && "${code}" != 0 ]]; then
     kube::build::destroy_container "${KUBE_DATA_CONTAINER_NAME}"
     ret=1
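The build.sh change above fixes a classic shell pitfall: in `local code=$(cmd || ret=$?)`, the `ret=$?` runs inside the command substitution's subshell (so the parent never sees it), and `local` itself returns 0 no matter how the substituted command exited. Declaring first and assigning separately, as the new code does, lets `|| ret=$?` observe the real status. A minimal sketch of the difference (hypothetical `masked`/`preserved` functions, not from the repo):

```sh
#!/bin/bash

# Pitfall: ret is assigned inside the substitution's subshell and lost,
# and 'local' returns 0 even though 'false' failed.
masked() {
  local ret=0
  local out=$(false || ret=$?)
  echo "masked: ret=${ret}"      # prints ret=0
}

# Fix: declare first, assign separately, react to the assignment's status.
preserved() {
  local ret=0
  local out=0
  out=$(false) || ret=$?
  echo "preserved: ret=${ret}"   # prints ret=1
}

masked
preserved
```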
@@ -59,7 +59,7 @@ http_file(

 docker_pull(
     name = "debian-iptables-amd64",
-    digest = "sha256:a3b936c0fb98a934eecd2cfb91f73658d402b29116084e778ce9ddb68e55383e",
+    digest = "sha256:fb18678f8203ca1bd2fad2671e3ebd80cb408a1baae423d4ad39c05f4caac4e1",
     registry = "k8s.gcr.io",
     repository = "debian-iptables-amd64",
     tag = "v10",  # ignored, but kept here for documentation
@@ -75,7 +75,7 @@ docker_pull(

 docker_pull(
     name = "official_busybox",
-    digest = "sha256:be3c11fdba7cfe299214e46edc642e09514dbb9bbefcd0d3836c05a1e0cd0642",
+    digest = "sha256:4cee1979ba0bf7db9fc5d28fb7b798ca69ae95a47c5fecf46327720df4ff352d",
     registry = "index.docker.io",
     repository = "library/busybox",
     tag = "latest",  # ignored, but kept here for documentation
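Both `docker_pull` rules above pin images by sha256 digest; the `tag` line is documentation only, since a tag like `latest` can move while a digest cannot. If you ever need to refresh such a pin, one way (a sketch, not a script from this repo) is to resolve the tag to the digest it currently points at:

```sh
# Pull the mutable tag, then read back the content-addressed digest it
# resolves to today; that digest is what goes into the docker_pull() rule.
docker pull busybox:latest
docker inspect --format='{{index .RepoDigests 0}}' busybox:latest
# e.g. busybox@sha256:4cee1979ba0bf7db9fc5d28fb7b798ca69ae95a47c5fecf46327720df4ff352d
```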
@@ -20,6 +20,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-node-critical
       nodeSelector:
         projectcalico.org/ds-ready: "true"
       hostNetwork: true
@@ -16,6 +16,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-cluster-critical
       containers:
       - image: k8s.gcr.io/cpvpa-amd64:v0.6.0
         name: autoscaler
@@ -16,6 +16,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-cluster-critical
       tolerations:
       - key: CriticalAddonsOnly
         operator: Exists
@@ -16,6 +16,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-cluster-critical
       containers:
       - image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2
         name: autoscaler
@@ -16,6 +16,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-cluster-critical
       containers:
       - image: k8s.gcr.io/cpvpa-amd64:v0.6.0
         name: autoscaler
@@ -57,6 +57,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-cluster-critical
       containers:
       - image: k8s.gcr.io/heapster-amd64:v1.5.0
         name: heapster
@@ -57,6 +57,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-cluster-critical
       containers:
       - image: k8s.gcr.io/heapster-amd64:v1.5.0
         name: heapster
@@ -57,6 +57,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-cluster-critical
       containers:
       - image: k8s.gcr.io/heapster-amd64:v1.5.0
         name: heapster
@@ -22,6 +22,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-cluster-critical
       tolerations:
       - key: node-role.kubernetes.io/master
         effect: NoSchedule
@@ -44,6 +44,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-cluster-critical
       containers:
       - image: k8s.gcr.io/heapster-amd64:v1.5.0
         name: heapster
@@ -44,6 +44,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-cluster-critical
       containers:
       - image: k8s.gcr.io/heapster-amd64:v1.5.0
         name: heapster
@@ -27,6 +27,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-cluster-critical
       containers:
       - name: kubernetes-dashboard
         image: k8s.gcr.io/kubernetes-dashboard-amd64:v1.8.0
@@ -78,6 +78,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-cluster-critical
       containers:
       - name: autoscaler
         image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.1.2-r2
@@ -84,6 +84,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-cluster-critical
       tolerations:
       - key: "CriticalAddonsOnly"
         operator: "Exists"
@@ -84,6 +84,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-cluster-critical
       tolerations:
       - key: "CriticalAddonsOnly"
         operator: "Exists"
@@ -84,6 +84,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-cluster-critical
       tolerations:
       - key: "CriticalAddonsOnly"
         operator: "Exists"
@@ -18,6 +18,7 @@ metadata:
   labels:
     k8s-app: etcd-empty-dir-cleanup
 spec:
+  priorityClassName: system-node-critical
   serviceAccountName: etcd-empty-dir-cleanup
   hostNetwork: true
   dnsPolicy: Default
@@ -72,6 +72,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-node-critical
       serviceAccountName: fluentd-es
       containers:
       - name: fluentd-es
@@ -31,7 +31,7 @@ COPY Gemfile /Gemfile
 # 2. Install fluentd via ruby.
 # 3. Remove build dependencies.
 # 4. Cleanup leftover caches & files.
-RUN BUILD_DEPS="make gcc g++ libc6-dev ruby-dev" \
+RUN BUILD_DEPS="make gcc g++ libc6-dev ruby-dev libffi-dev" \
     && clean-install $BUILD_DEPS \
                      ca-certificates \
                      libjemalloc1 \
@@ -23,6 +23,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-node-critical
       serviceAccountName: fluentd-gcp
      dnsPolicy: Default
      containers:
@@ -24,6 +24,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-node-critical
       serviceAccountName: ip-masq-agent
       hostNetwork: true
       containers:
@@ -33,6 +33,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-node-critical
       serviceAccountName: metadata-proxy
       hostNetwork: true
       dnsPolicy: Default
@@ -44,6 +44,7 @@ spec:
       annotations:
         scheduler.alpha.kubernetes.io/critical-pod: ''
     spec:
+      priorityClassName: system-cluster-critical
       serviceAccountName: metrics-server
       containers:
       - name: metrics-server
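Every addon manifest above makes the same two-part move: the legacy `scheduler.alpha.kubernetes.io/critical-pod` annotation is kept for compatibility, and an explicit `priorityClassName` is added, because pod priority is what the scheduler and eviction logic act on once priority classes are enabled. The two class names used here are the cluster's built-in ones and can be inspected directly (a sketch, assuming a working kubectl context):

```sh
# The two built-in priority classes these manifests reference.
kubectl get priorityclasses system-node-critical system-cluster-critical

# The numeric priority the scheduler will use for one of them:
kubectl get priorityclass system-node-critical -o jsonpath='{.value}'
```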
@@ -33,6 +33,9 @@
       },
       { "name": "DATA_DIRECTORY",
         "value": "/var/etcd/data{{ suffix }}"
+      },
+      { "name": "INITIAL_CLUSTER",
+        "value": "{{ etcd_cluster }}"
       }
     ],
     "livenessProbe": {
@@ -1,19 +1,19 @@
 apiVersion: v1
 kind: Pod
 metadata:
-  name: l7-lb-controller-v0.9.8-alpha.1
+  name: l7-lb-controller-v0.9.8-alpha.2
   namespace: kube-system
   annotations:
     scheduler.alpha.kubernetes.io/critical-pod: ''
   labels:
     k8s-app: gcp-lb-controller
-    version: v0.9.8-alpha.1
+    version: v0.9.8-alpha.2
     kubernetes.io/name: "GLBC"
 spec:
   terminationGracePeriodSeconds: 600
   hostNetwork: true
   containers:
-  - image: k8s.gcr.io/ingress-gce-glbc-amd64:0.9.8-alpha.1
+  - image: k8s.gcr.io/ingress-gce-glbc-amd64:0.9.8-alpha.2
     livenessProbe:
       httpGet:
         path: /healthz
@@ -44,7 +44,7 @@ spec:
     # TODO: split this out into args when we no longer need to pipe stdout to a file #6428
     - sh
     - -c
-    - 'exec /glbc -v=3 --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
+    - 'exec /glbc --verbose --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
   volumes:
   - hostPath:
       path: /etc/gce.conf
@@ -16,4 +16,4 @@ FROM BASEIMAGE

 EXPOSE 2379 2380 4001 7001
 COPY etcd* etcdctl* /usr/local/bin/
-COPY migrate-if-needed.sh attachlease rollback /usr/local/bin/
+COPY migrate-if-needed.sh start-stop-etcd.sh attachlease rollback /usr/local/bin/
@@ -28,6 +28,8 @@
 # That binary will be set to the last tag from $(TAGS).
 TAGS?=2.2.1 2.3.7 3.0.17 3.1.11 3.2.14
 REGISTRY_TAG?=3.2.14
+# ROLLBACK_REGISTRY_TAG specified the tag that REGISTRY_TAG may be rolled back to.
+ROLLBACK_REGISTRY_TAG?=3.1.11
 ARCH?=amd64
 REGISTRY?=k8s.gcr.io
 # golang version should match the golang version from https://github.com/coreos/etcd/releases for REGISTRY_TAG version of etcd.
@@ -57,10 +59,10 @@ build:
	find ./ -maxdepth 1 -type f | xargs -I {} cp {} $(TEMP_DIR)

	# Compile attachlease
-	docker run -i -v $(shell pwd)/../../../:/go/src/k8s.io/kubernetes -v $(TEMP_DIR):/build -e GOARCH=$(ARCH) golang:$(GOLANG_VERSION) \
+	docker run --interactive -v $(shell pwd)/../../../:/go/src/k8s.io/kubernetes -v $(TEMP_DIR):/build -e GOARCH=$(ARCH) golang:$(GOLANG_VERSION) \
		/bin/bash -c "CGO_ENABLED=0 go build -o /build/attachlease k8s.io/kubernetes/cluster/images/etcd/attachlease"
	# Compile rollback
-	docker run -i -v $(shell pwd)/../../../:/go/src/k8s.io/kubernetes -v $(TEMP_DIR):/build -e GOARCH=$(ARCH) golang:$(GOLANG_VERSION) \
+	docker run --interactive -v $(shell pwd)/../../../:/go/src/k8s.io/kubernetes -v $(TEMP_DIR):/build -e GOARCH=$(ARCH) golang:$(GOLANG_VERSION) \
		/bin/bash -c "CGO_ENABLED=0 go build -o /build/rollback k8s.io/kubernetes/cluster/images/etcd/rollback"

@@ -81,7 +83,7 @@ else
	# For each release create a tmp dir 'etcd_release_tmp_dir' and unpack the release tar there.
	for tag in $(TAGS); do \
		etcd_release_tmp_dir=$(shell mktemp -d); \
-		docker run -i -v $$etcd_release_tmp_dir:/etcdbin golang:$(GOLANG_VERSION) /bin/bash -c \
+		docker run --interactive -v $$etcd_release_tmp_dir:/etcdbin golang:$(GOLANG_VERSION) /bin/bash -c \
			"git clone https://github.com/coreos/etcd /go/src/github.com/coreos/etcd \
			&& cd /go/src/github.com/coreos/etcd \
			&& git checkout v$$tag \
@@ -114,5 +116,127 @@ ifeq ($(ARCH),amd64)
	gcloud docker -- push $(REGISTRY)/etcd:$(REGISTRY_TAG)
 endif

-all: build
-.PHONY: build push
+ETCD2_ROLLBACK_NEW_TAG=3.0.17
+ETCD2_ROLLBACK_OLD_TAG=2.2.1
+
+# Test a rollback to etcd2 from the earliest etcd3 version.
+test-rollback-etcd2:
+	mkdir -p $(TEMP_DIR)/rollback-etcd2
+	cd $(TEMP_DIR)/rollback-etcd2
+
+	@echo "Starting $(ETCD2_ROLLBACK_NEW_TAG) etcd and writing some sample data."
+	docker run --tty --interactive -v $(TEMP_DIR)/rollback-etcd2:/var/etcd \
+		-e "TARGET_STORAGE=etcd3" \
+		-e "TARGET_VERSION=$(ETCD2_ROLLBACK_NEW_TAG)" \
+		-e "DATA_DIRECTORY=/var/etcd/data" \
+		gcr.io/google_containers/etcd-$(ARCH):$(REGISTRY_TAG) /bin/sh -c \
+		'INITIAL_CLUSTER=etcd-$$(hostname)=http://localhost:2380 \
+		/usr/local/bin/migrate-if-needed.sh && \
+		source /usr/local/bin/start-stop-etcd.sh && \
+		START_STORAGE=etcd3 START_VERSION=$(ETCD2_ROLLBACK_NEW_TAG) start_etcd && \
+		ETCDCTL_API=3 /usr/local/bin/etcdctl-$(ETCD2_ROLLBACK_NEW_TAG) --endpoints http://127.0.0.1:$${ETCD_PORT} put /registry/k1 value1 && \
+		stop_etcd && \
+		[ $$(cat /var/etcd/data/version.txt) = $(ETCD2_ROLLBACK_NEW_TAG)/etcd3 ]'
+
+	@echo "Rolling back to the previous version of etcd and recording keyspace to a flat file."
+	docker run --tty --interactive -v $(TEMP_DIR)/rollback-etcd2:/var/etcd \
+		-e "TARGET_STORAGE=etcd2" \
+		-e "TARGET_VERSION=$(ETCD2_ROLLBACK_OLD_TAG)" \
+		-e "DATA_DIRECTORY=/var/etcd/data" \
+		gcr.io/google_containers/etcd-$(ARCH):$(REGISTRY_TAG) /bin/sh -c \
+		'INITIAL_CLUSTER=etcd-$$(hostname)=http://localhost:2380 \
+		/usr/local/bin/migrate-if-needed.sh && \
+		source /usr/local/bin/start-stop-etcd.sh && \
+		START_STORAGE=etcd2 START_VERSION=$(ETCD2_ROLLBACK_OLD_TAG) start_etcd && \
+		/usr/local/bin/etcdctl-$(ETCD2_ROLLBACK_OLD_TAG) --endpoint 127.0.0.1:$${ETCD_PORT} get /registry/k1 > /var/etcd/keyspace.txt && \
+		stop_etcd'
+
+	@echo "Checking if rollback successfully downgraded etcd to $(ETCD2_ROLLBACK_OLD_TAG)"
+	docker run --tty --interactive -v $(TEMP_DIR)/rollback-etcd2:/var/etcd \
+		gcr.io/google_containers/etcd-$(ARCH):$(REGISTRY_TAG) /bin/sh -c \
+		'[ $$(cat /var/etcd/data/version.txt) = $(ETCD2_ROLLBACK_OLD_TAG)/etcd2 ] && \
+		grep -q value1 /var/etcd/keyspace.txt'
+
+# Test a rollback from the latest version to the previous version.
+test-rollback:
+	mkdir -p $(TEMP_DIR)/rollback-test
+	cd $(TEMP_DIR)/rollback-test
+
+	@echo "Starting $(REGISTRY_TAG) etcd and writing some sample data."
+	docker run --tty --interactive -v $(TEMP_DIR)/rollback-test:/var/etcd \
+		-e "TARGET_STORAGE=etcd3" \
+		-e "TARGET_VERSION=$(REGISTRY_TAG)" \
+		-e "DATA_DIRECTORY=/var/etcd/data" \
+		gcr.io/google_containers/etcd-$(ARCH):$(REGISTRY_TAG) /bin/sh -c \
+		'INITIAL_CLUSTER=etcd-$$(hostname)=http://localhost:2380 \
+		/usr/local/bin/migrate-if-needed.sh && \
+		source /usr/local/bin/start-stop-etcd.sh && \
+		START_STORAGE=etcd3 START_VERSION=$(REGISTRY_TAG) start_etcd && \
+		ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints http://127.0.0.1:$${ETCD_PORT} put /registry/k1 value1 && \
+		stop_etcd'
+
+	@echo "Rolling back to the previous version of etcd and recording keyspace to a flat file."
+	docker run --tty --interactive -v $(TEMP_DIR)/rollback-test:/var/etcd \
+		-e "TARGET_STORAGE=etcd3" \
+		-e "TARGET_VERSION=$(ROLLBACK_REGISTRY_TAG)" \
+		-e "DATA_DIRECTORY=/var/etcd/data" \
+		gcr.io/google_containers/etcd-$(ARCH):$(REGISTRY_TAG) /bin/sh -c \
+		'INITIAL_CLUSTER=etcd-$$(hostname)=http://localhost:2380 \
+		/usr/local/bin/migrate-if-needed.sh && \
+		source /usr/local/bin/start-stop-etcd.sh && \
+		START_STORAGE=etcd3 START_VERSION=$(ROLLBACK_REGISTRY_TAG) start_etcd && \
+		ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints http://127.0.0.1:$${ETCD_PORT} get --prefix / > /var/etcd/keyspace.txt && \
+		stop_etcd'
+
+	@echo "Checking if rollback successfully downgraded etcd to $(ROLLBACK_REGISTRY_TAG)"
+	docker run --tty --interactive -v $(TEMP_DIR)/rollback-test:/var/etcd \
+		gcr.io/google_containers/etcd-$(ARCH):$(REGISTRY_TAG) /bin/sh -c \
+		'[ $$(cat /var/etcd/data/version.txt) = $(ROLLBACK_REGISTRY_TAG)/etcd3 ] && \
+		grep -q value1 /var/etcd/keyspace.txt'
+
+# Test migrating from each supported versions to the latest version.
+test-migrate:
+	for tag in $(TAGS); do \
+		echo "Testing migration from $${tag} to $(REGISTRY_TAG)" && \
+		mkdir -p $(TEMP_DIR)/migrate-$${tag} && \
+		cd $(TEMP_DIR)/migrate-$${tag} && \
+		MAJOR_VERSION=$$(echo $${tag} | cut -c 1) && \
+		echo "Starting etcd $${tag} and writing sample data to keyspace" && \
+		docker run --tty --interactive -v $(TEMP_DIR)/migrate-$${tag}:/var/etcd \
+			-e "TARGET_STORAGE=etcd$${MAJOR_VERSION}" \
+			-e "TARGET_VERSION=$${tag}" \
+			-e "DATA_DIRECTORY=/var/etcd/data" \
+			gcr.io/google_containers/etcd-$(ARCH):$(REGISTRY_TAG) /bin/sh -c \
+			"INITIAL_CLUSTER=etcd-\$$(hostname)=http://localhost:2380 \
+			/usr/local/bin/migrate-if-needed.sh && \
+			source /usr/local/bin/start-stop-etcd.sh && \
+			START_STORAGE=etcd$${MAJOR_VERSION} START_VERSION=$${tag} start_etcd && \
+			if [ $${MAJOR_VERSION} == 2 ]; then \
+				/usr/local/bin/etcdctl --endpoint http://127.0.0.1:\$${ETCD_PORT} set /registry/k1 value1; \
+			else \
+				ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints http://127.0.0.1:\$${ETCD_PORT} put /registry/k1 value1; \
+			fi && \
+			stop_etcd" && \
+		echo " Migrating from $${tag} to $(REGISTRY_TAG) and capturing keyspace" && \
+		docker run --tty --interactive -v $(TEMP_DIR)/migrate-$${tag}:/var/etcd \
+			-e "TARGET_STORAGE=etcd3" \
+			-e "TARGET_VERSION=$(REGISTRY_TAG)" \
+			-e "DATA_DIRECTORY=/var/etcd/data" \
+			gcr.io/google_containers/etcd-$(ARCH):$(REGISTRY_TAG) /bin/sh -c \
+			'INITIAL_CLUSTER=etcd-$$(hostname)=http://localhost:2380 \
+			/usr/local/bin/migrate-if-needed.sh && \
+			source /usr/local/bin/start-stop-etcd.sh && \
+			START_STORAGE=etcd3 START_VERSION=$(REGISTRY_TAG) start_etcd && \
+			ETCDCTL_API=3 /usr/local/bin/etcdctl --endpoints http://127.0.0.1:$${ETCD_PORT} get --prefix / > /var/etcd/keyspace.txt && \
+			stop_etcd' && \
+		echo "Checking if migrate from $${tag} successfully upgraded etcd to $(REGISTRY_TAG)" && \
+		docker run --tty --interactive -v $(TEMP_DIR)/migrate-$${tag}:/var/etcd \
+			gcr.io/google_containers/etcd-$(ARCH):$(REGISTRY_TAG) /bin/sh -c \
+			'[ $$(cat /var/etcd/data/version.txt) = $(REGISTRY_TAG)/etcd3 ] && \
+			grep -q value1 /var/etcd/keyspace.txt'; \
+	done
+
+test: test-rollback test-rollback-etcd2 test-migrate
+
+all: build test
+.PHONY: build push test-rollback test-rollback-etcd2 test-migrate test
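A detail that makes the test targets above easier to read: etcdctl's interface changes between API versions, which is why the recipes switch between `set`/`--endpoint` and `put`/`--endpoints` depending on the storage mode under test. The split, reduced to its essentials (hypothetical local endpoint):

```sh
# etcdctl speaks two dialects. v2 mode: 'set' with --endpoint (singular).
etcdctl --endpoint "http://127.0.0.1:2379" set /registry/k1 value1

# v3 mode: ETCDCTL_API=3 selects the new API; 'put' with --endpoints (plural).
ETCDCTL_API=3 etcdctl --endpoints "http://127.0.0.1:2379" put /registry/k1 value1
```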
@@ -7,6 +7,14 @@ For other architectures, `etcd` is cross-compiled from source. Arch-specific `bu

 #### How to release

+First, run the migration and rollback tests.
+
+```console
+$ make build test
+```
+
 Next, build and push the docker images for all supported architectures.

 ```console
 # Build for linux/amd64 (default)
 $ make push ARCH=amd64
@@ -18,7 +18,7 @@
 # This script performs etcd upgrade based on the following environmental
 # variables:
 # TARGET_STORAGE - API of etcd to be used (supported: 'etcd2', 'etcd3')
-# TARGET_VERSION - etcd release to be used (supported: '2.2.1', '2.3.7', '3.0.17')
+# TARGET_VERSION - etcd release to be used (supported: '2.2.1', '2.3.7', '3.0.17', '3.1.11', '3.2.14')
 # DATA_DIRECTORY - directory with etcd data
 #
 # The current etcd version and storage format is detected based on the
@@ -28,7 +28,8 @@
 # The update workflow support the following upgrade steps:
 # - 2.2.1/etcd2 -> 2.3.7/etcd2
 # - 2.3.7/etcd2 -> 3.0.17/etcd2
 # - 3.0.17/etcd2 -> 3.0.17/etcd3
 # - 3.0.17/etcd3 -> 3.1.11/etcd3
+# - 3.1.11/etcd3 -> 3.2.14/etcd3
 #
 # NOTE: The releases supported in this script has to match release binaries
 # present in the etcd image (to make this script work correctly).
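These steps are applied transitively: the script walks the supported-version list from the detected current version toward the target, upgrading the data directory one hop at a time. Reduced to the control flow alone, the walk looks roughly like this (a sketch with a stubbed upgrade action, not the script itself):

```sh
#!/bin/sh
# Walk the supported versions; once the current version is seen, every
# later entry up to the target is one upgrade hop.
SUPPORTED_VERSIONS="2.2.1 2.3.7 3.0.17 3.1.11 3.2.14"
CURRENT_VERSION="2.3.7"
TARGET_VERSION="3.2.14"

upgrade_step() {
  # Stand-in for the real work: start etcd-$1 so it rewrites the data dir.
  echo "upgrading data dir: ${CURRENT_VERSION} -> $1"
  CURRENT_VERSION="$1"
}

started="false"
for step in ${SUPPORTED_VERSIONS}; do
  [ "${started}" = "true" ] && upgrade_step "${step}"
  [ "${step}" = "${CURRENT_VERSION}" ] && started="true"
  [ "${step}" = "${TARGET_VERSION}" ] && break
done
```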
@@ -39,6 +40,72 @@
 set -o errexit
 set -o nounset

+source $(dirname "$0")/start-stop-etcd.sh
+
+# Rollback to previous minor version of etcd 3.x, if needed.
+#
+# Warning: For HA etcd clusters (any cluster with more than one member), all members must be stopped before rolling back, zero
+# downtime rollbacks are not supported.
+rollback_etcd3_minor_version() {
+  if [ ${TARGET_MINOR_VERSION} != $((${CURRENT_MINOR_VERSION}-1)) ]; then
+    echo "Rollback from ${CURRENT_VERSION} to ${TARGET_VERSION} not supported, only rollbacks to the previous minor version are supported."
+    exit 1
+  fi
+  echo "Performing etcd ${CURRENT_VERSION} -> ${TARGET_VERSION} rollback"
+  ROLLBACK_BACKUP_DIR="${DATA_DIRECTORY}.bak"
+  rm -rf "${ROLLBACK_BACKUP_DIR}"
+  SNAPSHOT_FILE="${DATA_DIRECTORY}.snapshot.db"
+  rm -rf "${SNAPSHOT_FILE}"
+  ETCD_CMD="/usr/local/bin/etcd-${CURRENT_VERSION}"
+  ETCDCTL_CMD="/usr/local/bin/etcdctl-${CURRENT_VERSION}"
+
+  # Start CURRENT_VERSION of etcd.
+  START_VERSION="${CURRENT_VERSION}"
+  START_STORAGE="${CURRENT_STORAGE}"
+  echo "Starting etcd version ${START_VERSION} to capture rollback snapshot."
+  if ! start_etcd; then
+    echo "Unable to automatically downgrade etcd: starting etcd version ${START_VERSION} to capture rollback snapshot failed."
+    echo "See https://coreos.com/etcd/docs/3.2.13/op-guide/recovery.html for manual downgrade options."
+    exit 1
+  else
+    ETCDCTL_API=3 ${ETCDCTL_CMD} snapshot --endpoints "http://127.0.0.1:${ETCD_PORT}" save "${SNAPSHOT_FILE}"
+  fi
+  stop_etcd
+
+  # Backup the data before rolling back.
+  mv "${DATA_DIRECTORY}" "${ROLLBACK_BACKUP_DIR}"
+  ETCDCTL_CMD="/usr/local/bin/etcdctl-${TARGET_VERSION}"
+  NAME="etcd-$(hostname)"
+  ETCDCTL_API=3 ${ETCDCTL_CMD} snapshot restore "${SNAPSHOT_FILE}" \
+      --data-dir "${DATA_DIRECTORY}" --name "${NAME}" --initial-cluster "${INITIAL_CLUSTER}"
+
+  CURRENT_VERSION="${TARGET_VERSION}"
+  echo "${CURRENT_VERSION}/${CURRENT_STORAGE}" > "${DATA_DIRECTORY}/${VERSION_FILE}"
+}
+
+# Rollback from "3.0.x" version in 'etcd3' mode to "2.2.1" version in 'etcd2' mode, if needed.
+rollback_to_etcd2() {
+  if [ "$(echo ${CURRENT_VERSION} | cut -c1-4)" != "3.0." -o "${TARGET_VERSION}" != "2.2.1" ]; then
+    echo "etcd3 -> etcd2 downgrade is supported only between 3.0.x and 2.2.1"
+    return 0
+  fi
+  echo "Backup and remove all existing v2 data"
+  ROLLBACK_BACKUP_DIR="${DATA_DIRECTORY}.bak"
+  rm -rf "${ROLLBACK_BACKUP_DIR}"
+  mkdir -p "${ROLLBACK_BACKUP_DIR}"
+  cp -r "${DATA_DIRECTORY}" "${ROLLBACK_BACKUP_DIR}"
+  echo "Performing etcd3 -> etcd2 rollback"
+  ${ROLLBACK} --data-dir "${DATA_DIRECTORY}"
+  if [ "$?" -ne "0" ]; then
+    echo "Rollback to etcd2 failed"
+    exit 1
+  fi
+  CURRENT_STORAGE="etcd2"
+  CURRENT_VERSION="2.2.1"
+  echo "${CURRENT_VERSION}/${CURRENT_STORAGE}" > "${DATA_DIRECTORY}/${VERSION_FILE}"
+}
+
+
 if [ -z "${TARGET_STORAGE:-}" ]; then
   echo "TARGET_STORAGE variable unset - unexpected failure"
   exit 1
@@ -51,6 +118,10 @@ if [ -z "${DATA_DIRECTORY:-}" ]; then
   echo "DATA_DIRECTORY variable unset - unexpected failure"
   exit 1
 fi
+if [ -z "${INITIAL_CLUSTER:-}" ]; then
+  echo "Warn: INITIAL_CLUSTER variable unset - defaulting to etcd-$(hostname)=http://localhost:2380"
+  INITIAL_CLUSTER="etcd-$(hostname)=http://localhost:2380"
+fi

 echo "$(date +'%Y-%m-%d %H:%M:%S') Detecting if migration is needed"

@@ -68,7 +139,7 @@ fi
 # NOTE: SUPPORTED_VERSION has to match release binaries present in the
 # etcd image (to make this script work correctly).
 # We cannot use array since sh doesn't support it.
-SUPPORTED_VERSIONS_STRING="2.2.1 2.3.7 3.0.17"
+SUPPORTED_VERSIONS_STRING="2.2.1 2.3.7 3.0.17 3.1.11 3.2.14"
 SUPPORTED_VERSIONS=$(echo "${SUPPORTED_VERSIONS_STRING}" | tr " " "\n")

 VERSION_FILE="version.txt"
@@ -96,58 +167,6 @@ if [ -z "$(ls -A ${DATA_DIRECTORY})" ]; then
   exit 0
 fi

-# Starts 'etcd' version ${START_VERSION} and writes to it:
-# 'etcd_version' -> "${START_VERSION}"
-# Successful write confirms that etcd is up and running.
-# Sets ETCD_PID at the end.
-# Returns 0 if etcd was successfully started, non-0 otherwise.
-start_etcd() {
-  # Use random ports, so that apiserver cannot connect to etcd.
-  ETCD_PORT=18629
-  ETCD_PEER_PORT=2380
-  # Avoid collisions between etcd and event-etcd.
-  case "${DATA_DIRECTORY}" in
-    *event*)
-      ETCD_PORT=18631
-      ETCD_PEER_PORT=2381
-      ;;
-  esac
-  local ETCD_CMD="${ETCD:-/usr/local/bin/etcd-${START_VERSION}}"
-  local ETCDCTL_CMD="${ETCDCTL:-/usr/local/bin/etcdctl-${START_VERSION}}"
-  local API_VERSION="$(echo ${START_STORAGE} | cut -c5-5)"
-  if [ "${API_VERSION}" = "2" ]; then
-    ETCDCTL_CMD="${ETCDCTL_CMD} --debug --endpoint=http://127.0.0.1:${ETCD_PORT} set"
-  else
-    ETCDCTL_CMD="${ETCDCTL_CMD} --endpoints=http://127.0.0.1:${ETCD_PORT} put"
-  fi
-  ${ETCD_CMD} \
-    --name="etcd-$(hostname)" \
-    --debug \
-    --data-dir=${DATA_DIRECTORY} \
-    --listen-client-urls http://127.0.0.1:${ETCD_PORT} \
-    --advertise-client-urls http://127.0.0.1:${ETCD_PORT} \
-    --listen-peer-urls http://127.0.0.1:${ETCD_PEER_PORT} \
-    --initial-advertise-peer-urls http://127.0.0.1:${ETCD_PEER_PORT} &
-  ETCD_PID=$!
-  # Wait until we can write to etcd.
-  for i in $(seq 240); do
-    sleep 0.5
-    ETCDCTL_API="${API_VERSION}" ${ETCDCTL_CMD} 'etcd_version' ${START_VERSION}
-    if [ "$?" -eq "0" ]; then
-      echo "Etcd on port ${ETCD_PORT} is up."
-      return 0
-    fi
-  done
-  echo "Timeout while waiting for etcd on port ${ETCD_PORT}"
-  return 1
-}
-
-# Stops etcd with ${ETCD_PID} pid.
-stop_etcd() {
-  kill "${ETCD_PID-}" >/dev/null 2>&1 || :
-  wait "${ETCD_PID-}" >/dev/null 2>&1 || :
-}
-
 ATTACHLEASE="${ATTACHLEASE:-/usr/local/bin/attachlease}"
 ROLLBACK="${ROLLBACK:-/usr/local/bin/rollback}"

@@ -163,6 +182,24 @@ if [ "${CURRENT_VERSION}" = "2.2.1" -a "${CURRENT_VERSION}" != "${TARGET_VERSION
   echo "Backup done in ${BACKUP_DIR}"
 fi

+CURRENT_MINOR_VERSION="$(echo ${CURRENT_VERSION} | awk -F'.' '{print $2}')"
+TARGET_MINOR_VERSION="$(echo ${TARGET_VERSION} | awk -F'.' '{print $2}')"
+
+# "rollback-if-needed"
+case "${CURRENT_STORAGE}-${TARGET_STORAGE}" in
+  "etcd3-etcd3")
+    [ ${TARGET_MINOR_VERSION} -lt ${CURRENT_MINOR_VERSION} ] && rollback_etcd3_minor_version
+    break
+    ;;
+  "etcd3-etcd2")
+    rollback_to_etcd2
+    break
+    ;;
+  *)
+    break
+    ;;
+esac
+
 # Do the roll-forward migration if needed.
 # The migration goes as following:
 # 1. for all versions starting one after the current version of etcd
@@ -227,7 +264,7 @@ for step in ${SUPPORTED_VERSIONS}; do
       echo "Starting etcd ${step} in v3 mode failed"
       exit 1
     fi
-    ${ETCDCTL_CMD} rm --recursive "${ETCD_DATA_PREFIX}"
+    ${ETCDCTL_CMD} --endpoints "http://127.0.0.1:${ETCD_PORT}" rm --recursive "${ETCD_DATA_PREFIX}"
     # Kill etcd and wait until this is down.
     stop_etcd
     echo "Successfully remove v2 data"
@@ -239,28 +276,4 @@ for step in ${SUPPORTED_VERSIONS}; do
   fi
 done

-# Do the rollback of needed.
-# NOTE: Rollback is only supported from "3.0.x" version in 'etcd3' mode to
-# "2.2.1" version in 'etcd2' mode.
-if [ "${CURRENT_STORAGE}" = "etcd3" -a "${TARGET_STORAGE}" = "etcd2" ]; then
-  if [ "$(echo ${CURRENT_VERSION} | cut -c1-4)" != "3.0." -o "${TARGET_VERSION}" != "2.2.1" ]; then
-    echo "etcd3 -> etcd2 downgrade is supported only between 3.0.x and 2.2.1"
-    return 0
-  fi
-  echo "Backup and remove all existing v2 data"
-  ROLLBACK_BACKUP_DIR="${DATA_DIRECTORY}.bak"
-  rm -rf "${ROLLBACK_BACKUP_DIR}"
-  mkdir -p "${ROLLBACK_BACKUP_DIR}"
-  cp -r "${DATA_DIRECTORY}" "${ROLLBACK_BACKUP_DIR}"
-  echo "Performing etcd3 -> etcd2 rollback"
-  ${ROLLBACK} --data-dir "${DATA_DIRECTORY}"
-  if [ "$?" -ne "0" ]; then
-    echo "Rollback to etcd2 failed"
-    exit 1
-  fi
-  CURRENT_STORAGE="etcd2"
-  CURRENT_VERSION="2.2.1"
-  echo "${CURRENT_VERSION}/${CURRENT_STORAGE}" > "${DATA_DIRECTORY}/${VERSION_FILE}"
-fi
-
 echo "$(date +'%Y-%m-%d %H:%M:%S') Migration finished"
cluster/images/etcd/start-stop-etcd.sh (new executable file, 68 lines)
@@ -0,0 +1,68 @@
+#!/bin/sh
+
+# Copyright 2016 The Kubernetes Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Starts 'etcd' version ${START_VERSION} and writes to it:
+# 'etcd_version' -> "${START_VERSION}"
+# Successful write confirms that etcd is up and running.
+# Sets ETCD_PID at the end.
+# Returns 0 if etcd was successfully started, non-0 otherwise.
+start_etcd() {
+  # Use random ports, so that apiserver cannot connect to etcd.
+  ETCD_PORT=18629
+  ETCD_PEER_PORT=2380
+  # Avoid collisions between etcd and event-etcd.
+  case "${DATA_DIRECTORY}" in
+    *event*)
+      ETCD_PORT=18631
+      ETCD_PEER_PORT=2381
+      ;;
+  esac
+  local ETCD_CMD="${ETCD:-/usr/local/bin/etcd-${START_VERSION}}"
+  local ETCDCTL_CMD="${ETCDCTL:-/usr/local/bin/etcdctl-${START_VERSION}}"
+  local API_VERSION="$(echo ${START_STORAGE} | cut -c5-5)"
+  if [ "${API_VERSION}" = "2" ]; then
+    ETCDCTL_CMD="${ETCDCTL_CMD} --debug --endpoint=http://127.0.0.1:${ETCD_PORT} set"
+  else
+    ETCDCTL_CMD="${ETCDCTL_CMD} --endpoints=http://127.0.0.1:${ETCD_PORT} put"
+  fi
+  ${ETCD_CMD} \
+    --name="etcd-$(hostname)" \
+    --initial-cluster="etcd-$(hostname)=http://127.0.0.1:${ETCD_PEER_PORT}" \
+    --debug \
+    --data-dir=${DATA_DIRECTORY} \
+    --listen-client-urls http://127.0.0.1:${ETCD_PORT} \
+    --advertise-client-urls http://127.0.0.1:${ETCD_PORT} \
+    --listen-peer-urls http://127.0.0.1:${ETCD_PEER_PORT} \
+    --initial-advertise-peer-urls http://127.0.0.1:${ETCD_PEER_PORT} &
+  ETCD_PID=$!
+  # Wait until we can write to etcd.
+  for i in $(seq 240); do
+    sleep 0.5
+    ETCDCTL_API="${API_VERSION}" ${ETCDCTL_CMD} 'etcd_version' ${START_VERSION}
+    if [ "$?" -eq "0" ]; then
+      echo "Etcd on port ${ETCD_PORT} is up."
+      return 0
+    fi
+  done
+  echo "Timeout while waiting for etcd on port ${ETCD_PORT}"
+  return 1
+}
+
+# Stops etcd with ${ETCD_PID} pid.
+stop_etcd() {
+  kill "${ETCD_PID-}" >/dev/null 2>&1 || :
+  wait "${ETCD_PID-}" >/dev/null 2>&1 || :
+}
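Splitting start_etcd/stop_etcd into a sourceable file is what lets migrate-if-needed.sh and the Makefile's test targets drive a scratch etcd identically. Typical usage mirrors the test recipes (a sketch; the /usr/local/bin paths and the 3.2.14 binary suffix are assumptions about the image layout):

```sh
#!/bin/sh
# Source the helpers, start one etcd against DATA_DIRECTORY, write a
# sanity key, then shut it down. start_etcd sets ETCD_PORT and ETCD_PID.
DATA_DIRECTORY=/var/etcd/data

. /usr/local/bin/start-stop-etcd.sh

START_STORAGE=etcd3 START_VERSION=3.2.14 start_etcd || exit 1
ETCDCTL_API=3 /usr/local/bin/etcdctl-3.2.14 \
  --endpoints "http://127.0.0.1:${ETCD_PORT}" put /registry/k1 value1
stop_etcd
```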
@@ -927,6 +927,15 @@ def notify_master_gpu_not_enabled(kube_control):
     kube_control.set_gpu(False)


+@when('kube-control.connected')
+@when('config.changed.kubelet-extra-args')
+def maybe_request_new_credentials(kube_control):
+    kubelet_extra_args = parse_extra_args('kubelet-extra-args')
+    cloud_provider = kubelet_extra_args.get('cloud-provider', '')
+    if data_changed('cloud_provider', cloud_provider):
+        request_kubelet_and_proxy_credentials(kube_control)
+
+
 @when('kube-control.connected')
 def request_kubelet_and_proxy_credentials(kube_control):
     """ Request kubelet node authorization with a well formed kubelet user.
@@ -935,14 +944,14 @@ def request_kubelet_and_proxy_credentials(kube_control):
     # The kube-cotrol interface is created to support RBAC.
     # At this point we might as well do the right thing and return the hostname
     # even if it will only be used when we enable RBAC
-    nodeuser = 'system:node:{}'.format(gethostname().lower())
+    nodeuser = 'system:node:{}'.format(get_node_name().lower())
     kube_control.set_auth_request(nodeuser)


 @when('kube-control.connected')
 def catch_change_in_creds(kube_control):
     """Request a service restart in case credential updates were detected."""
-    nodeuser = 'system:node:{}'.format(gethostname().lower())
+    nodeuser = 'system:node:{}'.format(get_node_name().lower())
     creds = kube_control.get_auth_credentials(nodeuser)
     if creds \
             and data_changed('kube-control.creds', creds) \
@@ -13,7 +13,7 @@ filegroup(
         ":package-srcs",
         "//cmd/clicheck:all-srcs",
         "//cmd/cloud-controller-manager:all-srcs",
-        "//cmd/controller-manager/app/options:all-srcs",
+        "//cmd/controller-manager/app:all-srcs",
         "//cmd/gendocs:all-srcs",
         "//cmd/genkubedocs:all-srcs",
         "//cmd/genman:all-srcs",
@@ -10,8 +10,9 @@ go_library(
     srcs = ["controllermanager.go"],
     importpath = "k8s.io/kubernetes/cmd/cloud-controller-manager/app",
     deps = [
+        "//cmd/cloud-controller-manager/app/config:go_default_library",
         "//cmd/cloud-controller-manager/app/options:go_default_library",
-        "//pkg/api/legacyscheme:go_default_library",
+        "//cmd/controller-manager/app:go_default_library",
         "//pkg/cloudprovider:go_default_library",
         "//pkg/controller:go_default_library",
         "//pkg/controller/cloud:go_default_library",
@@ -20,17 +21,12 @@ go_library(
         "//pkg/util/configz:go_default_library",
         "//pkg/version/verflag:go_default_library",
         "//vendor/github.com/golang/glog:go_default_library",
-        "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
         "//vendor/github.com/spf13/cobra:go_default_library",
-        "//vendor/k8s.io/api/core/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
-        "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library",
         "//vendor/k8s.io/client-go/informers:go_default_library",
         "//vendor/k8s.io/client-go/kubernetes:go_default_library",
-        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
         "//vendor/k8s.io/client-go/rest:go_default_library",
-        "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
         "//vendor/k8s.io/client-go/tools/leaderelection:go_default_library",
         "//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library",
         "//vendor/k8s.io/client-go/tools/record:go_default_library",
|
||||
name = "all-srcs",
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//cmd/cloud-controller-manager/app/config:all-srcs",
|
||||
"//cmd/cloud-controller-manager/app/options:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
|
cmd/cloud-controller-manager/app/config/BUILD (new file, 23 lines)
@@ -0,0 +1,23 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["config.go"],
+    importpath = "k8s.io/kubernetes/cmd/cloud-controller-manager/app/config",
+    visibility = ["//visibility:public"],
+    deps = ["//cmd/controller-manager/app:go_default_library"],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)
cmd/cloud-controller-manager/app/config/config.go (new file, 55 lines)
@@ -0,0 +1,55 @@
+/*
+Copyright 2018 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+import (
+	"time"
+
+	genericcontrollermanager "k8s.io/kubernetes/cmd/controller-manager/app"
+)
+
+// ExtraConfig are part of Config, also can place your custom config here.
+type ExtraConfig struct {
+	NodeStatusUpdateFrequency time.Duration
+}
+
+// Config is the main context object for the cloud controller manager.
+type Config struct {
+	Generic genericcontrollermanager.Config
+	Extra   ExtraConfig
+}
+
+type completedConfig struct {
+	Generic genericcontrollermanager.CompletedConfig
+	Extra   *ExtraConfig
+}
+
+// CompletedConfig same as Config, just to swap private object.
+type CompletedConfig struct {
+	// Embed a private pointer that cannot be instantiated outside of this package.
+	*completedConfig
+}
+
+// Complete fills in any fields not set that are required to have valid data. It's mutating the receiver.
+func (c *Config) Complete() *CompletedConfig {
+	cc := completedConfig{
+		c.Generic.Complete(),
+		&c.Extra,
+	}
+
+	return &CompletedConfig{&cc}
+}
@@ -20,32 +20,24 @@ import (
 	"fmt"
 	"math/rand"
 	"net"
-	"net/http"
-	"net/http/pprof"
 	"os"
-	goruntime "runtime"
-	"strconv"
 	"strings"
 	"time"

 	"github.com/golang/glog"
-	"github.com/prometheus/client_golang/prometheus"
 	"github.com/spf13/cobra"

-	"k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/apimachinery/pkg/util/wait"
-	"k8s.io/apiserver/pkg/server/healthz"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
-	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
 	restclient "k8s.io/client-go/rest"
-	"k8s.io/client-go/tools/clientcmd"
 	"k8s.io/client-go/tools/leaderelection"
 	"k8s.io/client-go/tools/leaderelection/resourcelock"
 	"k8s.io/client-go/tools/record"
+	cloudcontrollerconfig "k8s.io/kubernetes/cmd/cloud-controller-manager/app/config"
 	"k8s.io/kubernetes/cmd/cloud-controller-manager/app/options"
-	"k8s.io/kubernetes/pkg/api/legacyscheme"
+	genericcontrollermanager "k8s.io/kubernetes/cmd/controller-manager/app"
 	"k8s.io/kubernetes/pkg/cloudprovider"
 	"k8s.io/kubernetes/pkg/controller"
 	cloudcontrollers "k8s.io/kubernetes/pkg/controller/cloud"
@@ -62,7 +54,7 @@ const (

 // NewCloudControllerManagerCommand creates a *cobra.Command object with default parameters
 func NewCloudControllerManagerCommand() *cobra.Command {
-	s := options.NewCloudControllerManagerServer()
+	s := options.NewCloudControllerManagerOptions()
 	cmd := &cobra.Command{
 		Use: "cloud-controller-manager",
 		Long: `The Cloud controller manager is a daemon that embeds
@@ -70,7 +62,13 @@ the cloud specific control loops shipped with Kubernetes.`,
 		Run: func(cmd *cobra.Command, args []string) {
 			verflag.PrintAndExitIfRequested()

-			if err := Run(s); err != nil {
+			c, err := s.Config()
+			if err != nil {
+				fmt.Fprintf(os.Stderr, "%v\n", err)
+				os.Exit(1)
+			}
+
+			if err := Run(c.Complete()); err != nil {
 				fmt.Fprintf(os.Stderr, "%v\n", err)
 				os.Exit(1)
 			}
@@ -83,84 +81,73 @@ the cloud specific control loops shipped with Kubernetes.`,
 }

 // resyncPeriod computes the time interval a shared informer waits before resyncing with the api server
-func resyncPeriod(s *options.CloudControllerManagerServer) func() time.Duration {
+func resyncPeriod(c *cloudcontrollerconfig.CompletedConfig) func() time.Duration {
 	return func() time.Duration {
 		factor := rand.Float64() + 1
-		return time.Duration(float64(s.MinResyncPeriod.Nanoseconds()) * factor)
+		return time.Duration(float64(c.Generic.ComponentConfig.MinResyncPeriod.Nanoseconds()) * factor)
 	}
 }

 // Run runs the ExternalCMServer. This should never exit.
-func Run(s *options.CloudControllerManagerServer) error {
-	if s.CloudProvider == "" {
-		glog.Fatalf("--cloud-provider cannot be empty")
-	}
-
-	cloud, err := cloudprovider.InitCloudProvider(s.CloudProvider, s.CloudConfigFile)
+func Run(c *cloudcontrollerconfig.CompletedConfig) error {
+	cloud, err := cloudprovider.InitCloudProvider(c.Generic.ComponentConfig.CloudProvider, c.Generic.ComponentConfig.CloudConfigFile)
 	if err != nil {
 		glog.Fatalf("Cloud provider could not be initialized: %v", err)
 	}

 	if cloud == nil {
 		glog.Fatalf("cloud provider is nil")
 	}

 	if cloud.HasClusterID() == false {
-		if s.AllowUntaggedCloud == true {
+		if c.Generic.ComponentConfig.AllowUntaggedCloud == true {
 			glog.Warning("detected a cluster without a ClusterID. A ClusterID will be required in the future. Please tag your cluster to avoid any future issues")
 		} else {
 			glog.Fatalf("no ClusterID found. A ClusterID is required for the cloud provider to function properly. This check can be bypassed by setting the allow-untagged-cloud option")
 		}
 	}

-	if c, err := configz.New("componentconfig"); err == nil {
-		c.Set(s.KubeControllerManagerConfiguration)
+	// setup /configz endpoint
+	if cz, err := configz.New("componentconfig"); err == nil {
+		cz.Set(c.Generic.ComponentConfig)
 	} else {
-		glog.Errorf("unable to register configz: %s", err)
-	}
-	kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
-	if err != nil {
-		return err
+		glog.Errorf("unable to register configz: %c", err)
 	}

-	// Set the ContentType of the requests from kube client
-	kubeconfig.ContentConfig.ContentType = s.ContentType
-	// Override kubeconfig qps/burst settings from flags
-	kubeconfig.QPS = s.KubeAPIQPS
-	kubeconfig.Burst = int(s.KubeAPIBurst)
-	kubeClient, err := kubernetes.NewForConfig(restclient.AddUserAgent(kubeconfig, "cloud-controller-manager"))
-	if err != nil {
-		glog.Fatalf("Invalid API configuration: %v", err)
+	// Start the controller manager HTTP server
+	stopCh := make(chan struct{})
+	if c.Generic.SecureServing != nil {
+		if err := genericcontrollermanager.Serve(&c.Generic, c.Generic.SecureServing.Serve, stopCh); err != nil {
+			return err
+		}
+	}
+	if c.Generic.InsecureServing != nil {
+		if err := genericcontrollermanager.Serve(&c.Generic, c.Generic.InsecureServing.Serve, stopCh); err != nil {
+			return err
+		}
 	}
-	leaderElectionClient := kubernetes.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election"))
-
-	// Start the external controller manager server
-	go startHTTP(s)
-
-	recorder := createRecorder(kubeClient)

 	run := func(stop <-chan struct{}) {
 		rootClientBuilder := controller.SimpleControllerClientBuilder{
-			ClientConfig: kubeconfig,
+			ClientConfig: c.Generic.Kubeconfig,
 		}
 		var clientBuilder controller.ControllerClientBuilder
-		if s.UseServiceAccountCredentials {
+		if c.Generic.ComponentConfig.UseServiceAccountCredentials {
 			clientBuilder = controller.SAControllerClientBuilder{
-				ClientConfig:         restclient.AnonymousClientConfig(kubeconfig),
-				CoreClient:           kubeClient.CoreV1(),
-				AuthenticationClient: kubeClient.AuthenticationV1(),
+				ClientConfig:         restclient.AnonymousClientConfig(c.Generic.Kubeconfig),
+				CoreClient:           c.Generic.Client.CoreV1(),
+				AuthenticationClient: c.Generic.Client.AuthenticationV1(),
 				Namespace:            "kube-system",
 			}
 		} else {
 			clientBuilder = rootClientBuilder
 		}

-		if err := StartControllers(s, kubeconfig, rootClientBuilder, clientBuilder, stop, recorder, cloud); err != nil {
+		if err := startControllers(c, c.Generic.Kubeconfig, rootClientBuilder, clientBuilder, stop, c.Generic.EventRecorder, cloud); err != nil {
 			glog.Fatalf("error running controllers: %v", err)
 		}
 	}

-	if !s.LeaderElection.LeaderElect {
+	if !c.Generic.ComponentConfig.LeaderElection.LeaderElect {
 		run(nil)
 		panic("unreachable")
 	}
@@ -174,13 +161,13 @@ func Run(s *options.CloudControllerManagerServer) error {
 	id = id + "_" + string(uuid.NewUUID())

 	// Lock required for leader election
-	rl, err := resourcelock.New(s.LeaderElection.ResourceLock,
+	rl, err := resourcelock.New(c.Generic.ComponentConfig.LeaderElection.ResourceLock,
 		"kube-system",
 		"cloud-controller-manager",
-		leaderElectionClient.CoreV1(),
+		c.Generic.LeaderElectionClient.CoreV1(),
 		resourcelock.ResourceLockConfig{
 			Identity:      id,
-			EventRecorder: recorder,
+			EventRecorder: c.Generic.EventRecorder,
 		})
 	if err != nil {
 		glog.Fatalf("error creating lock: %v", err)
@@ -189,9 +176,9 @@ func Run(s *options.CloudControllerManagerServer) error {
 	// Try and become the leader and start cloud controller manager loops
 	leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
 		Lock:          rl,
-		LeaseDuration: s.LeaderElection.LeaseDuration.Duration,
-		RenewDeadline: s.LeaderElection.RenewDeadline.Duration,
-		RetryPeriod:   s.LeaderElection.RetryPeriod.Duration,
+		LeaseDuration: c.Generic.ComponentConfig.LeaderElection.LeaseDuration.Duration,
+		RenewDeadline: c.Generic.ComponentConfig.LeaderElection.RenewDeadline.Duration,
+		RetryPeriod:   c.Generic.ComponentConfig.LeaderElection.RetryPeriod.Duration,
 		Callbacks: leaderelection.LeaderCallbacks{
 			OnStartedLeading: run,
 			OnStoppedLeading: func() {
@@ -202,36 +189,36 @@
 	panic("unreachable")
 }

-// StartControllers starts the cloud specific controller loops.
-func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restclient.Config, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}, recorder record.EventRecorder, cloud cloudprovider.Interface) error {
+// startControllers starts the cloud specific controller loops.
+func startControllers(c *cloudcontrollerconfig.CompletedConfig, kubeconfig *restclient.Config, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}, recorder record.EventRecorder, cloud cloudprovider.Interface) error {
 	// Function to build the kube client object
 	client := func(serviceAccountName string) kubernetes.Interface {
 		return clientBuilder.ClientOrDie(serviceAccountName)
 	}

 	if cloud != nil {
 		// Initialize the cloud provider with a reference to the clientBuilder
 		cloud.Initialize(clientBuilder)
 	}

 	// TODO: move this setup into Config
 	versionedClient := rootClientBuilder.ClientOrDie("shared-informers")
-	sharedInformers := informers.NewSharedInformerFactory(versionedClient, resyncPeriod(s)())
+	sharedInformers := informers.NewSharedInformerFactory(versionedClient, resyncPeriod(c)())

 	// Start the CloudNodeController
 	nodeController := cloudcontrollers.NewCloudNodeController(
 		sharedInformers.Core().V1().Nodes(),
 		client("cloud-node-controller"), cloud,
-		s.NodeMonitorPeriod.Duration,
-		s.NodeStatusUpdateFrequency.Duration)
+		c.Generic.ComponentConfig.NodeMonitorPeriod.Duration,
+		c.Extra.NodeStatusUpdateFrequency)

 	nodeController.Run()
-	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
+	time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter))

 	// Start the PersistentVolumeLabelController
 	pvlController := cloudcontrollers.NewPersistentVolumeLabelController(client("pvl-controller"), cloud)
 	threads := 5
 	go pvlController.Run(threads, stop)
-	time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
+	time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter))

 	// Start the service controller
 	serviceController, err := servicecontroller.New(
@@ -239,34 +226,34 @@ func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restc
 		client("service-controller"),
 		sharedInformers.Core().V1().Services(),
 		sharedInformers.Core().V1().Nodes(),
-		s.ClusterName,
+		c.Generic.ComponentConfig.ClusterName,
 	)
 	if err != nil {
 		glog.Errorf("Failed to start service controller: %v", err)
 	} else {
-		go serviceController.Run(stop, int(s.ConcurrentServiceSyncs))
-		time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
+		go serviceController.Run(stop, int(c.Generic.ComponentConfig.ConcurrentServiceSyncs))
+		time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter))
 	}

 	// If CIDRs should be allocated for pods and set on the CloudProvider, then start the route controller
-	if s.AllocateNodeCIDRs && s.ConfigureCloudRoutes {
+	if c.Generic.ComponentConfig.AllocateNodeCIDRs && c.Generic.ComponentConfig.ConfigureCloudRoutes {
 		if routes, ok := cloud.Routes(); !ok {
 			glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.")
 		} else {
 			var clusterCIDR *net.IPNet
-			if len(strings.TrimSpace(s.ClusterCIDR)) != 0 {
-				_, clusterCIDR, err = net.ParseCIDR(s.ClusterCIDR)
+			if len(strings.TrimSpace(c.Generic.ComponentConfig.ClusterCIDR)) != 0 {
+				_, clusterCIDR, err = net.ParseCIDR(c.Generic.ComponentConfig.ClusterCIDR)
 				if err != nil {
-					glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", s.ClusterCIDR, err)
+					glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", c.Generic.ComponentConfig.ClusterCIDR, err)
 				}
 			}

-			routeController := routecontroller.New(routes, client("route-controller"), sharedInformers.Core().V1().Nodes(), s.ClusterName, clusterCIDR)
-			go routeController.Run(stop, s.RouteReconciliationPeriod.Duration)
-			time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
+			routeController := routecontroller.New(routes, client("route-controller"), sharedInformers.Core().V1().Nodes(), c.Generic.ComponentConfig.ClusterName, clusterCIDR)
+			go routeController.Run(stop, c.Generic.ComponentConfig.RouteReconciliationPeriod.Duration)
+			time.Sleep(wait.Jitter(c.Generic.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter))
 		}
 	} else {
-		glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", s.AllocateNodeCIDRs, s.ConfigureCloudRoutes)
+		glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", c.Generic.ComponentConfig.AllocateNodeCIDRs, c.Generic.ComponentConfig.ConfigureCloudRoutes)
 	}

 	// If apiserver is not running we should wait for some time and fail only then. This is particularly
@@ -286,32 +273,3 @@ func StartControllers(s *options.CloudControllerManagerServer, kubeconfig *restc

 	select {}
 }
-
-func startHTTP(s *options.CloudControllerManagerServer) {
-	mux := http.NewServeMux()
-	healthz.InstallHandler(mux)
-	if s.EnableProfiling {
-		mux.HandleFunc("/debug/pprof/", pprof.Index)
-		mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
-		mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
-		mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
-		if s.EnableContentionProfiling {
-			goruntime.SetBlockProfileRate(1)
-		}
-	}
-	configz.InstallHandler(mux)
-	mux.Handle("/metrics", prometheus.Handler())
-
-	server := &http.Server{
-		Addr:    net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
-		Handler: mux,
-	}
-	glog.Fatal(server.ListenAndServe())
-}
-
-func createRecorder(kubeClient *kubernetes.Clientset) record.EventRecorder {
-	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartLogging(glog.Infof)
-	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
-	return eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "cloud-controller-manager"})
-}
@@ -11,12 +11,14 @@ go_library(
     srcs = ["options.go"],
     importpath = "k8s.io/kubernetes/cmd/cloud-controller-manager/app/options",
     deps = [
+        "//cmd/cloud-controller-manager/app/config:go_default_library",
         "//cmd/controller-manager/app/options:go_default_library",
         "//pkg/client/leaderelectionconfig:go_default_library",
         "//pkg/features:go_default_library",
         "//pkg/master/ports:go_default_library",
         "//vendor/github.com/spf13/pflag:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
         "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
     ],
 )
@@ -45,5 +47,6 @@ go_test(
         "//vendor/github.com/spf13/pflag:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
         "//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
+        "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library",
     ],
 )
@@ -17,10 +17,13 @@ limitations under the License.
 package options

 import (
+	"fmt"
 	"time"

 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	utilerrors "k8s.io/apimachinery/pkg/util/errors"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	cloudcontrollerconfig "k8s.io/kubernetes/cmd/cloud-controller-manager/app/config"
 	cmoptions "k8s.io/kubernetes/cmd/controller-manager/app/options"
 	"k8s.io/kubernetes/pkg/client/leaderelectionconfig"
 	"k8s.io/kubernetes/pkg/master/ports"
@@ -31,39 +34,81 @@ import (
 	"github.com/spf13/pflag"
 )

-// CloudControllerManagerServer is the main context object for the controller manager.
-type CloudControllerManagerServer struct {
-	cmoptions.ControllerManagerServer
+// CloudControllerManagerOptions is the main context object for the controller manager.
+type CloudControllerManagerOptions struct {
+	Generic cmoptions.GenericControllerManagerOptions

 	// NodeStatusUpdateFrequency is the frequency at which the controller updates nodes' status
 	NodeStatusUpdateFrequency metav1.Duration
 }

-// NewCloudControllerManagerServer creates a new ExternalCMServer with a default config.
-func NewCloudControllerManagerServer() *CloudControllerManagerServer {
-	s := CloudControllerManagerServer{
+// NewCloudControllerManagerOptions creates a new ExternalCMServer with a default config.
+func NewCloudControllerManagerOptions() *CloudControllerManagerOptions {
+	componentConfig := cmoptions.NewDefaultControllerManagerComponentConfig(ports.InsecureCloudControllerManagerPort)
+
+	s := CloudControllerManagerOptions{
 		// The common/default are kept in 'cmd/kube-controller-manager/app/options/util.go'.
 		// Please make common changes there and put anything cloud specific here.
-		ControllerManagerServer: cmoptions.ControllerManagerServer{
-			KubeControllerManagerConfiguration: cmoptions.GetDefaultControllerOptions(ports.CloudControllerManagerPort),
-		},
+		Generic:                   cmoptions.NewGenericControllerManagerOptions(componentConfig),
 		NodeStatusUpdateFrequency: metav1.Duration{Duration: 5 * time.Minute},
 	}
-	s.LeaderElection.LeaderElect = true
+	s.Generic.ComponentConfig.LeaderElection.LeaderElect = true
+
+	s.Generic.SecureServing.ServerCert.CertDirectory = "/var/run/kubernetes"
+	s.Generic.SecureServing.ServerCert.PairName = "cloud-controller-manager"
+
 	return &s
 }

 // AddFlags adds flags for a specific ExternalCMServer to the specified FlagSet
-func (s *CloudControllerManagerServer) AddFlags(fs *pflag.FlagSet) {
-	cmoptions.AddDefaultControllerFlags(&s.ControllerManagerServer, fs)
-	fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider of cloud services. Cannot be empty.")
-	fs.DurationVar(&s.NodeStatusUpdateFrequency.Duration, "node-status-update-frequency", s.NodeStatusUpdateFrequency.Duration, "Specifies how often the controller updates nodes' status.")
-	// TODO: remove --service-account-private-key-file 6 months after 1.8 is released (~1.10)
-	fs.StringVar(&s.ServiceAccountKeyFile, "service-account-private-key-file", s.ServiceAccountKeyFile, "Filename containing a PEM-encoded private RSA or ECDSA key used to sign service account tokens.")
-	fs.MarkDeprecated("service-account-private-key-file", "This flag is currently no-op and will be deleted.")
-	fs.Int32Var(&s.ConcurrentServiceSyncs, "concurrent-service-syncs", s.ConcurrentServiceSyncs, "The number of services that are allowed to sync concurrently. Larger number = more responsive service management, but more CPU (and network) load")
+func (o *CloudControllerManagerOptions) AddFlags(fs *pflag.FlagSet) {
+	o.Generic.AddFlags(fs)

-	leaderelectionconfig.BindFlags(&s.LeaderElection, fs)
+	fs.StringVar(&o.Generic.ComponentConfig.CloudProvider, "cloud-provider", o.Generic.ComponentConfig.CloudProvider, "The provider of cloud services. Cannot be empty.")
+	fs.DurationVar(&o.NodeStatusUpdateFrequency.Duration, "node-status-update-frequency", o.NodeStatusUpdateFrequency.Duration, "Specifies how often the controller updates nodes' status.")
 	// TODO: remove --service-account-private-key-file 6 months after 1.8 is released (~1.10)
|
||||
fs.StringVar(&o.Generic.ComponentConfig.ServiceAccountKeyFile, "service-account-private-key-file", o.Generic.ComponentConfig.ServiceAccountKeyFile, "Filename containing a PEM-encoded private RSA or ECDSA key used to sign service account tokens.")
|
||||
fs.MarkDeprecated("service-account-private-key-file", "This flag is currently no-op and will be deleted.")
|
||||
fs.Int32Var(&o.Generic.ComponentConfig.ConcurrentServiceSyncs, "concurrent-service-syncs", o.Generic.ComponentConfig.ConcurrentServiceSyncs, "The number of services that are allowed to sync concurrently. Larger number = more responsive service management, but more CPU (and network) load")
|
||||
|
||||
leaderelectionconfig.BindFlags(&o.Generic.ComponentConfig.LeaderElection, fs)
|
||||
|
||||
utilfeature.DefaultFeatureGate.AddFlag(fs)
|
||||
}
|
||||
|
||||
// ApplyTo fills up cloud controller manager config with options.
|
||||
func (o *CloudControllerManagerOptions) ApplyTo(c *cloudcontrollerconfig.Config) error {
|
||||
if err := o.Generic.ApplyTo(&c.Generic, "cloud-controller-manager"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
c.Extra.NodeStatusUpdateFrequency = o.NodeStatusUpdateFrequency.Duration
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate is used to validate config before launching the cloud controller manager
|
||||
func (o *CloudControllerManagerOptions) Validate() error {
|
||||
errors := []error{}
|
||||
errors = append(errors, o.Generic.Validate()...)
|
||||
|
||||
if len(o.Generic.ComponentConfig.CloudProvider) == 0 {
|
||||
errors = append(errors, fmt.Errorf("--cloud-provider cannot be empty"))
|
||||
}
|
||||
|
||||
return utilerrors.NewAggregate(errors)
|
||||
}
|
||||
|
||||
// Config return a cloud controller manager config objective
|
||||
func (o CloudControllerManagerOptions) Config() (*cloudcontrollerconfig.Config, error) {
|
||||
if err := o.Validate(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c := &cloudcontrollerconfig.Config{}
|
||||
if err := o.ApplyTo(c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
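For orientation, a minimal sketch of how the new options type is consumed end to end; this call site is an assumption (the flag-set name and argument slice are illustrative), not part of this commit:

	// Sketch: lifecycle of CloudControllerManagerOptions at an assumed call site.
	fs := pflag.NewFlagSet("cloud-controller-manager", pflag.ExitOnError)

	o := options.NewCloudControllerManagerOptions() // defaults; leader election enabled
	o.AddFlags(fs)                                  // generic flags plus cloud-specific ones
	fs.Parse(os.Args[1:])

	// Config() runs Validate() and ApplyTo() and returns the runtime configuration.
	c, err := o.Config()
	if err != nil {
		glog.Fatalf("invalid cloud-controller-manager configuration: %v", err)
	}
	_ = c // handed to the run loop / StartControllers

The point of the split is visible here: flags mutate only the options struct, and the immutable Config is produced in one validated step.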
@ -17,6 +17,7 @@ limitations under the License.

package options

import (
	"net"
	"reflect"
	"testing"
	"time"
@ -25,13 +26,14 @@ import (

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/diff"
	apiserveroptions "k8s.io/apiserver/pkg/server/options"
	cmoptions "k8s.io/kubernetes/cmd/controller-manager/app/options"
	"k8s.io/kubernetes/pkg/apis/componentconfig"
)

func TestAddFlags(t *testing.T) {
	f := pflag.NewFlagSet("addflagstest", pflag.ContinueOnError)
	s := NewCloudControllerManagerServer()
	s := NewCloudControllerManagerOptions()
	s.AddFlags(f)

	args := []string{
@ -62,16 +64,19 @@ func TestAddFlags(t *testing.T) {
		"--route-reconciliation-period=30s",
		"--min-resync-period=100m",
		"--use-service-account-credentials=false",
		"--cert-dir=/a/b/c",
		"--bind-address=192.168.4.21",
		"--secure-port=10001",
	}
	f.Parse(args)

	expected := &CloudControllerManagerServer{
		ControllerManagerServer: cmoptions.ControllerManagerServer{
			KubeControllerManagerConfiguration: componentconfig.KubeControllerManagerConfiguration{
	expected := &CloudControllerManagerOptions{
		Generic: cmoptions.GenericControllerManagerOptions{
			ComponentConfig: componentconfig.KubeControllerManagerConfiguration{
				CloudProvider:   "gce",
				CloudConfigFile: "/cloud-config",
				Port:            10000,
				Address:         "192.168.4.10",
				Port:            10253,    // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config
				Address:         "0.0.0.0", // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config
				ConcurrentEndpointSyncs:      5,
				ConcurrentRSSyncs:            5,
				ConcurrentResourceQuotaSyncs: 5,
@ -138,6 +143,19 @@ func TestAddFlags(t *testing.T) {
				CIDRAllocatorType: "RangeAllocator",
				Controllers:       []string{"*"},
			},
			SecureServing: &apiserveroptions.SecureServingOptions{
				BindPort:    10001,
				BindAddress: net.ParseIP("192.168.4.21"),
				ServerCert: apiserveroptions.GeneratableKeyCert{
					CertDirectory: "/a/b/c",
					PairName:      "cloud-controller-manager",
				},
			},
			InsecureServing: &cmoptions.InsecureServingOptions{
				BindAddress: net.ParseIP("192.168.4.10"),
				BindPort:    int(10000),
				BindNetwork: "tcp",
			},
			Kubeconfig: "/kubeconfig",
			Master:     "192.168.4.20",
		},
cmd/controller-manager/app/BUILD (new file, 44 lines)
@ -0,0 +1,44 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "config.go",
        "insecure_serving.go",
        "serve.go",
    ],
    importpath = "k8s.io/kubernetes/cmd/controller-manager/app",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/apis/componentconfig:go_default_library",
        "//pkg/util/configz:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/endpoints/filters:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server/filters:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//cmd/controller-manager/app/options:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
cmd/controller-manager/app/config.go (new file, 65 lines)
@ -0,0 +1,65 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package app

import (
	apiserver "k8s.io/apiserver/pkg/server"
	clientset "k8s.io/client-go/kubernetes"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/record"
	"k8s.io/kubernetes/pkg/apis/componentconfig"
)

// Config is the main context object for the controller manager.
type Config struct {
	// TODO: split up the component config. This is not generic.
	ComponentConfig componentconfig.KubeControllerManagerConfiguration

	SecureServing *apiserver.SecureServingInfo
	// TODO: remove deprecated insecure serving
	InsecureServing *InsecureServingInfo
	Authentication  apiserver.AuthenticationInfo
	Authorization   apiserver.AuthorizationInfo

	// the general kube client
	Client *clientset.Clientset

	// the client only used for leader election
	LeaderElectionClient *clientset.Clientset

	// the rest config for the master
	Kubeconfig *restclient.Config

	// the event sink
	EventRecorder record.EventRecorder
}

type completedConfig struct {
	*Config
}

// CompletedConfig is the same as Config, but wraps a private object so that it can only be produced by Complete.
type CompletedConfig struct {
	// Embed a private pointer that cannot be instantiated outside of this package.
	*completedConfig
}

// Complete fills in any fields not set that are required to have valid data. It mutates the receiver.
func (c *Config) Complete() CompletedConfig {
	cc := completedConfig{c}
	return CompletedConfig{&cc}
}
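The Config/CompletedConfig pair mirrors the apiserver pattern: once Complete() has filled in derived defaults, downstream code can demand a CompletedConfig in its signature and get a compile-time guarantee that completion happened. A hedged sketch of the intended use (the run function and its import alias are assumptions):

	// Sketch: downstream code that cannot be handed an incomplete config.
	func run(c app.CompletedConfig, stopCh <-chan struct{}) error {
		// By construction c passed through Complete(), so derived fields are populated.
		<-stopCh
		return nil
	}

	// Caller side; in practice the Config is populated by options.ApplyTo.
	cfg := &app.Config{}
	err := run(cfg.Complete(), stopCh)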
cmd/controller-manager/app/insecure_serving.go (new file, 46 lines)
@ -0,0 +1,46 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package app

import (
	"net"
	"net/http"
	"time"

	"github.com/golang/glog"

	"k8s.io/apiserver/pkg/server"
)

// InsecureServingInfo is the main context object for the insecure http server.
type InsecureServingInfo struct {
	// Listener is the insecure server's network listener.
	Listener net.Listener
}

// Serve starts an insecure http server with the given handler. It fails only if
// the initial listen call fails. It does not block.
func (s *InsecureServingInfo) Serve(handler http.Handler, shutdownTimeout time.Duration, stopCh <-chan struct{}) error {
	insecureServer := &http.Server{
		Addr:           s.Listener.Addr().String(),
		Handler:        handler,
		MaxHeaderBytes: 1 << 20,
	}

	glog.Infof("Serving insecurely on %s", s.Listener.Addr())
	return server.RunServer(insecureServer, s.Listener, shutdownTimeout, stopCh)
}
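A small usage sketch (assumed wiring, not in this diff): because Serve returns as soon as the listener is handed off, the caller blocks on the stop channel itself. The address and mux handler below are illustrative.

	ln, err := net.Listen("tcp", "127.0.0.1:10252") // assumed address
	if err != nil {
		glog.Fatalf("listen: %v", err)
	}
	insecure := &app.InsecureServingInfo{Listener: ln}

	stopCh := make(chan struct{})
	if err := insecure.Serve(mux, 0 /* shutdownTimeout */, stopCh); err != nil {
		glog.Fatalf("insecure serving failed to start: %v", err)
	}
	<-stopCh // Serve does not block; wait for shutdown here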
@ -2,15 +2,28 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["utils.go"],
    srcs = [
        "insecure_serving.go",
        "options.go",
    ],
    importpath = "k8s.io/kubernetes/cmd/controller-manager/app/options",
    visibility = ["//visibility:public"],
    deps = [
        "//cmd/controller-manager/app:go_default_library",
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/apis/componentconfig:go_default_library",
        "//pkg/client/leaderelectionconfig:go_default_library",
        "//vendor/github.com/cloudflare/cfssl/helpers:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/spf13/pflag:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server/options:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
    ],
)
cmd/controller-manager/app/options/insecure_serving.go (new file, 112 lines)
@ -0,0 +1,112 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
	"fmt"
	"net"

	"github.com/spf13/pflag"
	"k8s.io/apiserver/pkg/server/options"
	genericcontrollermanager "k8s.io/kubernetes/cmd/controller-manager/app"
	"k8s.io/kubernetes/pkg/apis/componentconfig"
)

// InsecureServingOptions are for creating an unauthenticated, unauthorized, insecure port.
// No one should be using these anymore.
type InsecureServingOptions struct {
	BindAddress net.IP
	BindPort    int
	// BindNetwork is the type of network to bind to - defaults to "tcp", accepts "tcp",
	// "tcp4", and "tcp6".
	BindNetwork string

	// Listener is the insecure server's network listener.
	// Either Listener or BindAddress/BindPort/BindNetwork is set;
	// if Listener is set, it is used and BindAddress/BindPort/BindNetwork are ignored.
	Listener net.Listener
}

// Validate ensures that the insecure port value is within the valid port range.
func (s *InsecureServingOptions) Validate() []error {
	errors := []error{}

	if s == nil {
		return nil
	}

	if s.BindPort < 0 || s.BindPort > 32767 {
		errors = append(errors, fmt.Errorf("--insecure-port %v must be between 0 and 32767, inclusive. 0 for turning off insecure (HTTP) port", s.BindPort))
	}

	return errors
}

// AddFlags adds flags related to insecure serving for controller manager to the specified FlagSet.
func (s *InsecureServingOptions) AddFlags(fs *pflag.FlagSet) {
	if s == nil {
		return
	}
}

// AddDeprecatedFlags adds deprecated flags related to insecure serving for controller manager to the specified FlagSet.
// TODO: remove these once kops stops using `--address`
func (s *InsecureServingOptions) AddDeprecatedFlags(fs *pflag.FlagSet) {
	if s == nil {
		return
	}

	fs.IPVar(&s.BindAddress, "address", s.BindAddress,
		"DEPRECATED: the IP address on which to listen for the --port port. See --bind-address instead.")
	// MarkDeprecated hides the flag from the help. We don't want that:
	// fs.MarkDeprecated("address", "see --bind-address instead.")

	fs.IntVar(&s.BindPort, "port", s.BindPort, "DEPRECATED: the port on which to serve HTTP insecurely without authentication and authorization. If 0, don't serve HTTP at all. See --secure-port instead.")
	// MarkDeprecated hides the flag from the help. We don't want that:
	// fs.MarkDeprecated("port", "see --secure-port instead.")
}

// ApplyTo adds InsecureServingOptions to the InsecureServingInfo and kube-controller-manager configuration.
// Note: the double pointer allows setting the *InsecureServingInfo to nil without referencing the struct hosting this pointer.
func (s *InsecureServingOptions) ApplyTo(c **genericcontrollermanager.InsecureServingInfo, cfg *componentconfig.KubeControllerManagerConfiguration) error {
	if s == nil {
		return nil
	}
	if s.BindPort <= 0 {
		return nil
	}

	if s.Listener == nil {
		var err error
		addr := net.JoinHostPort(s.BindAddress.String(), fmt.Sprintf("%d", s.BindPort))
		s.Listener, s.BindPort, err = options.CreateListener(s.BindNetwork, addr)
		if err != nil {
			return fmt.Errorf("failed to create listener: %v", err)
		}
	}

	*c = &genericcontrollermanager.InsecureServingInfo{
		Listener: s.Listener,
	}

	// sync back to component config
	// TODO: find a more elegant way than syncing back the values.
	cfg.Port = int32(s.BindPort)
	cfg.Address = s.BindAddress.String()

	return nil
}
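The double pointer in ApplyTo is what lets a disabled option (BindPort of 0) leave the target nil rather than half-initialized. A hedged sketch of the effect, with variable names assumed:

	var info *genericcontrollermanager.InsecureServingInfo // stays nil when the insecure port is off

	opts := &options.InsecureServingOptions{
		BindAddress: net.ParseIP("127.0.0.1"),
		BindPort:    0, // 0 means "insecure serving disabled"
		BindNetwork: "tcp",
	}
	var cfg componentconfig.KubeControllerManagerConfiguration
	if err := opts.ApplyTo(&info, &cfg); err != nil {
		glog.Fatalf("apply insecure serving options: %v", err)
	}
	// info is still nil here, which callers can use as the "disabled" signal.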
cmd/controller-manager/app/options/options.go (new file, 242 lines)
@ -0,0 +1,242 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
	"net"
	"time"

	"github.com/cloudflare/cfssl/helpers"
	"github.com/golang/glog"

	"github.com/spf13/pflag"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	apiserveroptions "k8s.io/apiserver/pkg/server/options"
	"k8s.io/client-go/kubernetes"
	clientset "k8s.io/client-go/kubernetes"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/record"
	genericcontrollermanager "k8s.io/kubernetes/cmd/controller-manager/app"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/apis/componentconfig"
	"k8s.io/kubernetes/pkg/client/leaderelectionconfig"
)

// GenericControllerManagerOptions is the common structure for a controller manager. It works with NewGenericControllerManagerOptions
// and AddDefaultControllerFlags to create the common components of kube-controller-manager and cloud-controller-manager.
type GenericControllerManagerOptions struct {
	// TODO: turn ComponentConfig into modular option structs. This is not generic.
	ComponentConfig componentconfig.KubeControllerManagerConfiguration

	SecureServing *apiserveroptions.SecureServingOptions
	// TODO: remove insecure serving mode
	InsecureServing *InsecureServingOptions
	Authentication  *apiserveroptions.DelegatingAuthenticationOptions
	Authorization   *apiserveroptions.DelegatingAuthorizationOptions

	Master     string
	Kubeconfig string
}

const (
	// These defaults are deprecated and exported so that we can warn if
	// they are being used.

	// DefaultClusterSigningCertFile is deprecated. Do not use.
	DefaultClusterSigningCertFile = "/etc/kubernetes/ca/ca.pem"
	// DefaultClusterSigningKeyFile is deprecated. Do not use.
	DefaultClusterSigningKeyFile = "/etc/kubernetes/ca/ca.key"
)

// NewGenericControllerManagerOptions returns common/default configuration values for both
// the kube-controller-manager and the cloud-controller-manager. Any common changes should
// be made here. Any individual changes should be made in that controller.
func NewGenericControllerManagerOptions(componentConfig componentconfig.KubeControllerManagerConfiguration) GenericControllerManagerOptions {
	o := GenericControllerManagerOptions{
		ComponentConfig: componentConfig,
		SecureServing:   apiserveroptions.NewSecureServingOptions(),
		InsecureServing: &InsecureServingOptions{
			BindAddress: net.ParseIP(componentConfig.Address),
			BindPort:    int(componentConfig.Port),
			BindNetwork: "tcp",
		},
		Authentication: nil, // TODO: enable with apiserveroptions.NewDelegatingAuthenticationOptions()
		Authorization:  nil, // TODO: enable with apiserveroptions.NewDelegatingAuthorizationOptions()
	}

	// disable secure serving for now
	// TODO: enable HTTPS by default
	o.SecureServing.BindPort = 0

	return o
}
// NewDefaultControllerManagerComponentConfig returns default kube-controller manager configuration object.
func NewDefaultControllerManagerComponentConfig(insecurePort int32) componentconfig.KubeControllerManagerConfiguration {
	return componentconfig.KubeControllerManagerConfiguration{
		Controllers:                  []string{"*"},
		Port:                         insecurePort,
		Address:                      "0.0.0.0",
		ConcurrentEndpointSyncs:      5,
		ConcurrentServiceSyncs:       1,
		ConcurrentRCSyncs:            5,
		ConcurrentRSSyncs:            5,
		ConcurrentDaemonSetSyncs:     2,
		ConcurrentJobSyncs:           5,
		ConcurrentResourceQuotaSyncs: 5,
		ConcurrentDeploymentSyncs:    5,
		ConcurrentNamespaceSyncs:     10,
		ConcurrentSATokenSyncs:       5,
		RouteReconciliationPeriod:    metav1.Duration{Duration: 10 * time.Second},
		ResourceQuotaSyncPeriod:      metav1.Duration{Duration: 5 * time.Minute},
		NamespaceSyncPeriod:          metav1.Duration{Duration: 5 * time.Minute},
		PVClaimBinderSyncPeriod:      metav1.Duration{Duration: 15 * time.Second},
		HorizontalPodAutoscalerSyncPeriod:               metav1.Duration{Duration: 30 * time.Second},
		HorizontalPodAutoscalerUpscaleForbiddenWindow:   metav1.Duration{Duration: 3 * time.Minute},
		HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 5 * time.Minute},
		HorizontalPodAutoscalerTolerance:                0.1,
		DeploymentControllerSyncPeriod:                  metav1.Duration{Duration: 30 * time.Second},
		MinResyncPeriod:                                 metav1.Duration{Duration: 12 * time.Hour},
		RegisterRetryCount:                              10,
		PodEvictionTimeout:                              metav1.Duration{Duration: 5 * time.Minute},
		NodeMonitorGracePeriod:                          metav1.Duration{Duration: 40 * time.Second},
		NodeStartupGracePeriod:                          metav1.Duration{Duration: 60 * time.Second},
		NodeMonitorPeriod:                               metav1.Duration{Duration: 5 * time.Second},
		ClusterName:                                     "kubernetes",
		NodeCIDRMaskSize:                                24,
		ConfigureCloudRoutes:                            true,
		TerminatedPodGCThreshold:                        12500,
		VolumeConfiguration: componentconfig.VolumeConfiguration{
			EnableHostPathProvisioning: false,
			EnableDynamicProvisioning:  true,
			PersistentVolumeRecyclerConfiguration: componentconfig.PersistentVolumeRecyclerConfiguration{
				MaximumRetry:             3,
				MinimumTimeoutNFS:        300,
				IncrementTimeoutNFS:      30,
				MinimumTimeoutHostPath:   60,
				IncrementTimeoutHostPath: 30,
			},
			FlexVolumePluginDir: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/",
		},
		ContentType:              "application/vnd.kubernetes.protobuf",
		KubeAPIQPS:               20.0,
		KubeAPIBurst:             30,
		LeaderElection:           leaderelectionconfig.DefaultLeaderElectionConfiguration(),
		ControllerStartInterval:  metav1.Duration{Duration: 0 * time.Second},
		EnableGarbageCollector:   true,
		ConcurrentGCSyncs:        20,
		ClusterSigningCertFile:   DefaultClusterSigningCertFile,
		ClusterSigningKeyFile:    DefaultClusterSigningKeyFile,
		ClusterSigningDuration:   metav1.Duration{Duration: helpers.OneYear},
		ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 60 * time.Second},
		EnableTaintManager:       true,
		HorizontalPodAutoscalerUseRESTClients: true,
	}
}
// AddFlags adds common/default flags for both the kube and cloud Controller Manager Server to the
// specified FlagSet. Any common changes should be made here. Any individual changes should be made in that controller.
func (o *GenericControllerManagerOptions) AddFlags(fs *pflag.FlagSet) {
	fs.BoolVar(&o.ComponentConfig.UseServiceAccountCredentials, "use-service-account-credentials", o.ComponentConfig.UseServiceAccountCredentials, "If true, use individual service account credentials for each controller.")
	fs.StringVar(&o.ComponentConfig.CloudConfigFile, "cloud-config", o.ComponentConfig.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.")
	fs.BoolVar(&o.ComponentConfig.AllowUntaggedCloud, "allow-untagged-cloud", false, "Allow the cluster to run without the cluster-id on cloud instances. This is a legacy mode of operation and a cluster-id will be required in the future.")
	fs.MarkDeprecated("allow-untagged-cloud", "This flag is deprecated and will be removed in a future release. A cluster-id will be required on cloud instances.")
	fs.DurationVar(&o.ComponentConfig.RouteReconciliationPeriod.Duration, "route-reconciliation-period", o.ComponentConfig.RouteReconciliationPeriod.Duration, "The period for reconciling routes created for Nodes by cloud provider.")
	fs.DurationVar(&o.ComponentConfig.MinResyncPeriod.Duration, "min-resync-period", o.ComponentConfig.MinResyncPeriod.Duration, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod.")
	fs.DurationVar(&o.ComponentConfig.NodeMonitorPeriod.Duration, "node-monitor-period", o.ComponentConfig.NodeMonitorPeriod.Duration,
		"The period for syncing NodeStatus in NodeController.")
	fs.BoolVar(&o.ComponentConfig.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/")
	fs.BoolVar(&o.ComponentConfig.EnableContentionProfiling, "contention-profiling", false, "Enable lock contention profiling, if profiling is enabled.")
	fs.StringVar(&o.ComponentConfig.ClusterName, "cluster-name", o.ComponentConfig.ClusterName, "The instance prefix for the cluster.")
	fs.StringVar(&o.ComponentConfig.ClusterCIDR, "cluster-cidr", o.ComponentConfig.ClusterCIDR, "CIDR Range for Pods in cluster. Requires --allocate-node-cidrs to be true")
	fs.BoolVar(&o.ComponentConfig.AllocateNodeCIDRs, "allocate-node-cidrs", false, "Should CIDRs for Pods be allocated and set on the cloud provider.")
	fs.StringVar(&o.ComponentConfig.CIDRAllocatorType, "cidr-allocator-type", "RangeAllocator", "Type of CIDR allocator to use")
	fs.BoolVar(&o.ComponentConfig.ConfigureCloudRoutes, "configure-cloud-routes", true, "Should CIDRs allocated by allocate-node-cidrs be configured on the cloud provider.")
	fs.StringVar(&o.Master, "master", o.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig).")
	fs.StringVar(&o.Kubeconfig, "kubeconfig", o.Kubeconfig, "Path to kubeconfig file with authorization and master location information.")
	fs.StringVar(&o.ComponentConfig.ContentType, "kube-api-content-type", o.ComponentConfig.ContentType, "Content type of requests sent to apiserver.")
	fs.Float32Var(&o.ComponentConfig.KubeAPIQPS, "kube-api-qps", o.ComponentConfig.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver.")
	fs.Int32Var(&o.ComponentConfig.KubeAPIBurst, "kube-api-burst", o.ComponentConfig.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver.")
	fs.DurationVar(&o.ComponentConfig.ControllerStartInterval.Duration, "controller-start-interval", o.ComponentConfig.ControllerStartInterval.Duration, "Interval between starting controller managers.")

	o.SecureServing.AddFlags(fs)
	o.InsecureServing.AddFlags(fs)
	o.InsecureServing.AddDeprecatedFlags(fs)
	o.Authentication.AddFlags(fs)
	o.Authorization.AddFlags(fs)
}

// ApplyTo fills up the controller manager config with options and sets the client userAgent.
func (o *GenericControllerManagerOptions) ApplyTo(c *genericcontrollermanager.Config, userAgent string) error {
	c.ComponentConfig = o.ComponentConfig

	if err := o.SecureServing.ApplyTo(&c.SecureServing); err != nil {
		return err
	}
	if err := o.InsecureServing.ApplyTo(&c.InsecureServing, &c.ComponentConfig); err != nil {
		return err
	}
	if err := o.Authentication.ApplyTo(&c.Authentication, c.SecureServing, nil); err != nil {
		return err
	}
	if err := o.Authorization.ApplyTo(&c.Authorization); err != nil {
		return err
	}

	var err error
	c.Kubeconfig, err = clientcmd.BuildConfigFromFlags(o.Master, o.Kubeconfig)
	if err != nil {
		return err
	}
	c.Kubeconfig.ContentConfig.ContentType = o.ComponentConfig.ContentType
	c.Kubeconfig.QPS = o.ComponentConfig.KubeAPIQPS
	c.Kubeconfig.Burst = int(o.ComponentConfig.KubeAPIBurst)

	c.Client, err = clientset.NewForConfig(restclient.AddUserAgent(c.Kubeconfig, userAgent))
	if err != nil {
		return err
	}

	c.LeaderElectionClient = clientset.NewForConfigOrDie(restclient.AddUserAgent(c.Kubeconfig, "leader-election"))

	c.EventRecorder = createRecorder(c.Client, userAgent)

	return nil
}

// Validate checks GenericControllerManagerOptions and returns a slice of found errors.
func (o *GenericControllerManagerOptions) Validate() []error {
	errors := []error{}
	errors = append(errors, o.SecureServing.Validate()...)
	errors = append(errors, o.InsecureServing.Validate()...)
	errors = append(errors, o.Authentication.Validate()...)
	errors = append(errors, o.Authorization.Validate()...)

	// TODO: validate component config, master and kubeconfig

	return errors
}

func createRecorder(kubeClient *kubernetes.Clientset, userAgent string) record.EventRecorder {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
	return eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: userAgent})
}
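Taken together, a per-binary options struct now only embeds the generic options and layers its own fields beside them. A hedged sketch of what a hypothetical new controller manager would add (the Foo names and port are illustrative assumptions):

	// Sketch: a new controller manager reusing the generic plumbing.
	type FooControllerManagerOptions struct {
		Generic cmoptions.GenericControllerManagerOptions

		// binary-specific knobs live next to, not inside, the generic options
		FooSyncPeriod metav1.Duration
	}

	func NewFooControllerManagerOptions() *FooControllerManagerOptions {
		componentConfig := cmoptions.NewDefaultControllerManagerComponentConfig(10300) // port is an assumption
		return &FooControllerManagerOptions{
			Generic:       cmoptions.NewGenericControllerManagerOptions(componentConfig),
			FooSyncPeriod: metav1.Duration{Duration: 30 * time.Second},
		}
	}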
@ -1,139 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package options

import (
	"github.com/cloudflare/cfssl/helpers"
	"time"

	"github.com/spf13/pflag"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/apis/componentconfig"
	"k8s.io/kubernetes/pkg/client/leaderelectionconfig"
)

// ControllerManagerServer is the common structure for a controller manager. It works with GetDefaultControllerOptions
// and AddDefaultControllerFlags to create the common components of kube-controller-manager and cloud-controller-manager.
type ControllerManagerServer struct {
	componentconfig.KubeControllerManagerConfiguration

	Master     string
	Kubeconfig string
}

const (
	// These defaults are deprecated and exported so that we can warn if
	// they are being used.

	// DefaultClusterSigningCertFile is deprecated. Do not use.
	DefaultClusterSigningCertFile = "/etc/kubernetes/ca/ca.pem"
	// DefaultClusterSigningKeyFile is deprecated. Do not use.
	DefaultClusterSigningKeyFile = "/etc/kubernetes/ca/ca.key"
)

// GetDefaultControllerOptions returns common/default configuration values for both
// the kube-controller-manager and the cloud-controller-manager. Any common changes should
// be made here. Any individual changes should be made in that controller.
func GetDefaultControllerOptions(port int32) componentconfig.KubeControllerManagerConfiguration {
	return componentconfig.KubeControllerManagerConfiguration{
		Controllers:                  []string{"*"},
		Port:                         port,
		Address:                      "0.0.0.0",
		ConcurrentEndpointSyncs:      5,
		ConcurrentServiceSyncs:       1,
		ConcurrentRCSyncs:            5,
		ConcurrentRSSyncs:            5,
		ConcurrentDaemonSetSyncs:     2,
		ConcurrentJobSyncs:           5,
		ConcurrentResourceQuotaSyncs: 5,
		ConcurrentDeploymentSyncs:    5,
		ConcurrentNamespaceSyncs:     10,
		ConcurrentSATokenSyncs:       5,
		RouteReconciliationPeriod:    metav1.Duration{Duration: 10 * time.Second},
		ResourceQuotaSyncPeriod:      metav1.Duration{Duration: 5 * time.Minute},
		NamespaceSyncPeriod:          metav1.Duration{Duration: 5 * time.Minute},
		PVClaimBinderSyncPeriod:      metav1.Duration{Duration: 15 * time.Second},
		HorizontalPodAutoscalerSyncPeriod:               metav1.Duration{Duration: 30 * time.Second},
		HorizontalPodAutoscalerUpscaleForbiddenWindow:   metav1.Duration{Duration: 3 * time.Minute},
		HorizontalPodAutoscalerDownscaleForbiddenWindow: metav1.Duration{Duration: 5 * time.Minute},
		HorizontalPodAutoscalerTolerance:                0.1,
		DeploymentControllerSyncPeriod:                  metav1.Duration{Duration: 30 * time.Second},
		MinResyncPeriod:                                 metav1.Duration{Duration: 12 * time.Hour},
		RegisterRetryCount:                              10,
		PodEvictionTimeout:                              metav1.Duration{Duration: 5 * time.Minute},
		NodeMonitorGracePeriod:                          metav1.Duration{Duration: 40 * time.Second},
		NodeStartupGracePeriod:                          metav1.Duration{Duration: 60 * time.Second},
		NodeMonitorPeriod:                               metav1.Duration{Duration: 5 * time.Second},
		ClusterName:                                     "kubernetes",
		NodeCIDRMaskSize:                                24,
		ConfigureCloudRoutes:                            true,
		TerminatedPodGCThreshold:                        12500,
		VolumeConfiguration: componentconfig.VolumeConfiguration{
			EnableHostPathProvisioning: false,
			EnableDynamicProvisioning:  true,
			PersistentVolumeRecyclerConfiguration: componentconfig.PersistentVolumeRecyclerConfiguration{
				MaximumRetry:             3,
				MinimumTimeoutNFS:        300,
				IncrementTimeoutNFS:      30,
				MinimumTimeoutHostPath:   60,
				IncrementTimeoutHostPath: 30,
			},
			FlexVolumePluginDir: "/usr/libexec/kubernetes/kubelet-plugins/volume/exec/",
		},
		ContentType:              "application/vnd.kubernetes.protobuf",
		KubeAPIQPS:               20.0,
		KubeAPIBurst:             30,
		LeaderElection:           leaderelectionconfig.DefaultLeaderElectionConfiguration(),
		ControllerStartInterval:  metav1.Duration{Duration: 0 * time.Second},
		EnableGarbageCollector:   true,
		ConcurrentGCSyncs:        20,
		ClusterSigningCertFile:   DefaultClusterSigningCertFile,
		ClusterSigningKeyFile:    DefaultClusterSigningKeyFile,
		ClusterSigningDuration:   metav1.Duration{Duration: helpers.OneYear},
		ReconcilerSyncLoopPeriod: metav1.Duration{Duration: 60 * time.Second},
		EnableTaintManager:       true,
		HorizontalPodAutoscalerUseRESTClients: true,
	}
}

// AddDefaultControllerFlags adds common/default flags for both the kube and cloud Controller Manager Server to the
// specified FlagSet. Any common changes should be made here. Any individual changes should be made in that controller.
func AddDefaultControllerFlags(s *ControllerManagerServer, fs *pflag.FlagSet) {
	fs.Int32Var(&s.Port, "port", s.Port, "The port that the controller-manager's http service runs on.")
	fs.Var(componentconfig.IPVar{Val: &s.Address}, "address", "The IP address to serve on (set to 0.0.0.0 for all interfaces).")
	fs.BoolVar(&s.UseServiceAccountCredentials, "use-service-account-credentials", s.UseServiceAccountCredentials, "If true, use individual service account credentials for each controller.")
	fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile, "The path to the cloud provider configuration file. Empty string for no configuration file.")
	fs.BoolVar(&s.AllowUntaggedCloud, "allow-untagged-cloud", false, "Allow the cluster to run without the cluster-id on cloud instances. This is a legacy mode of operation and a cluster-id will be required in the future.")
	fs.MarkDeprecated("allow-untagged-cloud", "This flag is deprecated and will be removed in a future release. A cluster-id will be required on cloud instances.")
	fs.DurationVar(&s.RouteReconciliationPeriod.Duration, "route-reconciliation-period", s.RouteReconciliationPeriod.Duration, "The period for reconciling routes created for Nodes by cloud provider.")
	fs.DurationVar(&s.MinResyncPeriod.Duration, "min-resync-period", s.MinResyncPeriod.Duration, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod.")
	fs.DurationVar(&s.NodeMonitorPeriod.Duration, "node-monitor-period", s.NodeMonitorPeriod.Duration,
		"The period for syncing NodeStatus in NodeController.")
	fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/")
	fs.BoolVar(&s.EnableContentionProfiling, "contention-profiling", false, "Enable lock contention profiling, if profiling is enabled.")
	fs.StringVar(&s.ClusterName, "cluster-name", s.ClusterName, "The instance prefix for the cluster.")
	fs.StringVar(&s.ClusterCIDR, "cluster-cidr", s.ClusterCIDR, "CIDR Range for Pods in cluster. Requires --allocate-node-cidrs to be true")
	fs.BoolVar(&s.AllocateNodeCIDRs, "allocate-node-cidrs", false, "Should CIDRs for Pods be allocated and set on the cloud provider.")
	fs.StringVar(&s.CIDRAllocatorType, "cidr-allocator-type", "RangeAllocator", "Type of CIDR allocator to use")
	fs.BoolVar(&s.ConfigureCloudRoutes, "configure-cloud-routes", true, "Should CIDRs allocated by allocate-node-cidrs be configured on the cloud provider.")
	fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig).")
	fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.")
	fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "Content type of requests sent to apiserver.")
	fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver.")
	fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver.")
	fs.DurationVar(&s.ControllerStartInterval.Duration, "controller-start-interval", s.ControllerStartInterval.Duration, "Interval between starting controller managers.")
}
cmd/controller-manager/app/serve.go (new file, 65 lines)
@ -0,0 +1,65 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package app

import (
	"net/http"
	"net/http/pprof"
	goruntime "runtime"
	"time"

	"github.com/prometheus/client_golang/prometheus"

	genericapifilters "k8s.io/apiserver/pkg/endpoints/filters"
	apirequest "k8s.io/apiserver/pkg/endpoints/request"
	genericfilters "k8s.io/apiserver/pkg/server/filters"
	"k8s.io/apiserver/pkg/server/healthz"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/util/configz"
)

type serveFunc func(handler http.Handler, shutdownTimeout time.Duration, stopCh <-chan struct{}) error

// Serve creates a base handler chain for a controller manager. It runs the
// chain with the given serveFunc.
func Serve(c *CompletedConfig, serveFunc serveFunc, stopCh <-chan struct{}) error {
	mux := http.NewServeMux()
	healthz.InstallHandler(mux)
	if c.ComponentConfig.EnableProfiling {
		mux.HandleFunc("/debug/pprof/", pprof.Index)
		mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
		mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
		if c.ComponentConfig.EnableContentionProfiling {
			goruntime.SetBlockProfileRate(1)
		}
	}
	configz.InstallHandler(mux)
	mux.Handle("/metrics", prometheus.Handler())

	requestContextMapper := apirequest.NewRequestContextMapper()
	requestInfoResolver := &apirequest.RequestInfoFactory{}
	failedHandler := genericapifilters.Unauthorized(requestContextMapper, legacyscheme.Codecs, false)

	handler := genericapifilters.WithAuthorization(mux, requestContextMapper, c.Authorization.Authorizer, legacyscheme.Codecs)
	handler = genericapifilters.WithAuthentication(handler, requestContextMapper, c.Authentication.Authenticator, failedHandler)
	handler = genericapifilters.WithRequestInfo(handler, requestInfoResolver, requestContextMapper)
	handler = apirequest.WithRequestContext(handler, requestContextMapper)
	handler = genericfilters.WithPanicRecovery(handler)

	return serveFunc(handler, 0, stopCh)
}
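serveFunc is deliberately shaped to match InsecureServingInfo.Serve, so the method value can be passed straight in. A hedged sketch of the call site (assumed; the actual wiring lands in the per-binary Run functions):

	// Sketch: running the shared handler chain over the insecure listener.
	cc := cfg.Complete() // cfg: *app.Config assembled by the options' ApplyTo
	if cfg.InsecureServing != nil {
		// InsecureServingInfo.Serve matches the serveFunc signature exactly.
		if err := app.Serve(&cc, cfg.InsecureServing.Serve, stopCh); err != nil {
			return err
		}
	}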
@ -42,7 +42,7 @@ import (
type ServerRunOptions struct {
	GenericServerRunOptions *genericoptions.ServerRunOptions
	Etcd                    *genericoptions.EtcdOptions
	SecureServing           *genericoptions.SecureServingOptions
	SecureServing           *genericoptions.SecureServingOptionsWithLoopback
	InsecureServing         *kubeoptions.InsecureServingOptions
	Audit                   *genericoptions.AuditOptions
	Features                *genericoptions.FeatureOptions

@ -26,6 +26,7 @@ import (

	"k8s.io/apimachinery/pkg/util/diff"
	apiserveroptions "k8s.io/apiserver/pkg/server/options"
	genericoptions "k8s.io/apiserver/pkg/server/options"
	"k8s.io/apiserver/pkg/storage/storagebackend"
	utilflag "k8s.io/apiserver/pkg/util/flag"
	auditwebhook "k8s.io/apiserver/plugin/pkg/audit/webhook"
@ -137,14 +138,14 @@ func TestAddFlags(t *testing.T) {
			EnableWatchCache:      true,
			DefaultWatchCacheSize: 100,
		},
		SecureServing: &apiserveroptions.SecureServingOptions{
		SecureServing: genericoptions.WithLoopback(&apiserveroptions.SecureServingOptions{
			BindAddress: net.ParseIP("192.168.10.20"),
			BindPort:    6443,
			ServerCert: apiserveroptions.GeneratableKeyCert{
				CertDirectory: "/var/run/kubernetes",
				PairName:      "apiserver",
			},
		},
		}),
		InsecureServing: &kubeoptions.InsecureServingOptions{
			BindAddress: net.ParseIP("127.0.0.1"),
			BindPort:    8080,
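The switch to SecureServingOptionsWithLoopback means the secure-serving options can additionally mint an in-process loopback client configuration; construction simply wraps the plain options. A hedged sketch (constructor usage assumed from the test above):

	// Sketch: the loopback variant wraps the plain options; the flags are unchanged.
	secureServing := genericoptions.NewSecureServingOptions()
	s := &ServerRunOptions{
		SecureServing: genericoptions.WithLoopback(secureServing),
	}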
@ -336,7 +336,6 @@ func CreateKubeAPIServerConfig(s *options.ServerRunOptions, nodeTunneler tunnele
			EnableCoreControllers: true,
			EventTTL:              s.EventTTL,
			KubeletClientConfig:   s.KubeletConfig,
			EnableUISupport:       true,
			EnableLogsSupport:     s.EnableLogsHandler,
			ProxyTransport:        proxyTransport,

@ -450,12 +449,12 @@ func BuildGenericConfig(s *options.ServerRunOptions, proxyTransport *http.Transp
		)
	}

	genericConfig.Authenticator, genericConfig.OpenAPIConfig.SecurityDefinitions, err = BuildAuthenticator(s, storageFactory, client, sharedInformers)
	genericConfig.Authentication.Authenticator, genericConfig.OpenAPIConfig.SecurityDefinitions, err = BuildAuthenticator(s, storageFactory, client, sharedInformers)
	if err != nil {
		return nil, nil, nil, nil, nil, fmt.Errorf("invalid authentication config: %v", err)
	}

	genericConfig.Authorizer, genericConfig.RuleResolver, err = BuildAuthorizer(s, sharedInformers, versionedInformers)
	genericConfig.Authorization.Authorizer, genericConfig.RuleResolver, err = BuildAuthorizer(s, sharedInformers, versionedInformers)
	if err != nil {
		return nil, nil, nil, nil, nil, fmt.Errorf("invalid authorization config: %v", err)
	}
@ -634,7 +633,7 @@ func BuildStorageFactory(s *options.ServerRunOptions, apiResourceConfig *servers

func defaultOptions(s *options.ServerRunOptions) error {
	// set defaults
	if err := s.GenericServerRunOptions.DefaultAdvertiseAddress(s.SecureServing); err != nil {
	if err := s.GenericServerRunOptions.DefaultAdvertiseAddress(s.SecureServing.SecureServingOptions); err != nil {
		return err
	}
	if err := kubeoptions.DefaultAdvertiseAddress(s.GenericServerRunOptions, s.InsecureServing); err != nil {
@ -25,7 +25,9 @@ go_library(
    ],
    importpath = "k8s.io/kubernetes/cmd/kube-controller-manager/app",
    deps = [
        "//cmd/controller-manager/app:go_default_library",
        "//cmd/controller-manager/app/options:go_default_library",
        "//cmd/kube-controller-manager/app/config:go_default_library",
        "//cmd/kube-controller-manager/app/options:go_default_library",
        "//pkg/api/legacyscheme:go_default_library",
        "//pkg/apis/apps/install:go_default_library",
@ -110,7 +112,6 @@ go_library(
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/vsphere_volume:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
        "//vendor/github.com/spf13/cobra:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/meta:go_default_library",
@ -119,20 +120,16 @@ go_library(
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/server/healthz:go_default_library",
        "//vendor/k8s.io/apiserver/pkg/util/feature:go_default_library",
        "//vendor/k8s.io/client-go/discovery:go_default_library",
        "//vendor/k8s.io/client-go/discovery/cached:go_default_library",
        "//vendor/k8s.io/client-go/dynamic:go_default_library",
        "//vendor/k8s.io/client-go/informers:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
        "//vendor/k8s.io/client-go/rest:go_default_library",
        "//vendor/k8s.io/client-go/scale:go_default_library",
        "//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
        "//vendor/k8s.io/client-go/tools/leaderelection:go_default_library",
        "//vendor/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library",
        "//vendor/k8s.io/client-go/tools/record:go_default_library",
        "//vendor/k8s.io/client-go/util/cert:go_default_library",
        "//vendor/k8s.io/metrics/pkg/client/clientset_generated/clientset/typed/metrics/v1beta1:go_default_library",
        "//vendor/k8s.io/metrics/pkg/client/custom_metrics:go_default_library",
@ -150,6 +147,7 @@ filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//cmd/kube-controller-manager/app/config:all-srcs",
        "//cmd/kube-controller-manager/app/options:all-srcs",
    ],
    tags = ["automanaged"],
@ -38,7 +38,7 @@ func startHPAController(ctx ControllerContext) (bool, error) {
		return false, nil
	}

	if ctx.Options.HorizontalPodAutoscalerUseRESTClients {
	if ctx.ComponentConfig.HorizontalPodAutoscalerUseRESTClients {
		// use the new-style clients if support for custom metrics is enabled
		return startHPAControllerWithRESTClient(ctx)
	}
@ -88,7 +88,7 @@ func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient me
	replicaCalc := podautoscaler.NewReplicaCalculator(
		metricsClient,
		hpaClient.CoreV1(),
		ctx.Options.HorizontalPodAutoscalerTolerance,
		ctx.ComponentConfig.HorizontalPodAutoscalerTolerance,
	)
	go podautoscaler.NewHorizontalController(
		hpaClientGoClient.CoreV1(),
@ -97,9 +97,9 @@ func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient me
		restMapper,
		replicaCalc,
		ctx.InformerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
		ctx.Options.HorizontalPodAutoscalerSyncPeriod.Duration,
		ctx.Options.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration,
		ctx.Options.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration,
		ctx.ComponentConfig.HorizontalPodAutoscalerSyncPeriod.Duration,
		ctx.ComponentConfig.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration,
		ctx.ComponentConfig.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration,
	).Run(ctx.Stop)
	return true, nil
}

@ -36,7 +36,7 @@ func startJobController(ctx ControllerContext) (bool, error) {
		ctx.InformerFactory.Core().V1().Pods(),
		ctx.InformerFactory.Batch().V1().Jobs(),
		ctx.ClientBuilder.ClientOrDie("job-controller"),
	).Run(int(ctx.Options.ConcurrentJobSyncs), ctx.Stop)
	).Run(int(ctx.ComponentConfig.ConcurrentJobSyncs), ctx.Stop)
	return true, nil
}

@ -37,7 +37,7 @@ func startCSRSigningController(ctx ControllerContext) (bool, error) {
	if !ctx.AvailableResources[schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"}] {
		return false, nil
	}
	if ctx.Options.ClusterSigningCertFile == "" || ctx.Options.ClusterSigningKeyFile == "" {
	if ctx.ComponentConfig.ClusterSigningCertFile == "" || ctx.ComponentConfig.ClusterSigningKeyFile == "" {
		return false, nil
	}

@ -52,15 +52,15 @@ func startCSRSigningController(ctx ControllerContext) (bool, error) {
	// bail out of startController without logging.
	var keyFileExists, keyUsesDefault, certFileExists, certUsesDefault bool

	_, err := os.Stat(ctx.Options.ClusterSigningCertFile)
	_, err := os.Stat(ctx.ComponentConfig.ClusterSigningCertFile)
	certFileExists = !os.IsNotExist(err)

	certUsesDefault = (ctx.Options.ClusterSigningCertFile == cmoptions.DefaultClusterSigningCertFile)
	certUsesDefault = (ctx.ComponentConfig.ClusterSigningCertFile == cmoptions.DefaultClusterSigningCertFile)

	_, err = os.Stat(ctx.Options.ClusterSigningKeyFile)
	_, err = os.Stat(ctx.ComponentConfig.ClusterSigningKeyFile)
	keyFileExists = !os.IsNotExist(err)

	keyUsesDefault = (ctx.Options.ClusterSigningKeyFile == cmoptions.DefaultClusterSigningKeyFile)
	keyUsesDefault = (ctx.ComponentConfig.ClusterSigningKeyFile == cmoptions.DefaultClusterSigningKeyFile)

	switch {
	case (keyFileExists && keyUsesDefault) || (certFileExists && certUsesDefault):
@ -84,9 +84,9 @@ func startCSRSigningController(ctx ControllerContext) (bool, error) {
	signer, err := signer.NewCSRSigningController(
		c,
		ctx.InformerFactory.Certificates().V1beta1().CertificateSigningRequests(),
		ctx.Options.ClusterSigningCertFile,
		ctx.Options.ClusterSigningKeyFile,
		ctx.Options.ClusterSigningDuration.Duration,
		ctx.ComponentConfig.ClusterSigningCertFile,
		ctx.ComponentConfig.ClusterSigningKeyFile,
		ctx.ComponentConfig.ClusterSigningDuration.Duration,
	)
	if err != nil {
		return false, fmt.Errorf("failed to start certificate controller: %v", err)
23	cmd/kube-controller-manager/app/config/BUILD	Normal file
@@ -0,0 +1,23 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = ["config.go"],
importpath = "k8s.io/kubernetes/cmd/kube-controller-manager/app/config",
visibility = ["//visibility:public"],
deps = ["//cmd/controller-manager/app:go_default_library"],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
55	cmd/kube-controller-manager/app/config/config.go	Normal file
@@ -0,0 +1,55 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package config

import (
"time"

genericcontrollermanager "k8s.io/kubernetes/cmd/controller-manager/app"
)

// ExtraConfig is part of Config; custom configuration can also be placed here.
type ExtraConfig struct {
NodeStatusUpdateFrequency time.Duration
}

// Config is the main context object for the controller manager.
type Config struct {
Generic genericcontrollermanager.Config
Extra ExtraConfig
}

type completedConfig struct {
Generic genericcontrollermanager.CompletedConfig
Extra *ExtraConfig
}

// CompletedConfig is the same as Config, just wrapping a private completed object.
type CompletedConfig struct {
// Embed a private pointer that cannot be instantiated outside of this package.
*completedConfig
}

// Complete fills in any fields not set that are required to have valid data. It mutates the receiver.
func (c *Config) Complete() *CompletedConfig {
cc := completedConfig{
c.Generic.Complete(),
&c.Extra,
}

return &CompletedConfig{&cc}
}
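The Complete/CompletedConfig pattern above follows the apiserver convention: Run only accepts a *CompletedConfig, so the type system guarantees that defaulting has happened before anything starts. A minimal sketch of the intended call sequence (the wrapper function name here is hypothetical; the real wiring appears in NewControllerManagerCommand later in this commit):

// Illustrative only: options become a Config, which is completed, then run.
func runControllerManager() error {
    s := options.NewKubeControllerManagerOptions()
    c, err := s.Config(KnownControllers(), ControllersDisabledByDefault.List())
    if err != nil {
        return err
    }
    return Run(c.Complete()) // Run takes only a completed config
}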
@@ -24,46 +24,34 @@ import (
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/http"
"net/http/pprof"
"os"
goruntime "runtime"
"strconv"
"time"

"github.com/golang/glog"
"github.com/spf13/cobra"

"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"

"k8s.io/apiserver/pkg/server/healthz"

"k8s.io/api/core/v1"
"k8s.io/client-go/discovery"
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/client-go/tools/record"
certutil "k8s.io/client-go/util/cert"

"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
certutil "k8s.io/client-go/util/cert"
genericcontrollerconfig "k8s.io/kubernetes/cmd/controller-manager/app"
"k8s.io/kubernetes/cmd/kube-controller-manager/app/config"
"k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
"k8s.io/kubernetes/pkg/serviceaccount"
"k8s.io/kubernetes/pkg/util/configz"
"k8s.io/kubernetes/pkg/version"

"github.com/golang/glog"
"github.com/prometheus/client_golang/prometheus"
"github.com/spf13/cobra"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/kubernetes/pkg/version/verflag"
)

@@ -81,7 +69,7 @@ const (

// NewControllerManagerCommand creates a *cobra.Command object with default parameters
func NewControllerManagerCommand() *cobra.Command {
s := options.NewCMServer()
s := options.NewKubeControllerManagerOptions()
cmd := &cobra.Command{
Use: "kube-controller-manager",
Long: `The Kubernetes controller manager is a daemon that embeds
@@ -94,7 +82,17 @@ Kubernetes today are the replication controller, endpoints controller, namespace
controller, and serviceaccounts controller.`,
Run: func(cmd *cobra.Command, args []string) {
verflag.PrintAndExitIfRequested()
Run(s)

c, err := s.Config(KnownControllers(), ControllersDisabledByDefault.List())
if err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}

if err := Run(c.Complete()); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
},
}
s.AddFlags(cmd.Flags(), KnownControllers(), ControllersDisabledByDefault.List())
@@ -105,59 +103,58 @@ controller, and serviceaccounts controller.`,
// ResyncPeriod returns a function which generates a duration each time it is
// invoked; this is so that multiple controllers don't get into lock-step and all
// hammer the apiserver with list requests simultaneously.
func ResyncPeriod(s *options.CMServer) func() time.Duration {
func ResyncPeriod(c *config.CompletedConfig) func() time.Duration {
return func() time.Duration {
factor := rand.Float64() + 1
return time.Duration(float64(s.MinResyncPeriod.Nanoseconds()) * factor)
return time.Duration(float64(c.Generic.ComponentConfig.MinResyncPeriod.Nanoseconds()) * factor)
}
}

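Since factor is drawn uniformly from [1, 2), each invocation yields a period between MinResyncPeriod and twice that value, so informers started at different times drift apart rather than listing in unison. A self-contained sketch of the same jitter (illustrative, standalone; not part of this diff):

package main

import (
    "fmt"
    "math/rand"
    "time"
)

// jitteredResync mimics ResyncPeriod above: it scales a base period by a
// random factor in [1, 2) so callers don't resync in lock-step.
func jitteredResync(min time.Duration) func() time.Duration {
    return func() time.Duration {
        factor := rand.Float64() + 1
        return time.Duration(float64(min.Nanoseconds()) * factor)
    }
}

func main() {
    next := jitteredResync(12 * time.Hour)
    fmt.Println(next(), next()) // two different periods, each in [12h, 24h)
}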
// Run runs the CMServer. This should never exit.
func Run(s *options.CMServer) error {
// Run runs the KubeControllerManagerOptions. This should never exit.
func Run(c *config.CompletedConfig) error {
// To help debugging, immediately log version
glog.Infof("Version: %+v", version.Get())
if err := s.Validate(KnownControllers(), ControllersDisabledByDefault.List()); err != nil {
return err
}

if c, err := configz.New("componentconfig"); err == nil {
c.Set(s.KubeControllerManagerConfiguration)
if cfgz, err := configz.New("componentconfig"); err == nil {
cfgz.Set(c.Generic.ComponentConfig)
} else {
glog.Errorf("unable to register configz: %s", err)
}

kubeClient, leaderElectionClient, kubeconfig, err := createClients(s)
if err != nil {
return err
// Start the controller manager HTTP server
stopCh := make(chan struct{})
if c.Generic.SecureServing != nil {
if err := genericcontrollerconfig.Serve(&c.Generic, c.Generic.SecureServing.Serve, stopCh); err != nil {
return err
}
}

if s.Port >= 0 {
go startHTTP(s)
if c.Generic.InsecureServing != nil {
if err := genericcontrollerconfig.Serve(&c.Generic, c.Generic.InsecureServing.Serve, stopCh); err != nil {
return err
}
}

recorder := createRecorder(kubeClient)

run := func(stop <-chan struct{}) {
rootClientBuilder := controller.SimpleControllerClientBuilder{
ClientConfig: kubeconfig,
ClientConfig: c.Generic.Kubeconfig,
}
var clientBuilder controller.ControllerClientBuilder
if s.UseServiceAccountCredentials {
if len(s.ServiceAccountKeyFile) == 0 {
// It's possible another controller process is creating the tokens for us.
if c.Generic.ComponentConfig.UseServiceAccountCredentials {
if len(c.Generic.ComponentConfig.ServiceAccountKeyFile) == 0 {
// It's possible another controller process is creating the tokens for us.
// If one isn't, we'll timeout and exit when our client builder is unable to create the tokens.
glog.Warningf("--use-service-account-credentials was specified without providing a --service-account-private-key-file")
}
clientBuilder = controller.SAControllerClientBuilder{
ClientConfig: restclient.AnonymousClientConfig(kubeconfig),
CoreClient: kubeClient.CoreV1(),
AuthenticationClient: kubeClient.AuthenticationV1(),
ClientConfig: restclient.AnonymousClientConfig(c.Generic.Kubeconfig),
CoreClient: c.Generic.Client.CoreV1(),
AuthenticationClient: c.Generic.Client.AuthenticationV1(),
Namespace: "kube-system",
}
} else {
clientBuilder = rootClientBuilder
}
ctx, err := CreateControllerContext(s, rootClientBuilder, clientBuilder, stop)
ctx, err := CreateControllerContext(c, rootClientBuilder, clientBuilder, stop)
if err != nil {
glog.Fatalf("error building controller context: %v", err)
}
@@ -173,7 +170,7 @@ func Run(s *options.CMServer) error {
select {}
}

if !s.LeaderElection.LeaderElect {
if !c.Generic.ComponentConfig.LeaderElection.LeaderElect {
run(wait.NeverStop)
panic("unreachable")
}
@@ -182,16 +179,16 @@ func Run(s *options.CMServer) error {
if err != nil {
return err
}

// add a uniquifier so that two processes on the same host don't accidentally both become active
id = id + "_" + string(uuid.NewUUID())

rl, err := resourcelock.New(s.LeaderElection.ResourceLock,
rl, err := resourcelock.New(c.Generic.ComponentConfig.LeaderElection.ResourceLock,
"kube-system",
"kube-controller-manager",
leaderElectionClient.CoreV1(),
c.Generic.LeaderElectionClient.CoreV1(),
resourcelock.ResourceLockConfig{
Identity: id,
EventRecorder: recorder,
EventRecorder: c.Generic.EventRecorder,
})
if err != nil {
glog.Fatalf("error creating lock: %v", err)
@@ -199,9 +196,9 @@ func Run(s *options.CMServer) error {

leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
Lock: rl,
LeaseDuration: s.LeaderElection.LeaseDuration.Duration,
RenewDeadline: s.LeaderElection.RenewDeadline.Duration,
RetryPeriod: s.LeaderElection.RetryPeriod.Duration,
LeaseDuration: c.Generic.ComponentConfig.LeaderElection.LeaseDuration.Duration,
RenewDeadline: c.Generic.ComponentConfig.LeaderElection.RenewDeadline.Duration,
RetryPeriod: c.Generic.ComponentConfig.LeaderElection.RetryPeriod.Duration,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: run,
OnStoppedLeading: func() {
@@ -212,53 +209,6 @@ func Run(s *options.CMServer) error {
panic("unreachable")
}

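For reference, the leader-election wiring above can be exercised on its own. A minimal sketch against the client-go API of this vintage (the kubeconfig source, lock namespace/name, and durations are assumptions for illustration; production code also supplies an EventRecorder in ResourceLockConfig, as the diff does):

// Illustrative standalone leader election, mirroring the calls above.
package main

import (
    "fmt"
    "os"
    "time"

    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/clientcmd"
    "k8s.io/client-go/tools/leaderelection"
    "k8s.io/client-go/tools/leaderelection/resourcelock"
)

func main() {
    cfg, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG")) // assumed kubeconfig
    if err != nil {
        panic(err)
    }
    client := kubernetes.NewForConfigOrDie(cfg)

    hostname, _ := os.Hostname()
    rl, err := resourcelock.New(resourcelock.EndpointsResourceLock,
        "kube-system", "demo-lock", // assumed lock namespace/name
        client.CoreV1(),
        resourcelock.ResourceLockConfig{Identity: hostname})
    if err != nil {
        panic(err)
    }

    leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
        Lock:          rl,
        LeaseDuration: 15 * time.Second,
        RenewDeadline: 10 * time.Second,
        RetryPeriod:   2 * time.Second,
        Callbacks: leaderelection.LeaderCallbacks{
            OnStartedLeading: func(stop <-chan struct{}) { fmt.Println("became leader"); <-stop },
            OnStoppedLeading: func() { fmt.Println("lost leadership") },
        },
    })
}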
func startHTTP(s *options.CMServer) {
mux := http.NewServeMux()
healthz.InstallHandler(mux)
if s.EnableProfiling {
mux.HandleFunc("/debug/pprof/", pprof.Index)
mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
mux.HandleFunc("/debug/pprof/trace", pprof.Trace)
if s.EnableContentionProfiling {
goruntime.SetBlockProfileRate(1)
}
}
configz.InstallHandler(mux)
mux.Handle("/metrics", prometheus.Handler())

server := &http.Server{
Addr: net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
Handler: mux,
}
glog.Fatal(server.ListenAndServe())
}

func createRecorder(kubeClient *clientset.Clientset) record.EventRecorder {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events("")})
return eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "controller-manager"})
}

func createClients(s *options.CMServer) (*clientset.Clientset, *clientset.Clientset, *restclient.Config, error) {
kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
if err != nil {
return nil, nil, nil, err
}

kubeconfig.ContentConfig.ContentType = s.ContentType
// Override kubeconfig qps/burst settings from flags
kubeconfig.QPS = s.KubeAPIQPS
kubeconfig.Burst = int(s.KubeAPIBurst)
kubeClient, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "controller-manager"))
if err != nil {
glog.Fatalf("Invalid API configuration: %v", err)
}
leaderElectionClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "leader-election"))
return kubeClient, leaderElectionClient, kubeconfig, nil
}

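The createClients helper deleted here is superseded by clients carried on the generic config (c.Generic.Client, c.Generic.LeaderElectionClient), but the underlying pattern is reusable: one rest.Config, several clients tagged with distinct user agents so they can be told apart in apiserver audit and logs. A minimal sketch (QPS/burst numbers are illustrative):

// Illustrative: one kubeconfig, two clients with separate user agents.
func buildClients(master, kubeconfigPath string) (*clientset.Clientset, *clientset.Clientset, error) {
    cfg, err := clientcmd.BuildConfigFromFlags(master, kubeconfigPath)
    if err != nil {
        return nil, nil, err
    }
    cfg.QPS = 20   // assumed rate limits for illustration
    cfg.Burst = 30
    mainClient, err := clientset.NewForConfig(restclient.AddUserAgent(cfg, "controller-manager"))
    if err != nil {
        return nil, nil, err
    }
    leClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(cfg, "leader-election"))
    return mainClient, leClient, nil
}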
type ControllerContext struct {
// ClientBuilder will provide a client for this controller to use
ClientBuilder controller.ControllerClientBuilder
@@ -267,7 +217,7 @@ type ControllerContext struct {
InformerFactory informers.SharedInformerFactory

// Options provides access to init options for a given controller
Options options.CMServer
ComponentConfig componentconfig.KubeControllerManagerConfiguration

// AvailableResources is a map listing currently available resources
AvailableResources map[schema.GroupVersionResource]bool
@@ -287,10 +237,15 @@ type ControllerContext struct {
// InformersStarted is closed after all of the controllers have been initialized and are running. After this point it is safe,
// for an individual controller to start the shared informers. Before it is closed, they should not.
InformersStarted chan struct{}

// ResyncPeriod generates a duration each time it is invoked; this is so that
// multiple controllers don't get into lock-step and all hammer the apiserver
// with list requests simultaneously.
ResyncPeriod func() time.Duration
}

func (c ControllerContext) IsControllerEnabled(name string) bool {
return IsControllerEnabled(name, ControllersDisabledByDefault, c.Options.Controllers...)
return IsControllerEnabled(name, ControllersDisabledByDefault, c.ComponentConfig.Controllers...)
}

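IsControllerEnabled implements the '--controllers' selection grammar documented later in AddFlags: '*' turns on everything that is on by default, 'foo' force-enables a controller, '-foo' force-disables it, and explicit matches win over the star. A simplified standalone re-implementation of that matching (illustrative; the real function takes the disabled-by-default set as a sets.String):

// Illustrative: simplified matching for "--controllers=*,tokencleaner,-daemonset".
func isEnabled(name string, disabledByDefault map[string]bool, selectors []string) bool {
    hasStar := false
    for _, sel := range selectors {
        switch {
        case sel == name:
            return true // explicitly enabled
        case sel == "-"+name:
            return false // explicitly disabled
        case sel == "*":
            hasStar = true
        }
    }
    // "*" enables everything except the disabled-by-default set.
    return hasStar && !disabledByDefault[name]
}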
func IsControllerEnabled(name string, disabledByDefaultControllers sets.String, controllers ...string) bool {
@@ -446,7 +401,7 @@ func GetAvailableResources(clientBuilder controller.ControllerClientBuilder) (ma
// CreateControllerContext creates a context struct containing references to resources needed by the
// controllers such as the cloud provider and clientBuilder. rootClientBuilder is only used for
// the shared-informers client and token controller.
func CreateControllerContext(s *options.CMServer, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}) (ControllerContext, error) {
func CreateControllerContext(s *config.CompletedConfig, rootClientBuilder, clientBuilder controller.ControllerClientBuilder, stop <-chan struct{}) (ControllerContext, error) {
versionedClient := rootClientBuilder.ClientOrDie("shared-informers")
sharedInformers := informers.NewSharedInformerFactory(versionedClient, ResyncPeriod(s)())

@@ -455,8 +410,8 @@ func CreateControllerContext(s *options.CMServer, rootClientBuilder, clientBuild
return ControllerContext{}, err
}

cloud, loopMode, err := createCloudProvider(s.CloudProvider, s.ExternalCloudVolumePlugin,
s.CloudConfigFile, s.AllowUntaggedCloud, sharedInformers)
cloud, loopMode, err := createCloudProvider(s.Generic.ComponentConfig.CloudProvider, s.Generic.ComponentConfig.ExternalCloudVolumePlugin,
s.Generic.ComponentConfig.CloudConfigFile, s.Generic.ComponentConfig.AllowUntaggedCloud, sharedInformers)
if err != nil {
return ControllerContext{}, err
}
@@ -464,12 +419,13 @@ func CreateControllerContext(s *options.CMServer, rootClientBuilder, clientBuild
ctx := ControllerContext{
ClientBuilder: clientBuilder,
InformerFactory: sharedInformers,
Options: *s,
ComponentConfig: s.Generic.ComponentConfig,
AvailableResources: availableResources,
Cloud: cloud,
LoopMode: loopMode,
Stop: stop,
InformersStarted: make(chan struct{}),
ResyncPeriod: ResyncPeriod(s),
}
return ctx, nil
}

@@ -493,7 +449,7 @@ func StartControllers(ctx ControllerContext, startSATokenController InitFunc, co
continue
}

time.Sleep(wait.Jitter(ctx.Options.ControllerStartInterval.Duration, ControllerStartJitter))
time.Sleep(wait.Jitter(ctx.ComponentConfig.ControllerStartInterval.Duration, ControllerStartJitter))

glog.V(1).Infof("Starting %q", controllerName)
started, err := initFn(ctx)
@@ -524,23 +480,23 @@ func (c serviceAccountTokenControllerStarter) startServiceAccountTokenController
return false, nil
}

if len(ctx.Options.ServiceAccountKeyFile) == 0 {
if len(ctx.ComponentConfig.ServiceAccountKeyFile) == 0 {
glog.Warningf("%q is disabled because there is no private key", saTokenControllerName)
return false, nil
}
privateKey, err := certutil.PrivateKeyFromFile(ctx.Options.ServiceAccountKeyFile)
privateKey, err := certutil.PrivateKeyFromFile(ctx.ComponentConfig.ServiceAccountKeyFile)
if err != nil {
return true, fmt.Errorf("error reading key for service account token controller: %v", err)
}

var rootCA []byte
if ctx.Options.RootCAFile != "" {
rootCA, err = ioutil.ReadFile(ctx.Options.RootCAFile)
if ctx.ComponentConfig.RootCAFile != "" {
rootCA, err = ioutil.ReadFile(ctx.ComponentConfig.RootCAFile)
if err != nil {
return true, fmt.Errorf("error reading root-ca-file at %s: %v", ctx.Options.RootCAFile, err)
return true, fmt.Errorf("error reading root-ca-file at %s: %v", ctx.ComponentConfig.RootCAFile, err)
}
if _, err := certutil.ParseCertsPEM(rootCA); err != nil {
return true, fmt.Errorf("error parsing root-ca-file at %s: %v", ctx.Options.RootCAFile, err)
return true, fmt.Errorf("error parsing root-ca-file at %s: %v", ctx.ComponentConfig.RootCAFile, err)
}
} else {
rootCA = c.rootClientBuilder.ConfigOrDie("tokens-controller").CAData
@@ -558,7 +514,7 @@ func (c serviceAccountTokenControllerStarter) startServiceAccountTokenController
if err != nil {
return true, fmt.Errorf("error creating Tokens controller: %v", err)
}
go controller.Run(int(ctx.Options.ConcurrentSATokenSyncs), ctx.Stop)
go controller.Run(int(ctx.ComponentConfig.ConcurrentSATokenSyncs), ctx.Stop)

// start the first set of informers now so that other controllers can start
ctx.InformerFactory.Start(ctx.Stop)

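StartControllers spaces controller start-up with wait.Jitter, which returns a duration drawn from [d, d + maxFactor*d), so consecutive controllers never all hit the apiserver at the same instant. A tiny standalone illustration (the base duration and factor here are arbitrary):

package main

import (
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
)

func main() {
    base := 100 * time.Millisecond
    // With maxFactor 1.0, each result is uniform in [100ms, 200ms).
    for i := 0; i < 3; i++ {
        fmt.Println(wait.Jitter(base, 1.0))
    }
}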
@@ -68,33 +68,33 @@ func startServiceController(ctx ControllerContext) (bool, error) {
ctx.ClientBuilder.ClientOrDie("service-controller"),
ctx.InformerFactory.Core().V1().Services(),
ctx.InformerFactory.Core().V1().Nodes(),
ctx.Options.ClusterName,
ctx.ComponentConfig.ClusterName,
)
if err != nil {
// This error shouldn't be fatal. It lives like this as a legacy.
glog.Errorf("Failed to start service controller: %v", err)
return false, nil
}
go serviceController.Run(ctx.Stop, int(ctx.Options.ConcurrentServiceSyncs))
go serviceController.Run(ctx.Stop, int(ctx.ComponentConfig.ConcurrentServiceSyncs))
return true, nil
}

func startNodeIpamController(ctx ControllerContext) (bool, error) {
var clusterCIDR *net.IPNet = nil
var serviceCIDR *net.IPNet = nil
if ctx.Options.AllocateNodeCIDRs {
if ctx.ComponentConfig.AllocateNodeCIDRs {
var err error
if len(strings.TrimSpace(ctx.Options.ClusterCIDR)) != 0 {
_, clusterCIDR, err = net.ParseCIDR(ctx.Options.ClusterCIDR)
if len(strings.TrimSpace(ctx.ComponentConfig.ClusterCIDR)) != 0 {
_, clusterCIDR, err = net.ParseCIDR(ctx.ComponentConfig.ClusterCIDR)
if err != nil {
glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.Options.ClusterCIDR, err)
glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.ClusterCIDR, err)
}
}

if len(strings.TrimSpace(ctx.Options.ServiceCIDR)) != 0 {
_, serviceCIDR, err = net.ParseCIDR(ctx.Options.ServiceCIDR)
if len(strings.TrimSpace(ctx.ComponentConfig.ServiceCIDR)) != 0 {
_, serviceCIDR, err = net.ParseCIDR(ctx.ComponentConfig.ServiceCIDR)
if err != nil {
glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.Options.ServiceCIDR, err)
glog.Warningf("Unsuccessful parsing of service CIDR %v: %v", ctx.ComponentConfig.ServiceCIDR, err)
}
}
}
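net.ParseCIDR returns the literal IP, the network as a *net.IPNet, and an error; the controller keeps only the network. A quick standalone illustration:

package main

import (
    "fmt"
    "net"
)

func main() {
    ip, cidr, err := net.ParseCIDR("10.244.1.7/16")
    if err != nil {
        panic(err)
    }
    fmt.Println(ip)   // 10.244.1.7 (the literal address)
    fmt.Println(cidr) // 10.244.0.0/16 (the network the controller actually uses)
}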
@@ -105,9 +105,9 @@ func startNodeIpamController(ctx ControllerContext) (bool, error) {
ctx.ClientBuilder.ClientOrDie("node-controller"),
clusterCIDR,
serviceCIDR,
int(ctx.Options.NodeCIDRMaskSize),
ctx.Options.AllocateNodeCIDRs,
ipam.CIDRAllocatorType(ctx.Options.CIDRAllocatorType),
int(ctx.ComponentConfig.NodeCIDRMaskSize),
ctx.ComponentConfig.AllocateNodeCIDRs,
ipam.CIDRAllocatorType(ctx.ComponentConfig.CIDRAllocatorType),
)
if err != nil {
return true, err
@@ -123,15 +123,15 @@ func startNodeLifecycleController(ctx ControllerContext) (bool, error) {
ctx.InformerFactory.Extensions().V1beta1().DaemonSets(),
ctx.Cloud,
ctx.ClientBuilder.ClientOrDie("node-controller"),
ctx.Options.NodeMonitorPeriod.Duration,
ctx.Options.NodeStartupGracePeriod.Duration,
ctx.Options.NodeMonitorGracePeriod.Duration,
ctx.Options.PodEvictionTimeout.Duration,
ctx.Options.NodeEvictionRate,
ctx.Options.SecondaryNodeEvictionRate,
ctx.Options.LargeClusterSizeThreshold,
ctx.Options.UnhealthyZoneThreshold,
ctx.Options.EnableTaintManager,
ctx.ComponentConfig.NodeMonitorPeriod.Duration,
ctx.ComponentConfig.NodeStartupGracePeriod.Duration,
ctx.ComponentConfig.NodeMonitorGracePeriod.Duration,
ctx.ComponentConfig.PodEvictionTimeout.Duration,
ctx.ComponentConfig.NodeEvictionRate,
ctx.ComponentConfig.SecondaryNodeEvictionRate,
ctx.ComponentConfig.LargeClusterSizeThreshold,
ctx.ComponentConfig.UnhealthyZoneThreshold,
ctx.ComponentConfig.EnableTaintManager,
utilfeature.DefaultFeatureGate.Enabled(features.TaintBasedEvictions),
utilfeature.DefaultFeatureGate.Enabled(features.TaintNodesByCondition),
)
@@ -143,8 +143,8 @@ func startNodeLifecycleController(ctx ControllerContext) (bool, error) {
}

func startRouteController(ctx ControllerContext) (bool, error) {
if !ctx.Options.AllocateNodeCIDRs || !ctx.Options.ConfigureCloudRoutes {
glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", ctx.Options.AllocateNodeCIDRs, ctx.Options.ConfigureCloudRoutes)
if !ctx.ComponentConfig.AllocateNodeCIDRs || !ctx.ComponentConfig.ConfigureCloudRoutes {
glog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", ctx.ComponentConfig.AllocateNodeCIDRs, ctx.ComponentConfig.ConfigureCloudRoutes)
return false, nil
}
if ctx.Cloud == nil {
@@ -156,27 +156,27 @@ func startRouteController(ctx ControllerContext) (bool, error) {
glog.Warning("configure-cloud-routes is set, but cloud provider does not support routes. Will not configure cloud provider routes.")
return false, nil
}
_, clusterCIDR, err := net.ParseCIDR(ctx.Options.ClusterCIDR)
_, clusterCIDR, err := net.ParseCIDR(ctx.ComponentConfig.ClusterCIDR)
if err != nil {
glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.Options.ClusterCIDR, err)
glog.Warningf("Unsuccessful parsing of cluster CIDR %v: %v", ctx.ComponentConfig.ClusterCIDR, err)
}
routeController := routecontroller.New(routes, ctx.ClientBuilder.ClientOrDie("route-controller"), ctx.InformerFactory.Core().V1().Nodes(), ctx.Options.ClusterName, clusterCIDR)
go routeController.Run(ctx.Stop, ctx.Options.RouteReconciliationPeriod.Duration)
routeController := routecontroller.New(routes, ctx.ClientBuilder.ClientOrDie("route-controller"), ctx.InformerFactory.Core().V1().Nodes(), ctx.ComponentConfig.ClusterName, clusterCIDR)
go routeController.Run(ctx.Stop, ctx.ComponentConfig.RouteReconciliationPeriod.Duration)
return true, nil
}

func startPersistentVolumeBinderController(ctx ControllerContext) (bool, error) {
params := persistentvolumecontroller.ControllerParameters{
KubeClient: ctx.ClientBuilder.ClientOrDie("persistent-volume-binder"),
SyncPeriod: ctx.Options.PVClaimBinderSyncPeriod.Duration,
VolumePlugins: ProbeControllerVolumePlugins(ctx.Cloud, ctx.Options.VolumeConfiguration),
SyncPeriod: ctx.ComponentConfig.PVClaimBinderSyncPeriod.Duration,
VolumePlugins: ProbeControllerVolumePlugins(ctx.Cloud, ctx.ComponentConfig.VolumeConfiguration),
Cloud: ctx.Cloud,
ClusterName: ctx.Options.ClusterName,
ClusterName: ctx.ComponentConfig.ClusterName,
VolumeInformer: ctx.InformerFactory.Core().V1().PersistentVolumes(),
ClaimInformer: ctx.InformerFactory.Core().V1().PersistentVolumeClaims(),
ClassInformer: ctx.InformerFactory.Storage().V1().StorageClasses(),
PodInformer: ctx.InformerFactory.Core().V1().Pods(),
EnableDynamicProvisioning: ctx.Options.VolumeConfiguration.EnableDynamicProvisioning,
EnableDynamicProvisioning: ctx.ComponentConfig.VolumeConfiguration.EnableDynamicProvisioning,
}
volumeController, volumeControllerErr := persistentvolumecontroller.NewController(params)
if volumeControllerErr != nil {
@@ -187,7 +187,7 @@ func startPersistentVolumeBinderController(ctx ControllerContext) (bool, error)
}

func startAttachDetachController(ctx ControllerContext) (bool, error) {
if ctx.Options.ReconcilerSyncLoopPeriod.Duration < time.Second {
if ctx.ComponentConfig.ReconcilerSyncLoopPeriod.Duration < time.Second {
return true, fmt.Errorf("Duration time must be greater than one second as set via command line option reconcile-sync-loop-period.")
}
attachDetachController, attachDetachControllerErr :=
@@ -199,9 +199,9 @@ func startAttachDetachController(ctx ControllerContext) (bool, error) {
ctx.InformerFactory.Core().V1().PersistentVolumes(),
ctx.Cloud,
ProbeAttachableVolumePlugins(),
GetDynamicPluginProber(ctx.Options.VolumeConfiguration),
ctx.Options.DisableAttachDetachReconcilerSync,
ctx.Options.ReconcilerSyncLoopPeriod.Duration,
GetDynamicPluginProber(ctx.ComponentConfig.VolumeConfiguration),
ctx.ComponentConfig.DisableAttachDetachReconcilerSync,
ctx.ComponentConfig.ReconcilerSyncLoopPeriod.Duration,
attachdetach.DefaultTimerConfig,
)
if attachDetachControllerErr != nil {
@@ -218,7 +218,7 @@ func startVolumeExpandController(ctx ControllerContext) (bool, error) {
ctx.InformerFactory.Core().V1().PersistentVolumeClaims(),
ctx.InformerFactory.Core().V1().PersistentVolumes(),
ctx.Cloud,
ProbeExpandableVolumePlugins(ctx.Options.VolumeConfiguration))
ProbeExpandableVolumePlugins(ctx.ComponentConfig.VolumeConfiguration))

if expandControllerErr != nil {
return true, fmt.Errorf("Failed to start volume expand controller : %v", expandControllerErr)
@@ -235,7 +235,7 @@ func startEndpointController(ctx ControllerContext) (bool, error) {
ctx.InformerFactory.Core().V1().Services(),
ctx.InformerFactory.Core().V1().Endpoints(),
ctx.ClientBuilder.ClientOrDie("endpoint-controller"),
).Run(int(ctx.Options.ConcurrentEndpointSyncs), ctx.Stop)
).Run(int(ctx.ComponentConfig.ConcurrentEndpointSyncs), ctx.Stop)
return true, nil
}

@@ -245,7 +245,7 @@ func startReplicationController(ctx ControllerContext) (bool, error) {
ctx.InformerFactory.Core().V1().ReplicationControllers(),
ctx.ClientBuilder.ClientOrDie("replication-controller"),
replicationcontroller.BurstReplicas,
).Run(int(ctx.Options.ConcurrentRCSyncs), ctx.Stop)
).Run(int(ctx.ComponentConfig.ConcurrentRCSyncs), ctx.Stop)
return true, nil
}

@@ -253,7 +253,7 @@ func startPodGCController(ctx ControllerContext) (bool, error) {
go podgc.NewPodGC(
ctx.ClientBuilder.ClientOrDie("pod-garbage-collector"),
ctx.InformerFactory.Core().V1().Pods(),
int(ctx.Options.TerminatedPodGCThreshold),
int(ctx.ComponentConfig.TerminatedPodGCThreshold),
).Run(ctx.Stop)
return true, nil
}
@@ -267,9 +267,9 @@ func startResourceQuotaController(ctx ControllerContext) (bool, error) {
resourceQuotaControllerOptions := &resourcequotacontroller.ResourceQuotaControllerOptions{
QuotaClient: resourceQuotaControllerClient.CoreV1(),
ResourceQuotaInformer: ctx.InformerFactory.Core().V1().ResourceQuotas(),
ResyncPeriod: controller.StaticResyncPeriodFunc(ctx.Options.ResourceQuotaSyncPeriod.Duration),
ResyncPeriod: controller.StaticResyncPeriodFunc(ctx.ComponentConfig.ResourceQuotaSyncPeriod.Duration),
InformerFactory: ctx.InformerFactory,
ReplenishmentResyncPeriod: ResyncPeriod(&ctx.Options),
ReplenishmentResyncPeriod: ctx.ResyncPeriod,
DiscoveryFunc: discoveryFunc,
IgnoredResourcesFunc: quotaConfiguration.IgnoredResources,
InformersStarted: ctx.InformersStarted,
@@ -285,7 +285,7 @@ func startResourceQuotaController(ctx ControllerContext) (bool, error) {
if err != nil {
return false, err
}
go resourceQuotaController.Run(int(ctx.Options.ConcurrentResourceQuotaSyncs), ctx.Stop)
go resourceQuotaController.Run(int(ctx.ComponentConfig.ConcurrentResourceQuotaSyncs), ctx.Stop)

// Periodically resync the quota controller to detect new resource types
go resourceQuotaController.Sync(discoveryFunc, 30*time.Second, ctx.Stop)
@@ -313,10 +313,10 @@ func startNamespaceController(ctx ControllerContext) (bool, error) {
namespaceClientPool,
discoverResourcesFn,
ctx.InformerFactory.Core().V1().Namespaces(),
ctx.Options.NamespaceSyncPeriod.Duration,
ctx.ComponentConfig.NamespaceSyncPeriod.Duration,
v1.FinalizerKubernetes,
)
go namespaceController.Run(int(ctx.Options.ConcurrentNamespaceSyncs), ctx.Stop)
go namespaceController.Run(int(ctx.ComponentConfig.ConcurrentNamespaceSyncs), ctx.Stop)

return true, nil
}
@@ -344,7 +344,7 @@ func startTTLController(ctx ControllerContext) (bool, error) {
}

func startGarbageCollectorController(ctx ControllerContext) (bool, error) {
if !ctx.Options.EnableGarbageCollector {
if !ctx.ComponentConfig.EnableGarbageCollector {
return false, nil
}

@@ -367,7 +367,7 @@ func startGarbageCollectorController(ctx ControllerContext) (bool, error) {
// Get an initial set of deletable resources to prime the garbage collector.
deletableResources := garbagecollector.GetDeletableResources(discoveryClient)
ignoredResources := make(map[schema.GroupResource]struct{})
for _, r := range ctx.Options.GCIgnoredResources {
for _, r := range ctx.ComponentConfig.GCIgnoredResources {
ignoredResources[schema.GroupResource{Group: r.Group, Resource: r.Resource}] = struct{}{}
}
garbageCollector, err := garbagecollector.NewGarbageCollector(
@@ -384,7 +384,7 @@ func startGarbageCollectorController(ctx ControllerContext) (bool, error) {
}

// Start the garbage collector.
workers := int(ctx.Options.ConcurrentGCSyncs)
workers := int(ctx.ComponentConfig.ConcurrentGCSyncs)
go garbageCollector.Run(workers, ctx.Stop)

// Periodically refresh the RESTMapper with new discovery information and sync

@@ -43,7 +43,7 @@ func startDaemonSetController(ctx ControllerContext) (bool, error) {
if err != nil {
return true, fmt.Errorf("error creating DaemonSets controller: %v", err)
}
go dsc.Run(int(ctx.Options.ConcurrentDaemonSetSyncs), ctx.Stop)
go dsc.Run(int(ctx.ComponentConfig.ConcurrentDaemonSetSyncs), ctx.Stop)
return true, nil
}

@@ -60,7 +60,7 @@ func startDeploymentController(ctx ControllerContext) (bool, error) {
if err != nil {
return true, fmt.Errorf("error creating Deployment controller: %v", err)
}
go dc.Run(int(ctx.Options.ConcurrentDeploymentSyncs), ctx.Stop)
go dc.Run(int(ctx.ComponentConfig.ConcurrentDeploymentSyncs), ctx.Stop)
return true, nil
}

@@ -73,6 +73,6 @@ func startReplicaSetController(ctx ControllerContext) (bool, error) {
ctx.InformerFactory.Core().V1().Pods(),
ctx.ClientBuilder.ClientOrDie("replicaset-controller"),
replicaset.BurstReplicas,
).Run(int(ctx.Options.ConcurrentRSSyncs), ctx.Stop)
).Run(int(ctx.ComponentConfig.ConcurrentRSSyncs), ctx.Stop)
return true, nil
}

@@ -12,6 +12,7 @@ go_library(
importpath = "k8s.io/kubernetes/cmd/kube-controller-manager/app/options",
deps = [
"//cmd/controller-manager/app/options:go_default_library",
"//cmd/kube-controller-manager/app/config:go_default_library",
"//pkg/apis/componentconfig:go_default_library",
"//pkg/client/leaderelectionconfig:go_default_library",
"//pkg/controller/garbagecollector:go_default_library",
@@ -49,5 +50,6 @@ go_test(
"//vendor/github.com/spf13/pflag:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//vendor/k8s.io/apiserver/pkg/server/options:go_default_library",
],
)

@@ -26,6 +26,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
cmoptions "k8s.io/kubernetes/cmd/controller-manager/app/options"
kubecontrollerconfig "k8s.io/kubernetes/cmd/kube-controller-manager/app/config"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/client/leaderelectionconfig"
"k8s.io/kubernetes/pkg/controller/garbagecollector"
@@ -37,117 +38,127 @@ import (
"github.com/spf13/pflag"
)

// CMServer is the main context object for the controller manager.
type CMServer struct {
cmoptions.ControllerManagerServer
// KubeControllerManagerOptions is the main context object for the controller manager.
type KubeControllerManagerOptions struct {
Generic cmoptions.GenericControllerManagerOptions
}

// NewCMServer creates a new CMServer with a default config.
func NewCMServer() *CMServer {
// NewKubeControllerManagerOptions creates a new KubeControllerManagerOptions with a default config.
func NewKubeControllerManagerOptions() *KubeControllerManagerOptions {
componentConfig := cmoptions.NewDefaultControllerManagerComponentConfig(ports.InsecureKubeControllerManagerPort)
s := KubeControllerManagerOptions{
// The common/default are kept in 'cmd/kube-controller-manager/app/options/util.go'.
// Please make common changes there but put anything kube-controller specific here.
Generic: cmoptions.NewGenericControllerManagerOptions(componentConfig),
}

s.Generic.SecureServing.ServerCert.CertDirectory = "/var/run/kubernetes"
s.Generic.SecureServing.ServerCert.PairName = "kube-controller-manager"

gcIgnoredResources := make([]componentconfig.GroupResource, 0, len(garbagecollector.DefaultIgnoredResources()))
for r := range garbagecollector.DefaultIgnoredResources() {
gcIgnoredResources = append(gcIgnoredResources, componentconfig.GroupResource{Group: r.Group, Resource: r.Resource})
}
s.Generic.ComponentConfig.GCIgnoredResources = gcIgnoredResources
s.Generic.ComponentConfig.LeaderElection.LeaderElect = true

s := CMServer{
// The common/default are kept in 'cmd/kube-controller-manager/app/options/util.go'.
// Please make common changes there but put anything kube-controller specific here.
ControllerManagerServer: cmoptions.ControllerManagerServer{
KubeControllerManagerConfiguration: cmoptions.GetDefaultControllerOptions(ports.ControllerManagerPort),
},
}
s.KubeControllerManagerConfiguration.GCIgnoredResources = gcIgnoredResources
s.LeaderElection.LeaderElect = true
return &s
}

// AddFlags adds flags for a specific CMServer to the specified FlagSet
func (s *CMServer) AddFlags(fs *pflag.FlagSet, allControllers []string, disabledByDefaultControllers []string) {
cmoptions.AddDefaultControllerFlags(&s.ControllerManagerServer, fs)
// AddFlags adds flags for a specific KubeControllerManagerOptions to the specified FlagSet
func (s *KubeControllerManagerOptions) AddFlags(fs *pflag.FlagSet, allControllers []string, disabledByDefaultControllers []string) {
s.Generic.AddFlags(fs)

fs.StringSliceVar(&s.Controllers, "controllers", s.Controllers, fmt.Sprintf(""+
fs.StringSliceVar(&s.Generic.ComponentConfig.Controllers, "controllers", s.Generic.ComponentConfig.Controllers, fmt.Sprintf(""+
"A list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller "+
"named 'foo', '-foo' disables the controller named 'foo'.\nAll controllers: %s\nDisabled-by-default controllers: %s",
strings.Join(allControllers, ", "), strings.Join(disabledByDefaultControllers, ", ")))
fs.StringVar(&s.CloudProvider, "cloud-provider", s.CloudProvider, "The provider for cloud services. Empty string for no provider.")
fs.StringVar(&s.ExternalCloudVolumePlugin, "external-cloud-volume-plugin", s.ExternalCloudVolumePlugin, "The plugin to use when cloud provider is set to external. Can be empty, should only be set when cloud-provider is external. Currently used to allow node and volume controllers to work for in tree cloud providers.")
fs.Int32Var(&s.ConcurrentEndpointSyncs, "concurrent-endpoint-syncs", s.ConcurrentEndpointSyncs, "The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load")
fs.Int32Var(&s.ConcurrentServiceSyncs, "concurrent-service-syncs", s.ConcurrentServiceSyncs, "The number of services that are allowed to sync concurrently. Larger number = more responsive service management, but more CPU (and network) load")
fs.Int32Var(&s.ConcurrentRCSyncs, "concurrent_rc_syncs", s.ConcurrentRCSyncs, "The number of replication controllers that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load")
fs.Int32Var(&s.ConcurrentRSSyncs, "concurrent-replicaset-syncs", s.ConcurrentRSSyncs, "The number of replica sets that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load")
fs.StringVar(&s.Generic.ComponentConfig.CloudProvider, "cloud-provider", s.Generic.ComponentConfig.CloudProvider, "The provider for cloud services. Empty string for no provider.")
fs.StringVar(&s.Generic.ComponentConfig.ExternalCloudVolumePlugin, "external-cloud-volume-plugin", s.Generic.ComponentConfig.ExternalCloudVolumePlugin, "The plugin to use when cloud provider is set to external. Can be empty, should only be set when cloud-provider is external. Currently used to allow node and volume controllers to work for in tree cloud providers.")
fs.Int32Var(&s.Generic.ComponentConfig.ConcurrentEndpointSyncs, "concurrent-endpoint-syncs", s.Generic.ComponentConfig.ConcurrentEndpointSyncs, "The number of endpoint syncing operations that will be done concurrently. Larger number = faster endpoint updating, but more CPU (and network) load")
fs.Int32Var(&s.Generic.ComponentConfig.ConcurrentServiceSyncs, "concurrent-service-syncs", s.Generic.ComponentConfig.ConcurrentServiceSyncs, "The number of services that are allowed to sync concurrently. Larger number = more responsive service management, but more CPU (and network) load")
fs.Int32Var(&s.Generic.ComponentConfig.ConcurrentRCSyncs, "concurrent_rc_syncs", s.Generic.ComponentConfig.ConcurrentRCSyncs, "The number of replication controllers that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load")
fs.Int32Var(&s.Generic.ComponentConfig.ConcurrentRSSyncs, "concurrent-replicaset-syncs", s.Generic.ComponentConfig.ConcurrentRSSyncs, "The number of replica sets that are allowed to sync concurrently. Larger number = more responsive replica management, but more CPU (and network) load")

fs.Int32Var(&s.ConcurrentResourceQuotaSyncs, "concurrent-resource-quota-syncs", s.ConcurrentResourceQuotaSyncs, "The number of resource quotas that are allowed to sync concurrently. Larger number = more responsive quota management, but more CPU (and network) load")
fs.Int32Var(&s.ConcurrentDeploymentSyncs, "concurrent-deployment-syncs", s.ConcurrentDeploymentSyncs, "The number of deployment objects that are allowed to sync concurrently. Larger number = more responsive deployments, but more CPU (and network) load")
fs.Int32Var(&s.ConcurrentNamespaceSyncs, "concurrent-namespace-syncs", s.ConcurrentNamespaceSyncs, "The number of namespace objects that are allowed to sync concurrently. Larger number = more responsive namespace termination, but more CPU (and network) load")
fs.Int32Var(&s.ConcurrentSATokenSyncs, "concurrent-serviceaccount-token-syncs", s.ConcurrentSATokenSyncs, "The number of service account token objects that are allowed to sync concurrently. Larger number = more responsive token generation, but more CPU (and network) load")
fs.DurationVar(&s.NodeSyncPeriod.Duration, "node-sync-period", 0, ""+
fs.Int32Var(&s.Generic.ComponentConfig.ConcurrentResourceQuotaSyncs, "concurrent-resource-quota-syncs", s.Generic.ComponentConfig.ConcurrentResourceQuotaSyncs, "The number of resource quotas that are allowed to sync concurrently. Larger number = more responsive quota management, but more CPU (and network) load")
fs.Int32Var(&s.Generic.ComponentConfig.ConcurrentDeploymentSyncs, "concurrent-deployment-syncs", s.Generic.ComponentConfig.ConcurrentDeploymentSyncs, "The number of deployment objects that are allowed to sync concurrently. Larger number = more responsive deployments, but more CPU (and network) load")
fs.Int32Var(&s.Generic.ComponentConfig.ConcurrentNamespaceSyncs, "concurrent-namespace-syncs", s.Generic.ComponentConfig.ConcurrentNamespaceSyncs, "The number of namespace objects that are allowed to sync concurrently. Larger number = more responsive namespace termination, but more CPU (and network) load")
fs.Int32Var(&s.Generic.ComponentConfig.ConcurrentSATokenSyncs, "concurrent-serviceaccount-token-syncs", s.Generic.ComponentConfig.ConcurrentSATokenSyncs, "The number of service account token objects that are allowed to sync concurrently. Larger number = more responsive token generation, but more CPU (and network) load")
fs.DurationVar(&s.Generic.ComponentConfig.NodeSyncPeriod.Duration, "node-sync-period", 0, ""+
"This flag is deprecated and will be removed in future releases. See node-monitor-period for Node health checking or "+
"route-reconciliation-period for cloud provider's route configuration settings.")
fs.MarkDeprecated("node-sync-period", "This flag is currently no-op and will be deleted.")
fs.DurationVar(&s.ResourceQuotaSyncPeriod.Duration, "resource-quota-sync-period", s.ResourceQuotaSyncPeriod.Duration, "The period for syncing quota usage status in the system")
fs.DurationVar(&s.NamespaceSyncPeriod.Duration, "namespace-sync-period", s.NamespaceSyncPeriod.Duration, "The period for syncing namespace life-cycle updates")
fs.DurationVar(&s.PVClaimBinderSyncPeriod.Duration, "pvclaimbinder-sync-period", s.PVClaimBinderSyncPeriod.Duration, "The period for syncing persistent volumes and persistent volume claims")
fs.StringVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, "pv-recycler-pod-template-filepath-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, "The file path to a pod definition used as a template for NFS persistent volume recycling")
fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "pv-recycler-minimum-timeout-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod")
fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "pv-recycler-increment-timeout-nfs", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "the increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod")
fs.StringVar(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, "pv-recycler-pod-template-filepath-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, "The file path to a pod definition used as a template for HostPath persistent volume recycling. This is for development and testing only and will not work in a multi-node cluster.")
fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "pv-recycler-minimum-timeout-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.")
fs.Int32Var(&s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "pv-recycler-timeout-increment-hostpath", s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.")
fs.BoolVar(&s.VolumeConfiguration.EnableHostPathProvisioning, "enable-hostpath-provisioner", s.VolumeConfiguration.EnableHostPathProvisioning, "Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.")
fs.BoolVar(&s.VolumeConfiguration.EnableDynamicProvisioning, "enable-dynamic-provisioning", s.VolumeConfiguration.EnableDynamicProvisioning, "Enable dynamic provisioning for environments that support it.")
fs.StringVar(&s.VolumeConfiguration.FlexVolumePluginDir, "flex-volume-plugin-dir", s.VolumeConfiguration.FlexVolumePluginDir, "Full path of the directory in which the flex volume plugin should search for additional third party volume plugins.")
fs.Int32Var(&s.TerminatedPodGCThreshold, "terminated-pod-gc-threshold", s.TerminatedPodGCThreshold, "Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods. If <= 0, the terminated pod garbage collector is disabled.")
fs.DurationVar(&s.HorizontalPodAutoscalerSyncPeriod.Duration, "horizontal-pod-autoscaler-sync-period", s.HorizontalPodAutoscalerSyncPeriod.Duration, "The period for syncing the number of pods in horizontal pod autoscaler.")
fs.DurationVar(&s.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration, "horizontal-pod-autoscaler-upscale-delay", s.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration, "The period since last upscale, before another upscale can be performed in horizontal pod autoscaler.")
fs.DurationVar(&s.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration, "horizontal-pod-autoscaler-downscale-delay", s.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration, "The period since last downscale, before another downscale can be performed in horizontal pod autoscaler.")
fs.Float64Var(&s.HorizontalPodAutoscalerTolerance, "horizontal-pod-autoscaler-tolerance", s.HorizontalPodAutoscalerTolerance, "The minimum change (from 1.0) in the desired-to-actual metrics ratio for the horizontal pod autoscaler to consider scaling.")
fs.DurationVar(&s.DeploymentControllerSyncPeriod.Duration, "deployment-controller-sync-period", s.DeploymentControllerSyncPeriod.Duration, "Period for syncing the deployments.")
fs.DurationVar(&s.PodEvictionTimeout.Duration, "pod-eviction-timeout", s.PodEvictionTimeout.Duration, "The grace period for deleting pods on failed nodes.")
fs.Float32Var(&s.DeletingPodsQps, "deleting-pods-qps", 0.1, "Number of nodes per second on which pods are deleted in case of node failure.")
fs.DurationVar(&s.Generic.ComponentConfig.ResourceQuotaSyncPeriod.Duration, "resource-quota-sync-period", s.Generic.ComponentConfig.ResourceQuotaSyncPeriod.Duration, "The period for syncing quota usage status in the system")
fs.DurationVar(&s.Generic.ComponentConfig.NamespaceSyncPeriod.Duration, "namespace-sync-period", s.Generic.ComponentConfig.NamespaceSyncPeriod.Duration, "The period for syncing namespace life-cycle updates")
fs.DurationVar(&s.Generic.ComponentConfig.PVClaimBinderSyncPeriod.Duration, "pvclaimbinder-sync-period", s.Generic.ComponentConfig.PVClaimBinderSyncPeriod.Duration, "The period for syncing persistent volumes and persistent volume claims")
fs.StringVar(&s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, "pv-recycler-pod-template-filepath-nfs", s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathNFS, "The file path to a pod definition used as a template for NFS persistent volume recycling")
fs.Int32Var(&s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "pv-recycler-minimum-timeout-nfs", s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutNFS, "The minimum ActiveDeadlineSeconds to use for an NFS Recycler pod")
fs.Int32Var(&s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "pv-recycler-increment-timeout-nfs", s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutNFS, "the increment of time added per Gi to ActiveDeadlineSeconds for an NFS scrubber pod")
fs.StringVar(&s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, "pv-recycler-pod-template-filepath-hostpath", s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.PodTemplateFilePathHostPath, "The file path to a pod definition used as a template for HostPath persistent volume recycling. This is for development and testing only and will not work in a multi-node cluster.")
fs.Int32Var(&s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "pv-recycler-minimum-timeout-hostpath", s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MinimumTimeoutHostPath, "The minimum ActiveDeadlineSeconds to use for a HostPath Recycler pod. This is for development and testing only and will not work in a multi-node cluster.")
fs.Int32Var(&s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "pv-recycler-timeout-increment-hostpath", s.Generic.ComponentConfig.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.IncrementTimeoutHostPath, "the increment of time added per Gi to ActiveDeadlineSeconds for a HostPath scrubber pod. This is for development and testing only and will not work in a multi-node cluster.")
fs.BoolVar(&s.Generic.ComponentConfig.VolumeConfiguration.EnableHostPathProvisioning, "enable-hostpath-provisioner", s.Generic.ComponentConfig.VolumeConfiguration.EnableHostPathProvisioning, "Enable HostPath PV provisioning when running without a cloud provider. This allows testing and development of provisioning features. HostPath provisioning is not supported in any way, won't work in a multi-node cluster, and should not be used for anything other than testing or development.")
fs.BoolVar(&s.Generic.ComponentConfig.VolumeConfiguration.EnableDynamicProvisioning, "enable-dynamic-provisioning", s.Generic.ComponentConfig.VolumeConfiguration.EnableDynamicProvisioning, "Enable dynamic provisioning for environments that support it.")
fs.StringVar(&s.Generic.ComponentConfig.VolumeConfiguration.FlexVolumePluginDir, "flex-volume-plugin-dir", s.Generic.ComponentConfig.VolumeConfiguration.FlexVolumePluginDir, "Full path of the directory in which the flex volume plugin should search for additional third party volume plugins.")
fs.Int32Var(&s.Generic.ComponentConfig.TerminatedPodGCThreshold, "terminated-pod-gc-threshold", s.Generic.ComponentConfig.TerminatedPodGCThreshold, "Number of terminated pods that can exist before the terminated pod garbage collector starts deleting terminated pods. If <= 0, the terminated pod garbage collector is disabled.")
fs.DurationVar(&s.Generic.ComponentConfig.HorizontalPodAutoscalerSyncPeriod.Duration, "horizontal-pod-autoscaler-sync-period", s.Generic.ComponentConfig.HorizontalPodAutoscalerSyncPeriod.Duration, "The period for syncing the number of pods in horizontal pod autoscaler.")
fs.DurationVar(&s.Generic.ComponentConfig.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration, "horizontal-pod-autoscaler-upscale-delay", s.Generic.ComponentConfig.HorizontalPodAutoscalerUpscaleForbiddenWindow.Duration, "The period since last upscale, before another upscale can be performed in horizontal pod autoscaler.")
fs.DurationVar(&s.Generic.ComponentConfig.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration, "horizontal-pod-autoscaler-downscale-delay", s.Generic.ComponentConfig.HorizontalPodAutoscalerDownscaleForbiddenWindow.Duration, "The period since last downscale, before another downscale can be performed in horizontal pod autoscaler.")
fs.Float64Var(&s.Generic.ComponentConfig.HorizontalPodAutoscalerTolerance, "horizontal-pod-autoscaler-tolerance", s.Generic.ComponentConfig.HorizontalPodAutoscalerTolerance, "The minimum change (from 1.0) in the desired-to-actual metrics ratio for the horizontal pod autoscaler to consider scaling.")
fs.DurationVar(&s.Generic.ComponentConfig.DeploymentControllerSyncPeriod.Duration, "deployment-controller-sync-period", s.Generic.ComponentConfig.DeploymentControllerSyncPeriod.Duration, "Period for syncing the deployments.")
fs.DurationVar(&s.Generic.ComponentConfig.PodEvictionTimeout.Duration, "pod-eviction-timeout", s.Generic.ComponentConfig.PodEvictionTimeout.Duration, "The grace period for deleting pods on failed nodes.")
fs.Float32Var(&s.Generic.ComponentConfig.DeletingPodsQps, "deleting-pods-qps", 0.1, "Number of nodes per second on which pods are deleted in case of node failure.")
fs.MarkDeprecated("deleting-pods-qps", "This flag is currently no-op and will be deleted.")
fs.Int32Var(&s.DeletingPodsBurst, "deleting-pods-burst", 0, "Number of nodes on which pods are bursty deleted in case of node failure. For more details look into RateLimiter.")
fs.Int32Var(&s.Generic.ComponentConfig.DeletingPodsBurst, "deleting-pods-burst", 0, "Number of nodes on which pods are bursty deleted in case of node failure. For more details look into RateLimiter.")
fs.MarkDeprecated("deleting-pods-burst", "This flag is currently no-op and will be deleted.")
fs.Int32Var(&s.RegisterRetryCount, "register-retry-count", s.RegisterRetryCount, ""+
fs.Int32Var(&s.Generic.ComponentConfig.RegisterRetryCount, "register-retry-count", s.Generic.ComponentConfig.RegisterRetryCount, ""+
"The number of retries for initial node registration. Retry interval equals node-sync-period.")
fs.MarkDeprecated("register-retry-count", "This flag is currently no-op and will be deleted.")
fs.DurationVar(&s.NodeMonitorGracePeriod.Duration, "node-monitor-grace-period", s.NodeMonitorGracePeriod.Duration,
fs.DurationVar(&s.Generic.ComponentConfig.NodeMonitorGracePeriod.Duration, "node-monitor-grace-period", s.Generic.ComponentConfig.NodeMonitorGracePeriod.Duration,
"Amount of time which we allow running Node to be unresponsive before marking it unhealthy. "+
"Must be N times more than kubelet's nodeStatusUpdateFrequency, "+
"where N means number of retries allowed for kubelet to post node status.")
fs.DurationVar(&s.NodeStartupGracePeriod.Duration, "node-startup-grace-period", s.NodeStartupGracePeriod.Duration,
fs.DurationVar(&s.Generic.ComponentConfig.NodeStartupGracePeriod.Duration, "node-startup-grace-period", s.Generic.ComponentConfig.NodeStartupGracePeriod.Duration,
"Amount of time which we allow starting Node to be unresponsive before marking it unhealthy.")
fs.StringVar(&s.ServiceAccountKeyFile, "service-account-private-key-file", s.ServiceAccountKeyFile, "Filename containing a PEM-encoded private RSA or ECDSA key used to sign service account tokens.")
fs.StringVar(&s.ClusterSigningCertFile, "cluster-signing-cert-file", s.ClusterSigningCertFile, "Filename containing a PEM-encoded X509 CA certificate used to issue cluster-scoped certificates")
fs.StringVar(&s.ClusterSigningKeyFile, "cluster-signing-key-file", s.ClusterSigningKeyFile, "Filename containing a PEM-encoded RSA or ECDSA private key used to sign cluster-scoped certificates")
fs.DurationVar(&s.ClusterSigningDuration.Duration, "experimental-cluster-signing-duration", s.ClusterSigningDuration.Duration, "The length of duration signed certificates will be given.")
fs.StringVar(&s.Generic.ComponentConfig.ServiceAccountKeyFile, "service-account-private-key-file", s.Generic.ComponentConfig.ServiceAccountKeyFile, "Filename containing a PEM-encoded private RSA or ECDSA key used to sign service account tokens.")
fs.StringVar(&s.Generic.ComponentConfig.ClusterSigningCertFile, "cluster-signing-cert-file", s.Generic.ComponentConfig.ClusterSigningCertFile, "Filename containing a PEM-encoded X509 CA certificate used to issue cluster-scoped certificates")
|
||||
fs.StringVar(&s.Generic.ComponentConfig.ClusterSigningKeyFile, "cluster-signing-key-file", s.Generic.ComponentConfig.ClusterSigningKeyFile, "Filename containing a PEM-encoded RSA or ECDSA private key used to sign cluster-scoped certificates")
|
||||
fs.DurationVar(&s.Generic.ComponentConfig.ClusterSigningDuration.Duration, "experimental-cluster-signing-duration", s.Generic.ComponentConfig.ClusterSigningDuration.Duration, "The length of duration signed certificates will be given.")
|
||||
var dummy string
|
||||
fs.MarkDeprecated("insecure-experimental-approve-all-kubelet-csrs-for-group", "This flag does nothing.")
|
||||
fs.StringVar(&dummy, "insecure-experimental-approve-all-kubelet-csrs-for-group", "", "This flag does nothing.")
|
||||
fs.StringVar(&s.ServiceCIDR, "service-cluster-ip-range", s.ServiceCIDR, "CIDR Range for Services in cluster. Requires --allocate-node-cidrs to be true")
|
||||
fs.Int32Var(&s.NodeCIDRMaskSize, "node-cidr-mask-size", s.NodeCIDRMaskSize, "Mask size for node cidr in cluster.")
|
||||
fs.StringVar(&s.RootCAFile, "root-ca-file", s.RootCAFile, "If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.")
|
||||
fs.BoolVar(&s.EnableGarbageCollector, "enable-garbage-collector", s.EnableGarbageCollector, "Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-apiserver.")
|
||||
fs.Int32Var(&s.ConcurrentGCSyncs, "concurrent-gc-syncs", s.ConcurrentGCSyncs, "The number of garbage collector workers that are allowed to sync concurrently.")
|
||||
fs.Int32Var(&s.LargeClusterSizeThreshold, "large-cluster-size-threshold", 50, "Number of nodes from which NodeController treats the cluster as large for the eviction logic purposes. --secondary-node-eviction-rate is implicitly overridden to 0 for clusters this size or smaller.")
|
||||
fs.Float32Var(&s.UnhealthyZoneThreshold, "unhealthy-zone-threshold", 0.55, "Fraction of Nodes in a zone which needs to be not Ready (minimum 3) for zone to be treated as unhealthy. ")
|
||||
fs.BoolVar(&s.DisableAttachDetachReconcilerSync, "disable-attach-detach-reconcile-sync", false, "Disable volume attach detach reconciler sync. Disabling this may cause volumes to be mismatched with pods. Use wisely.")
|
||||
fs.DurationVar(&s.ReconcilerSyncLoopPeriod.Duration, "attach-detach-reconcile-sync-period", s.ReconcilerSyncLoopPeriod.Duration, "The reconciler sync wait time between volume attach detach. This duration must be larger than one second, and increasing this value from the default may allow for volumes to be mismatched with pods.")
|
||||
fs.BoolVar(&s.EnableTaintManager, "enable-taint-manager", s.EnableTaintManager, "WARNING: Beta feature. If set to true enables NoExecute Taints and will evict all not-tolerating Pod running on Nodes tainted with this kind of Taints.")
|
||||
fs.BoolVar(&s.HorizontalPodAutoscalerUseRESTClients, "horizontal-pod-autoscaler-use-rest-clients", s.HorizontalPodAutoscalerUseRESTClients, "WARNING: alpha feature. If set to true, causes the horizontal pod autoscaler controller to use REST clients through the kube-aggregator, instead of using the legacy metrics client through the API server proxy. This is required for custom metrics support in the horizontal pod autoscaler.")
|
||||
fs.Float32Var(&s.NodeEvictionRate, "node-eviction-rate", 0.1, "Number of nodes per second on which pods are deleted in case of node failure when a zone is healthy (see --unhealthy-zone-threshold for definition of healthy/unhealthy). Zone refers to entire cluster in non-multizone clusters.")
|
||||
fs.Float32Var(&s.SecondaryNodeEvictionRate, "secondary-node-eviction-rate", 0.01, "Number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy (see --unhealthy-zone-threshold for definition of healthy/unhealthy). Zone refers to entire cluster in non-multizone clusters. This value is implicitly overridden to 0 if the cluster size is smaller than --large-cluster-size-threshold.")
|
||||
fs.StringVar(&s.Generic.ComponentConfig.ServiceCIDR, "service-cluster-ip-range", s.Generic.ComponentConfig.ServiceCIDR, "CIDR Range for Services in cluster. Requires --allocate-node-cidrs to be true")
|
||||
fs.Int32Var(&s.Generic.ComponentConfig.NodeCIDRMaskSize, "node-cidr-mask-size", s.Generic.ComponentConfig.NodeCIDRMaskSize, "Mask size for node cidr in cluster.")
|
||||
fs.StringVar(&s.Generic.ComponentConfig.RootCAFile, "root-ca-file", s.Generic.ComponentConfig.RootCAFile, "If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.")
|
||||
fs.BoolVar(&s.Generic.ComponentConfig.EnableGarbageCollector, "enable-garbage-collector", s.Generic.ComponentConfig.EnableGarbageCollector, "Enables the generic garbage collector. MUST be synced with the corresponding flag of the kube-apiserver.")
|
||||
fs.Int32Var(&s.Generic.ComponentConfig.ConcurrentGCSyncs, "concurrent-gc-syncs", s.Generic.ComponentConfig.ConcurrentGCSyncs, "The number of garbage collector workers that are allowed to sync concurrently.")
|
||||
fs.Int32Var(&s.Generic.ComponentConfig.LargeClusterSizeThreshold, "large-cluster-size-threshold", 50, "Number of nodes from which NodeController treats the cluster as large for the eviction logic purposes. --secondary-node-eviction-rate is implicitly overridden to 0 for clusters this size or smaller.")
|
||||
fs.Float32Var(&s.Generic.ComponentConfig.UnhealthyZoneThreshold, "unhealthy-zone-threshold", 0.55, "Fraction of Nodes in a zone which needs to be not Ready (minimum 3) for zone to be treated as unhealthy. ")
|
||||
fs.BoolVar(&s.Generic.ComponentConfig.DisableAttachDetachReconcilerSync, "disable-attach-detach-reconcile-sync", false, "Disable volume attach detach reconciler sync. Disabling this may cause volumes to be mismatched with pods. Use wisely.")
|
||||
fs.DurationVar(&s.Generic.ComponentConfig.ReconcilerSyncLoopPeriod.Duration, "attach-detach-reconcile-sync-period", s.Generic.ComponentConfig.ReconcilerSyncLoopPeriod.Duration, "The reconciler sync wait time between volume attach detach. This duration must be larger than one second, and increasing this value from the default may allow for volumes to be mismatched with pods.")
|
||||
fs.BoolVar(&s.Generic.ComponentConfig.EnableTaintManager, "enable-taint-manager", s.Generic.ComponentConfig.EnableTaintManager, "WARNING: Beta feature. If set to true enables NoExecute Taints and will evict all not-tolerating Pod running on Nodes tainted with this kind of Taints.")
|
||||
fs.BoolVar(&s.Generic.ComponentConfig.HorizontalPodAutoscalerUseRESTClients, "horizontal-pod-autoscaler-use-rest-clients", s.Generic.ComponentConfig.HorizontalPodAutoscalerUseRESTClients, "WARNING: alpha feature. If set to true, causes the horizontal pod autoscaler controller to use REST clients through the kube-aggregator, instead of using the legacy metrics client through the API server proxy. This is required for custom metrics support in the horizontal pod autoscaler.")
|
||||
fs.Float32Var(&s.Generic.ComponentConfig.NodeEvictionRate, "node-eviction-rate", 0.1, "Number of nodes per second on which pods are deleted in case of node failure when a zone is healthy (see --unhealthy-zone-threshold for definition of healthy/unhealthy). Zone refers to entire cluster in non-multizone clusters.")
|
||||
fs.Float32Var(&s.Generic.ComponentConfig.SecondaryNodeEvictionRate, "secondary-node-eviction-rate", 0.01, "Number of nodes per second on which pods are deleted in case of node failure when a zone is unhealthy (see --unhealthy-zone-threshold for definition of healthy/unhealthy). Zone refers to entire cluster in non-multizone clusters. This value is implicitly overridden to 0 if the cluster size is smaller than --large-cluster-size-threshold.")
|
||||
|
||||
leaderelectionconfig.BindFlags(&s.LeaderElection, fs)
|
||||
leaderelectionconfig.BindFlags(&s.Generic.ComponentConfig.LeaderElection, fs)
|
||||
|
||||
utilfeature.DefaultFeatureGate.AddFlag(fs)
|
||||
}
|
||||
|
||||
// ApplyTo fills up controller manager config with options.
|
||||
func (s *KubeControllerManagerOptions) ApplyTo(c *kubecontrollerconfig.Config) error {
|
||||
err := s.Generic.ApplyTo(&c.Generic, "controller-manager")
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Validate is used to validate the options and config before launching the controller manager
|
||||
func (s *CMServer) Validate(allControllers []string, disabledByDefaultControllers []string) error {
|
||||
func (s *KubeControllerManagerOptions) Validate(allControllers []string, disabledByDefaultControllers []string) error {
|
||||
var errs []error
|
||||
|
||||
allControllersSet := sets.NewString(allControllers...)
|
||||
for _, controller := range s.Controllers {
|
||||
for _, controller := range s.Generic.ComponentConfig.Controllers {
|
||||
if controller == "*" {
|
||||
continue
|
||||
}
|
||||
@ -162,3 +173,17 @@ func (s *CMServer) Validate(allControllers []string, disabledByDefaultController
|
||||
|
||||
return utilerrors.NewAggregate(errs)
|
||||
}
|
||||
|
||||
// Config return a controller manager config objective
|
||||
func (s KubeControllerManagerOptions) Config(allControllers []string, disabledByDefaultControllers []string) (*kubecontrollerconfig.Config, error) {
|
||||
if err := s.Validate(allControllers, disabledByDefaultControllers); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c := &kubecontrollerconfig.Config{}
|
||||
if err := s.ApplyTo(c); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
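Read together, the renamed KubeControllerManagerOptions type follows a construct, bind flags, validate, build-config lifecycle. A minimal sketch of driving it, assuming the app.KnownControllers and app.ControllersDisabledByDefault helpers from the kube-controller-manager app package (this is not the actual command wiring):

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/pflag"

	"k8s.io/kubernetes/cmd/kube-controller-manager/app"
	"k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
)

func main() {
	fs := pflag.NewFlagSet("kube-controller-manager", pflag.ExitOnError)
	s := options.NewKubeControllerManagerOptions()
	// Bind every flag, including the deprecated no-op ones registered above.
	s.AddFlags(fs, app.KnownControllers(), app.ControllersDisabledByDefault.List())
	_ = fs.Parse(os.Args[1:])

	// Config calls Validate and ApplyTo internally before returning the runtime config.
	c, err := s.Config(app.KnownControllers(), app.ControllersDisabledByDefault.List())
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	_ = c // hand the config to the controller-manager run loop
}
```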
@ -17,6 +17,7 @@ limitations under the License.
package options

import (
"net"
"reflect"
"sort"
"testing"
@ -26,13 +27,14 @@ import (

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/diff"
apiserveroptions "k8s.io/apiserver/pkg/server/options"
cmoptions "k8s.io/kubernetes/cmd/controller-manager/app/options"
"k8s.io/kubernetes/pkg/apis/componentconfig"
)

func TestAddFlags(t *testing.T) {
f := pflag.NewFlagSet("addflagstest", pflag.ContinueOnError)
s := NewCMServer()
s := NewKubeControllerManagerOptions()
s.AddFlags(f, []string{""}, []string{""})

args := []string{
@ -103,17 +105,20 @@ func TestAddFlags(t *testing.T) {
"--terminated-pod-gc-threshold=12000",
"--unhealthy-zone-threshold=0.6",
"--use-service-account-credentials=true",
"--cert-dir=/a/b/c",
"--bind-address=192.168.4.21",
"--secure-port=10001",
}
f.Parse(args)
// Sort GCIgnoredResources because it's built from a map, which means the
// insertion order is random.
sort.Sort(sortedGCIgnoredResources(s.GCIgnoredResources))
sort.Sort(sortedGCIgnoredResources(s.Generic.ComponentConfig.GCIgnoredResources))

expected := &CMServer{
ControllerManagerServer: cmoptions.ControllerManagerServer{
KubeControllerManagerConfiguration: componentconfig.KubeControllerManagerConfiguration{
Port: 10000,
Address: "192.168.4.10",
expected := &KubeControllerManagerOptions{
Generic: cmoptions.GenericControllerManagerOptions{
ComponentConfig: componentconfig.KubeControllerManagerConfiguration{
Port: 10252, // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config
Address: "0.0.0.0", // Note: InsecureServingOptions.ApplyTo will write the flag value back into the component config
AllocateNodeCIDRs: true,
CloudConfigFile: "/cloud-config",
CloudProvider: "gce",
@ -204,6 +209,19 @@ func TestAddFlags(t *testing.T) {
HorizontalPodAutoscalerUseRESTClients: true,
UseServiceAccountCredentials: true,
},
SecureServing: &apiserveroptions.SecureServingOptions{
BindPort: 10001,
BindAddress: net.ParseIP("192.168.4.21"),
ServerCert: apiserveroptions.GeneratableKeyCert{
CertDirectory: "/a/b/c",
PairName: "kube-controller-manager",
},
},
InsecureServing: &cmoptions.InsecureServingOptions{
BindAddress: net.ParseIP("192.168.4.10"),
BindPort: int(10000),
BindNetwork: "tcp",
},
Kubeconfig: "/kubeconfig",
Master: "192.168.4.20",
},
@ -211,7 +229,7 @@ func TestAddFlags(t *testing.T) {

// Sort GCIgnoredResources because it's built from a map, which means the
// insertion order is random.
sort.Sort(sortedGCIgnoredResources(expected.GCIgnoredResources))
sort.Sort(sortedGCIgnoredResources(expected.Generic.ComponentConfig.GCIgnoredResources))

if !reflect.DeepEqual(expected, s) {
t.Errorf("Got different run options than expected.\nDifference detected on:\n%s", diff.ObjectReflectDiff(expected, s))
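The sort on GCIgnoredResources exists because that slice is built from a map, and Go deliberately randomizes map iteration order, so reflect.DeepEqual would fail intermittently without normalization. A self-contained illustration of the pattern (the names here are illustrative, not taken from the test):

```go
package main

import (
	"fmt"
	"reflect"
	"sort"
)

func main() {
	m := map[string]bool{"b": true, "a": true, "c": true}
	got := make([]string, 0, len(m))
	for k := range m { // iteration order is randomized by the runtime
		got = append(got, k)
	}
	want := []string{"a", "b", "c"}
	sort.Strings(got) // normalize before comparing, as TestAddFlags does
	fmt.Println(reflect.DeepEqual(got, want)) // true
}
```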
@ -22,14 +22,13 @@ package main

import (
goflag "flag"
"fmt"
"math/rand"
"os"
"time"

"github.com/spf13/pflag"

"fmt"

utilflag "k8s.io/apiserver/pkg/util/flag"
"k8s.io/apiserver/pkg/util/logs"
"k8s.io/kubernetes/cmd/kube-controller-manager/app"
@ -51,6 +51,10 @@ type MasterConfiguration struct {

// If not specified, defaults to Node and RBAC, meaning both the node
// authorizer and RBAC are enabled.
AuthorizationModes []string
// NoTaintMaster will, if set, suppress the tainting of the
// master node allowing workloads to be run on it (e.g. in
// single node configurations).
NoTaintMaster bool

// Mark the controller and api server pods as privileged as some cloud
// controllers like openstack need escalated privileges under some conditions

@ -51,6 +51,10 @@ type MasterConfiguration struct {

// If not specified, defaults to Node and RBAC, meaning both the node
// authorizer and RBAC are enabled.
AuthorizationModes []string `json:"authorizationModes,omitempty"`
// NoTaintMaster will, if set, suppress the tainting of the
// master node allowing workloads to be run on it (e.g. in
// single node configurations).
NoTaintMaster bool `json:"noTaintMaster,omitempty"`

// Mark the controller and api server pods as privileged as some cloud
// controllers like openstack need escalated privileges under some conditions
@ -229,6 +229,7 @@ func autoConvert_v1alpha1_MasterConfiguration_To_kubeadm_MasterConfiguration(in
out.CloudProvider = in.CloudProvider
out.NodeName = in.NodeName
out.AuthorizationModes = *(*[]string)(unsafe.Pointer(&in.AuthorizationModes))
out.NoTaintMaster = in.NoTaintMaster
out.PrivilegedPods = in.PrivilegedPods
out.Token = in.Token
out.TokenTTL = (*v1.Duration)(unsafe.Pointer(in.TokenTTL))
@ -275,6 +276,7 @@ func autoConvert_kubeadm_MasterConfiguration_To_v1alpha1_MasterConfiguration(in
out.CloudProvider = in.CloudProvider
out.NodeName = in.NodeName
out.AuthorizationModes = *(*[]string)(unsafe.Pointer(&in.AuthorizationModes))
out.NoTaintMaster = in.NoTaintMaster
out.PrivilegedPods = in.PrivilegedPods
out.Token = in.Token
out.TokenTTL = (*v1.Duration)(unsafe.Pointer(in.TokenTTL))
@ -408,7 +408,7 @@ func (i *Init) Run(out io.Writer) error {
}

// PHASE 4: Mark the master with the right label/taint
if err := markmasterphase.MarkMaster(client, i.cfg.NodeName); err != nil {
if err := markmasterphase.MarkMaster(client, i.cfg.NodeName, !i.cfg.NoTaintMaster); err != nil {
return fmt.Errorf("error marking master: %v", err)
}

@ -75,7 +75,7 @@ func NewCmdMarkMaster() *cobra.Command {
client, err := kubeconfigutil.ClientSetFromFile(kubeConfigFile)
kubeadmutil.CheckErr(err)

err = markmasterphase.MarkMaster(client, internalcfg.NodeName)
err = markmasterphase.MarkMaster(client, internalcfg.NodeName, !internalcfg.NoTaintMaster)
kubeadmutil.CheckErr(err)
},
}

@ -252,6 +252,7 @@ func resetConfigDir(configPathDir, pkiPathDir string) {
filesToClean := []string{
filepath.Join(configPathDir, kubeadmconstants.AdminKubeConfigFileName),
filepath.Join(configPathDir, kubeadmconstants.KubeletKubeConfigFileName),
filepath.Join(configPathDir, kubeadmconstants.KubeletBootstrapKubeConfigFileName),
filepath.Join(configPathDir, kubeadmconstants.ControllerManagerKubeConfigFileName),
filepath.Join(configPathDir, kubeadmconstants.SchedulerKubeConfigFileName),
}
@ -32,9 +32,13 @@ import (
)

// MarkMaster taints the master and sets the master label
func MarkMaster(client clientset.Interface, masterName string) error {
func MarkMaster(client clientset.Interface, masterName string, taint bool) error {

fmt.Printf("[markmaster] Will mark node %s as master by adding a label and a taint\n", masterName)
if taint {
fmt.Printf("[markmaster] Will mark node %s as master by adding a label and a taint\n", masterName)
} else {
fmt.Printf("[markmaster] Will mark node %s as master by adding a label\n", masterName)
}

// Loop on every falsy return. Return with an error if raised. Exit successfully if true is returned.
return wait.Poll(kubeadmconstants.APICallRetryInterval, kubeadmconstants.MarkMasterTimeout, func() (bool, error) {
@ -56,7 +60,7 @@ func MarkMaster(client clientset.Interface, masterName string) error {
}

// The master node should be tainted and labelled accordingly
markMasterNode(n)
markMasterNode(n, taint)

newData, err := json.Marshal(n)
if err != nil {
@ -76,15 +80,23 @@ func MarkMaster(client clientset.Interface, masterName string) error {
return false, err
}

fmt.Printf("[markmaster] Master %s tainted and labelled with key/value: %s=%q\n", masterName, kubeadmconstants.LabelNodeRoleMaster, "")
if taint {
fmt.Printf("[markmaster] Master %s tainted and labelled with key/value: %s=%q\n", masterName, kubeadmconstants.LabelNodeRoleMaster, "")
} else {
fmt.Printf("[markmaster] Master %s labelled with key/value: %s=%q\n", masterName, kubeadmconstants.LabelNodeRoleMaster, "")
}

return true, nil
})
}

func markMasterNode(n *v1.Node) {
func markMasterNode(n *v1.Node, taint bool) {
n.ObjectMeta.Labels[kubeadmconstants.LabelNodeRoleMaster] = ""
addTaintIfNotExists(n, kubeadmconstants.MasterTaint)
if taint {
addTaintIfNotExists(n, kubeadmconstants.MasterTaint)
} else {
delTaintIfExists(n, kubeadmconstants.MasterTaint)
}
}

func addTaintIfNotExists(n *v1.Node, t v1.Taint) {
@ -96,3 +108,14 @@ func addTaintIfNotExists(n *v1.Node, t v1.Taint) {

n.Spec.Taints = append(n.Spec.Taints, t)
}

func delTaintIfExists(n *v1.Node, t v1.Taint) {
var taints []v1.Taint
for _, taint := range n.Spec.Taints {
if taint == t {
continue
}
taints = append(taints, taint) // keep every taint other than t
}
n.Spec.Taints = taints
}
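delTaintIfExists rebuilds the slice rather than deleting in place, so when the master taint is the only taint the field becomes nil and patches as "taints": null, matching the test expectation below. A compact standalone version of the add/remove pair, mirroring the logic above but independent of the kubeadm types:

```go
package main

import "fmt"

type Taint struct{ Key, Effect string }

func addTaintIfNotExists(taints []Taint, t Taint) []Taint {
	for _, existing := range taints {
		if existing == t {
			return taints // already present, nothing to do
		}
	}
	return append(taints, t)
}

func delTaintIfExists(taints []Taint, t Taint) []Taint {
	var kept []Taint
	for _, taint := range taints {
		if taint == t {
			continue // drop the matching taint
		}
		kept = append(kept, taint)
	}
	return kept // nil when nothing remains
}

func main() {
	master := Taint{Key: "node-role.kubernetes.io/master", Effect: "NoSchedule"}
	taints := addTaintIfNotExists(nil, master)
	fmt.Println(len(taints))   // 1
	taints = delTaintIfExists(taints, master)
	fmt.Println(taints == nil) // true, i.e. {"spec":{"taints":null}} after patching
}
```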
@ -43,32 +43,51 @@ func TestMarkMaster(t *testing.T) {
name string
existingLabel string
existingTaint *v1.Taint
wantTaint bool
expectedPatch string
}{
{
"master label and taint missing",
"",
nil,
true,
"{\"metadata\":{\"labels\":{\"node-role.kubernetes.io/master\":\"\"}},\"spec\":{\"taints\":[{\"effect\":\"NoSchedule\",\"key\":\"node-role.kubernetes.io/master\"}]}}",
},
{
"master label and taint missing but taint not wanted",
"",
nil,
false,
"{\"metadata\":{\"labels\":{\"node-role.kubernetes.io/master\":\"\"}}}",
},
{
"master label missing",
"",
&kubeadmconstants.MasterTaint,
true,
"{\"metadata\":{\"labels\":{\"node-role.kubernetes.io/master\":\"\"}}}",
},
{
"master taint missing",
kubeadmconstants.LabelNodeRoleMaster,
nil,
true,
"{\"spec\":{\"taints\":[{\"effect\":\"NoSchedule\",\"key\":\"node-role.kubernetes.io/master\"}]}}",
},
{
"nothing missing",
kubeadmconstants.LabelNodeRoleMaster,
&kubeadmconstants.MasterTaint,
true,
"{}",
},
{
"nothing missing but taint unwanted",
kubeadmconstants.LabelNodeRoleMaster,
&kubeadmconstants.MasterTaint,
false,
"{\"spec\":{\"taints\":null}}",
},
}

for _, tc := range tests {
@ -125,7 +144,7 @@ func TestMarkMaster(t *testing.T) {
t.Fatalf("MarkMaster(%s): unexpected error building clientset: %v", tc.name, err)
}

err = MarkMaster(cs, hostname)
err = MarkMaster(cs, hostname, tc.wantTaint)
if err != nil {
t.Errorf("MarkMaster(%s) returned unexpected error: %v", tc.name, err)
}
@ -530,7 +530,7 @@ func (eac ExtraArgsCheck) Check() (warnings, errors []error) {
}
if len(eac.ControllerManagerExtraArgs) > 0 {
flags := pflag.NewFlagSet("", pflag.ContinueOnError)
s := cmoptions.NewCMServer()
s := cmoptions.NewKubeControllerManagerOptions()
s.AddFlags(flags, []string{}, []string{})
warnings = append(warnings, argsCheck("kube-controller-manager", eac.ControllerManagerExtraArgs, flags)...)
}
@ -975,6 +975,7 @@ func RunJoinNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.NodeConfigura
DirAvailableCheck{Path: filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.ManifestsSubDirName)},
FileAvailableCheck{Path: cfg.CACertPath},
FileAvailableCheck{Path: filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.KubeletKubeConfigFileName)},
FileAvailableCheck{Path: filepath.Join(kubeadmconstants.KubernetesDir, kubeadmconstants.KubeletBootstrapKubeConfigFileName)},
}
if useCRI {
checks = append(checks, CRICheck{socket: criSocket, exec: execer})
@ -999,19 +1000,27 @@ func RunJoinNodeChecks(execer utilsexec.Interface, cfg *kubeadmapi.NodeConfigura
criCtlChecker)
}

var bridgenf6Check Checker
for _, server := range cfg.DiscoveryTokenAPIServers {
ipstr, _, err := net.SplitHostPort(server)
if err == nil {
if ip := net.ParseIP(ipstr); ip != nil {
if ip.To4() == nil && ip.To16() != nil {
checks = append(checks,
FileContentCheck{Path: bridgenf6, Content: []byte{'1'}},
)
break // Ensure that check is added only once
checks = append(checks,
HTTPProxyCheck{Proto: "https", Host: ipstr},
)
if bridgenf6Check == nil {
if ip := net.ParseIP(ipstr); ip != nil {
if ip.To4() == nil && ip.To16() != nil {
// This check should be added only once
bridgenf6Check = FileContentCheck{Path: bridgenf6, Content: []byte{'1'}}
}
}
}
}
}
if bridgenf6Check != nil {
checks = append(checks, bridgenf6Check)
}

return RunChecks(checks, os.Stderr, ignorePreflightErrors)
}
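The rewritten loop can no longer use break to deduplicate, because a per-server HTTPProxyCheck must still be appended for every API server; instead the IPv6 bridge check is built at most once and appended after the loop. The shape of that pattern in isolation (Checker here is a stand-in interface, not the kubeadm one):

```go
package main

import "fmt"

type Checker interface{ Name() string }

type fileContentCheck struct{ path string }

func (f fileContentCheck) Name() string { return "FileContent--" + f.path }

func main() {
	servers := []string{"[fd00::1]:6443", "[fd00::2]:6443"}
	var checks []Checker
	var once Checker // built at most once inside the loop, appended after it
	for range servers {
		// a per-server check would be appended to checks here
		if once == nil {
			once = fileContentCheck{path: "/proc/sys/net/bridge/bridge-nf-call-ip6tables"}
		}
	}
	if once != nil {
		checks = append(checks, once)
	}
	fmt.Println(len(checks)) // 1, no matter how many servers matched
}
```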
@ -23,7 +23,7 @@ DOCKER_OPTS=${DOCKER_OPTS:-""}
DOCKER=(docker ${DOCKER_OPTS})
DOCKERIZE_KUBELET=${DOCKERIZE_KUBELET:-""}
ALLOW_PRIVILEGED=${ALLOW_PRIVILEGED:-""}
ALLOW_SECURITY_CONTEXT=${ALLOW_SECURITY_CONTEXT:-""}
DENY_SECURITY_CONTEXT_ADMISSION=${DENY_SECURITY_CONTEXT_ADMISSION:-""}
PSP_ADMISSION=${PSP_ADMISSION:-""}
NODE_ADMISSION=${NODE_ADMISSION:-""}
RUNTIME_CONFIG=${RUNTIME_CONFIG:-""}
@ -418,7 +418,7 @@ function set_service_accounts {

function start_apiserver {
security_admission=""
if [[ -z "${ALLOW_SECURITY_CONTEXT}" ]]; then
if [[ -n "${DENY_SECURITY_CONTEXT_ADMISSION}" ]]; then
security_admission=",SecurityContextDeny"
fi
if [[ -n "${PSP_ADMISSION}" ]]; then
@ -3503,10 +3503,8 @@ run_kubectl_local_proxy_tests() {

kube::log::status "Testing kubectl local proxy"

# Make sure the UI can be proxied
start-proxy
check-curl-proxy-code /ui 307
check-curl-proxy-code /api/ui 404
check-curl-proxy-code /api/kubernetes 404
check-curl-proxy-code /api/v1/namespaces 200
if kube::test::if_supports_resource "${metrics}" ; then
check-curl-proxy-code /metrics 200
@ -3524,7 +3522,8 @@ run_kubectl_local_proxy_tests() {

# Custom paths let you see everything.
start-proxy /custom
check-curl-proxy-code /custom/ui 307
check-curl-proxy-code /custom/api/kubernetes 404
check-curl-proxy-code /custom/api/v1/namespaces 200
if kube::test::if_supports_resource "${metrics}" ; then
check-curl-proxy-code /custom/metrics 200
fi
@ -72,7 +72,7 @@ INTERNAL_DIRS_CSV=$(IFS=',';echo "${INTERNAL_DIRS[*]// /,}";IFS=$)
# This can be called with one flag, --verify-only, so it works for both the
# update- and verify- scripts.
${clientgen} --input-base="k8s.io/kubernetes/pkg/apis" --input="${INTERNAL_DIRS_CSV}" "$@"
${clientgen} --output-base "${KUBE_ROOT}/vendor" --output-package="k8s.io/client-go" --clientset-name="kubernetes" --input-base="k8s.io/kubernetes/vendor/k8s.io/api" --input="${GV_DIRS_CSV}" "$@"
${clientgen} --output-base "${KUBE_ROOT}/vendor" --output-package="k8s.io/client-go" --clientset-name="kubernetes" --input-base="k8s.io/kubernetes/vendor/k8s.io/api" --input="${GV_DIRS_CSV}" --go-header-file ${KUBE_ROOT}/hack/boilerplate/boilerplate.go.txt "$@"

listergen_internal_apis=(
$(
@ -91,7 +91,7 @@
)
)
listergen_external_apis_csv=$(IFS=,; echo "${listergen_external_apis[*]}")
${listergen} --output-base "${KUBE_ROOT}/vendor" --output-package "k8s.io/client-go/listers" --input-dirs "${listergen_external_apis_csv}" "$@"
${listergen} --output-base "${KUBE_ROOT}/vendor" --output-package "k8s.io/client-go/listers" --input-dirs "${listergen_external_apis_csv}" --go-header-file ${KUBE_ROOT}/hack/boilerplate/boilerplate.go.txt "$@"

informergen_internal_apis=(
$(
@ -105,6 +105,7 @@ ${informergen} \
--input-dirs "${informergen_internal_apis_csv}" \
--internal-clientset-package k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset \
--listers-package k8s.io/kubernetes/pkg/client/listers \
--go-header-file ${KUBE_ROOT}/hack/boilerplate/boilerplate.go.txt \
"$@"

informergen_external_apis=(
@ -124,6 +125,7 @@ ${informergen} \
--input-dirs "${informergen_external_apis_csv}" \
--versioned-clientset-package k8s.io/client-go/kubernetes \
--listers-package k8s.io/client-go/listers \
--go-header-file ${KUBE_ROOT}/hack/boilerplate/boilerplate.go.txt \
"$@"

# You may add additional calls of code generators like set-gen above.
@ -92,5 +92,6 @@ PATH="${KUBE_ROOT}/_output/bin:${PATH}" \
"${gotoprotobuf}" \
--proto-import="${KUBE_ROOT}/vendor" \
--proto-import="${KUBE_ROOT}/third_party/protobuf" \
--packages=$(IFS=, ; echo "${PACKAGES[*]}")
--packages=$(IFS=, ; echo "${PACKAGES[*]}") \
--go-header-file ${KUBE_ROOT}/hack/boilerplate/boilerplate.go.txt \
"$@"
@ -27,7 +27,6 @@ reviewers:
- ncdc
- tallclair
- eparis
- timothysc
- piosz
- jsafrane
- dims

@ -33,7 +33,6 @@ reviewers:
- yifan-gu
- eparis
- mwielgus
- timothysc
- feiskyer
- soltysh
- piosz

@ -9,7 +9,6 @@ reviewers:
- saad-ali
- ncdc
- tallclair
- timothysc
- dims
- errordeveloper
- mml

@ -4,7 +4,6 @@ reviewers:
- wojtek-t
- deads2k
- sttts
- timothysc
- mbohlool
- jianhuiz
- enj

@ -9,7 +9,6 @@ reviewers:
- erictune
- sttts
- ncdc
- timothysc
- dims
- mml
- mbohlool

@ -8,7 +8,6 @@ reviewers:
- erictune
- sttts
- ncdc
- timothysc
- piosz
- dims
- errordeveloper

@ -9,7 +9,6 @@ reviewers:
- sttts
- saad-ali
- ncdc
- timothysc
- soltysh
- dims
- errordeveloper

@ -6,7 +6,6 @@ reviewers:
- caesarxuchao
- liggitt
- sttts
- timothysc
- dims
- errordeveloper
- mbohlool

@ -21,7 +21,6 @@ reviewers:
- ncdc
- yifan-gu
- mwielgus
- timothysc
- feiskyer
- dims
- errordeveloper

@ -37,7 +37,6 @@ reviewers:
- yifan-gu
- eparis
- mwielgus
- timothysc
- soltysh
- piosz
- jsafrane

@ -27,7 +27,6 @@ reviewers:
- ncdc
- tallclair
- eparis
- timothysc
- piosz
- jsafrane
- dims

@ -19,7 +19,6 @@ reviewers:
- ncdc
- tallclair
- mwielgus
- timothysc
- soltysh
- piosz
- dims

@ -5,7 +5,6 @@ reviewers:
- deads2k
- sttts
- ncdc
- timothysc
- dims
- krousey
- mml

@ -1,6 +1,7 @@
approvers:
- justinsb
- zmerlynn
- gnufied
reviewers:
- gnufied
- jsafrane
@ -22,10 +22,8 @@ import (
"fmt"
"net/url"
"regexp"
"sync"

"strconv"
"strings"
"sync"
"sync/atomic"
"time"

@ -60,15 +58,11 @@ type BlobDiskController struct {
}

var (
defaultContainerName = ""
storageAccountNamePrefix = ""
storageAccountNameMatch = ""
accountsLock = &sync.Mutex{}
accountsLock = &sync.Mutex{}
)

func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error) {
c := BlobDiskController{common: common}
c.setUniqueStrings()

// get accounts
accounts, err := c.getAllStorageAccounts()
@ -84,46 +78,26 @@ func newBlobDiskController(common *controllerCommon) (*BlobDiskController, error
// CreateVolume creates a VHD blob in a storage account that has storageType and location using the given storage account.
// If no storage account is given, search all the storage accounts associated with the resource group and pick one that
// fits storage type and location.
func (c *BlobDiskController) CreateVolume(name, storageAccount, storageAccountType, location string, requestGB int) (string, string, int, error) {
var err error
accounts := []accountWithLocation{}
if len(storageAccount) > 0 {
accounts = append(accounts, accountWithLocation{Name: storageAccount})
} else {
// find a storage account
accounts, err = c.common.cloud.getStorageAccounts(storageAccountType, location)
if err != nil {
// TODO: create a storage account and container
return "", "", 0, err
}
func (c *BlobDiskController) CreateVolume(blobName, accountName, accountType, location string, requestGB int) (string, string, int, error) {
account, key, err := c.common.cloud.ensureStorageAccount(accountName, accountType, location, dedicatedDiskAccountNamePrefix)
if err != nil {
return "", "", 0, fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
}
for _, account := range accounts {
glog.V(4).Infof("account %s type %s location %s", account.Name, account.StorageType, account.Location)
if (storageAccountType == "" || account.StorageType == storageAccountType) && (location == "" || account.Location == location) || len(storageAccount) > 0 {
// find the access key with this account
key, err := c.common.cloud.getStorageAccesskey(account.Name)
if err != nil {
glog.V(2).Infof("no key found for storage account %s", account.Name)
continue
}

client, err := azstorage.NewBasicClientOnSovereignCloud(account.Name, key, c.common.cloud.Environment)
if err != nil {
return "", "", 0, err
}
blobClient := client.GetBlobService()

// create a page blob in this account's vhd container
diskName, diskURI, err := c.createVHDBlobDisk(blobClient, account.Name, name, vhdContainerName, int64(requestGB))
if err != nil {
return "", "", 0, err
}

glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI)
return diskName, diskURI, requestGB, err
}
client, err := azstorage.NewBasicClientOnSovereignCloud(account, key, c.common.cloud.Environment)
if err != nil {
return "", "", 0, err
}
return "", "", 0, fmt.Errorf("failed to find a matching storage account")
blobClient := client.GetBlobService()

// create a page blob in this account's vhd container
diskName, diskURI, err := c.createVHDBlobDisk(blobClient, account, blobName, vhdContainerName, int64(requestGB))
if err != nil {
return "", "", 0, err
}

glog.V(4).Infof("azureDisk - created vhd blob uri: %s", diskURI)
return diskName, diskURI, requestGB, err
}
// DeleteVolume deletes a VHD blob
@ -252,7 +226,7 @@ func (c *BlobDiskController) CreateBlobDisk(dataDiskName string, storageAccountT
return "", err
}

_, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccountName, dataDiskName, defaultContainerName, int64(sizeGB))
_, diskURI, err := c.createVHDBlobDisk(blobClient, storageAccountName, dataDiskName, vhdContainerName, int64(sizeGB))
if err != nil {
return "", err
}
@ -281,9 +255,9 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
return err
}

glog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, defaultContainerName)
glog.V(4).Infof("azureDisk - About to delete vhd file %s on storage account %s container %s", vhdName, storageAccountName, vhdContainerName)

container := blobSvc.GetContainerReference(defaultContainerName)
container := blobSvc.GetContainerReference(vhdContainerName)
blob := container.GetBlobReference(vhdName)
_, err = blob.DeleteIfExists(nil)

@ -299,19 +273,6 @@ func (c *BlobDiskController) DeleteBlobDisk(diskURI string) error {
return err
}

//Sets unique strings to be used as accountnames && || blob containers names
func (c *BlobDiskController) setUniqueStrings() {
uniqueString := c.common.resourceGroup + c.common.location + c.common.subscriptionID
hash := MakeCRC32(uniqueString)
//used to generate a unique container name used by this cluster PVC
defaultContainerName = hash

storageAccountNamePrefix = fmt.Sprintf(storageAccountNameTemplate, hash)
// Used to filter relevant accounts (accounts used by shared PVC)
storageAccountNameMatch = storageAccountNamePrefix
// Used as a template to create new names for relevant accounts
storageAccountNamePrefix = storageAccountNamePrefix + "%s"
}
func (c *BlobDiskController) getStorageAccountKey(SAName string) (string, error) {
if account, exists := c.accounts[SAName]; exists && account.key != "" {
return c.accounts[SAName].key, nil
@ -426,13 +387,13 @@ func (c *BlobDiskController) ensureDefaultContainer(storageAccountName string) e
return err
}

container := blobSvc.GetContainerReference(defaultContainerName)
container := blobSvc.GetContainerReference(vhdContainerName)
bCreated, err := container.CreateIfNotExists(&azstorage.CreateContainerOptions{Access: azstorage.ContainerAccessTypePrivate})
if err != nil {
return err
}
if bCreated {
glog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, defaultContainerName)
glog.V(2).Infof("azureDisk - storage account:%s had no default container(%s) and it was created \n", storageAccountName, vhdContainerName)
}

// flag so we no longer have to check on ARM
@ -459,7 +420,7 @@ func (c *BlobDiskController) getDiskCount(SAName string) (int, error) {
}
params := azstorage.ListBlobsParameters{}

container := blobSvc.GetContainerReference(defaultContainerName)
container := blobSvc.GetContainerReference(vhdContainerName)
response, err := container.ListBlobs(params)
if err != nil {
return 0, err
@ -481,13 +442,13 @@ func (c *BlobDiskController) getAllStorageAccounts() (map[string]*storageAccount

accounts := make(map[string]*storageAccountState)
for _, v := range *accountListResult.Value {
if strings.Index(*v.Name, storageAccountNameMatch) != 0 {
continue
}
if v.Name == nil || v.Sku == nil {
glog.Info("azureDisk - accountListResult Name or Sku is nil")
continue
}
if !strings.HasPrefix(*v.Name, sharedDiskAccountNamePrefix) {
continue
}
glog.Infof("azureDisk - identified account %s as part of shared PVC accounts", *v.Name)

sastate := &storageAccountState{
@ -555,7 +516,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
countAccounts := 0 // account of this type.
for _, v := range c.accounts {
// filter out any stand-alone disks/accounts
if strings.Index(v.name, storageAccountNameMatch) != 0 {
if !strings.HasPrefix(v.name, sharedDiskAccountNamePrefix) {
continue
}

@ -587,7 +548,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
// if we failed to find storageaccount
if SAName == "" {
glog.V(2).Infof("azureDisk - failed to identify a suitable account for new disk and will attempt to create new account")
SAName = getAccountNameForNum(c.getNextAccountNum())
SAName = generateStorageAccountName(sharedDiskAccountNamePrefix)
err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
if err != nil {
return "", err
@ -603,7 +564,7 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
// avg are not create and we should create more accounts if we can
if aboveAvg && countAccounts < maxStorageAccounts {
glog.V(2).Infof("azureDisk - shared storageAccounts utilization(%v) > grow-at-avg-utilization (%v). New storage account will be created", avgUtilization, storageAccountUtilizationBeforeGrowing)
SAName = getAccountNameForNum(c.getNextAccountNum())
SAName = generateStorageAccountName(sharedDiskAccountNamePrefix)
err := c.createStorageAccount(SAName, storageAccountType, c.common.location, true)
if err != nil {
return "", err
@ -620,22 +581,6 @@ func (c *BlobDiskController) findSANameForDisk(storageAccountType storage.SkuNam
// we found a storage accounts && [ avg are ok || we reached max sa count ]
return SAName, nil
}
func (c *BlobDiskController) getNextAccountNum() int {
max := 0

for k := range c.accounts {
// filter out accounts that are for standalone
if strings.Index(k, storageAccountNameMatch) != 0 {
continue
}
num := getAccountNumFromName(k)
if num > max {
max = num
}
}

return max + 1
}

//Gets storage account exist, provisionStatus, Error if any
func (c *BlobDiskController) getStorageAccountState(storageAccountName string) (bool, storage.ProvisioningState, error) {
@ -655,27 +600,6 @@ func (c *BlobDiskController) addAccountState(key string, state *storageAccountSt
}
}

// pads account num with zeros as needed
func getAccountNameForNum(num int) string {
sNum := strconv.Itoa(num)
missingZeros := 3 - len(sNum)
strZero := ""
for missingZeros > 0 {
strZero = strZero + "0"
missingZeros = missingZeros - 1
}

sNum = strZero + sNum
return fmt.Sprintf(storageAccountNamePrefix, sNum)
}

func getAccountNumFromName(accountName string) int {
nameLen := len(accountName)
num, _ := strconv.Atoi(accountName[nameLen-3:])

return num
}

func createVHDHeader(size uint64) ([]byte, error) {
h := vhd.CreateFixedHeader(size, &vhd.VHDOptions{})
b := new(bytes.Buffer)

@ -20,66 +20,28 @@ import (
"fmt"

"github.com/Azure/azure-sdk-for-go/arm/storage"
"github.com/Azure/go-autorest/autorest/to"
"github.com/golang/glog"
)

const (
defaultStorageAccountType = string(storage.StandardLRS)
fileShareAccountNamePrefix = "f"
defaultStorageAccountType = string(storage.StandardLRS)
fileShareAccountNamePrefix = "f"
sharedDiskAccountNamePrefix = "ds"
dedicatedDiskAccountNamePrefix = "dd"
)
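The patch replaces the numbered getAccountNameForNum scheme with generateStorageAccountName plus the prefixes above, but the generator itself is not shown in this diff. A plausible, purely hypothetical sketch of prefix-plus-random-suffix generation under Azure's 3-24 lowercase-alphanumeric account-name limit (only the function name and prefixes come from the patch):

```go
package main

import (
	"fmt"
	"math/rand"
)

const maxStorageAccountNameLen = 24 // Azure account names are 3-24 lowercase alphanumerics

// generateStorageAccountName is a hypothetical stand-in for the helper the
// patch calls with prefixes such as "ds", "dd", and "f".
func generateStorageAccountName(prefix string) string {
	const letters = "abcdefghijklmnopqrstuvwxyz0123456789"
	suffix := make([]byte, maxStorageAccountNameLen-len(prefix))
	for i := range suffix {
		suffix[i] = letters[rand.Intn(len(letters))] // seed elsewhere for non-deterministic names
	}
	return prefix + string(suffix)
}

func main() {
	fmt.Println(generateStorageAccountName("ds")) // e.g. a 24-character "ds..."-prefixed name
}
```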
// CreateFileShare creates a file share, using a matching storage account
func (az *Cloud) CreateFileShare(shareName, accountName, accountType, location string, requestGiB int) (string, string, error) {
if len(accountName) == 0 {
// find a storage account that matches accountType
accounts, err := az.getStorageAccounts(accountType, location)
if err != nil {
return "", "", fmt.Errorf("could not list storage accounts for account type %s: %v", accountType, err)
}

if len(accounts) > 0 {
accountName = accounts[0].Name
glog.V(4).Infof("found a matching account %s type %s location %s", accounts[0].Name, accounts[0].StorageType, accounts[0].Location)
}

if len(accountName) == 0 {
// not found a matching account, now create a new account in current resource group
accountName = generateStorageAccountName(fileShareAccountNamePrefix)
if location == "" {
location = az.Location
}
if accountType == "" {
accountType = defaultStorageAccountType
}

glog.V(2).Infof("azureFile - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s",
accountName, az.ResourceGroup, location, accountType)
cp := storage.AccountCreateParameters{
Sku: &storage.Sku{Name: storage.SkuName(accountType)},
Tags: &map[string]*string{"created-by": to.StringPtr("azure-file")},
Location: &location}
cancel := make(chan struct{})

_, errchan := az.StorageAccountClient.Create(az.ResourceGroup, accountName, cp, cancel)
err := <-errchan
if err != nil {
return "", "", fmt.Errorf(fmt.Sprintf("Failed to create storage account %s, error: %s", accountName, err))
}
}
}

// find the access key with this account
accountKey, err := az.getStorageAccesskey(accountName)
account, key, err := az.ensureStorageAccount(accountName, accountType, location, fileShareAccountNamePrefix)
if err != nil {
return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
}

if err := az.createFileShare(accountName, accountKey, shareName, requestGiB); err != nil {
return "", "", fmt.Errorf("failed to create share %s in account %s: %v", shareName, accountName, err)
if err := az.createFileShare(account, key, shareName, requestGiB); err != nil {
return "", "", fmt.Errorf("failed to create share %s in account %s: %v", shareName, account, err)
}
glog.V(4).Infof("created share %s in account %s", shareName, accountName)
return accountName, accountKey, nil
glog.V(4).Infof("created share %s in account %s", shareName, account)
return account, key, nil
}

// DeleteFileShare deletes a file share using storage account name and key
@ -19,13 +19,17 @@ package azure
import (
"fmt"
"strings"

"github.com/Azure/azure-sdk-for-go/arm/storage"
"github.com/Azure/go-autorest/autorest/to"
"github.com/golang/glog"
)

type accountWithLocation struct {
Name, StorageType, Location string
}

// getStorageAccounts gets name, type, location of all storage accounts in a resource group which matches matchingAccountType
// getStorageAccounts gets name, type, location of all storage accounts in a resource group which matches matchingAccountType, matchingLocation
func (az *Cloud) getStorageAccounts(matchingAccountType, matchingLocation string) ([]accountWithLocation, error) {
result, err := az.StorageAccountClient.ListByResourceGroup(az.ResourceGroup)
if err != nil {
@ -75,3 +79,52 @@ func (az *Cloud) getStorageAccesskey(account string) (string, error) {
}
return "", fmt.Errorf("no valid keys")
}

// ensureStorageAccount searches for a storage account, creates one (named with genAccountNamePrefix) if none is found, and returns accountName, accountKey
func (az *Cloud) ensureStorageAccount(accountName, accountType, location, genAccountNamePrefix string) (string, string, error) {
if len(accountName) == 0 {
// find a storage account that matches accountType
accounts, err := az.getStorageAccounts(accountType, location)
if err != nil {
return "", "", fmt.Errorf("could not list storage accounts for account type %s: %v", accountType, err)
}

if len(accounts) > 0 {
accountName = accounts[0].Name
glog.V(4).Infof("found a matching account %s type %s location %s", accounts[0].Name, accounts[0].StorageType, accounts[0].Location)
}

if len(accountName) == 0 {
// no matching account found, so create a new account in the current resource group
accountName = generateStorageAccountName(genAccountNamePrefix)
if location == "" {
location = az.Location
}
if accountType == "" {
accountType = defaultStorageAccountType
}

glog.V(2).Infof("azure - no matching account found, begin to create a new account %s in resource group %s, location: %s, accountType: %s",
accountName, az.ResourceGroup, location, accountType)
cp := storage.AccountCreateParameters{
Sku: &storage.Sku{Name: storage.SkuName(accountType)},
Tags: &map[string]*string{"created-by": to.StringPtr("azure")},
Location: &location}
cancel := make(chan struct{})

_, errchan := az.StorageAccountClient.Create(az.ResourceGroup, accountName, cp, cancel)
err := <-errchan
if err != nil {
return "", "", fmt.Errorf(fmt.Sprintf("Failed to create storage account %s, error: %s", accountName, err))
}
}
}

// find the access key with this account
accountKey, err := az.getStorageAccesskey(accountName)
if err != nil {
return "", "", fmt.Errorf("could not get storage key for storage account %s: %v", accountName, err)
}

return accountName, accountKey, nil
}
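With the lookup/create/key logic centralized in ensureStorageAccount, both CreateVolume and CreateFileShare collapse to one call with a different prefix. A toy model of the ensure pattern the patch factors out (this is a runnable sketch with a map standing in for the ARM client, not the cloud-provider code):

```go
package main

import (
	"errors"
	"fmt"
)

type cloud struct {
	accounts map[string]string // name -> key; stand-in for the storage account client
}

func (c *cloud) ensureStorageAccount(name, prefix string) (string, string, error) {
	if name == "" {
		for existing := range c.accounts {
			name = existing // reuse the first matching account
			break
		}
	}
	if name == "" {
		name = prefix + "generated" // "create" a new account under the prefix
		c.accounts[name] = "new-key"
	}
	key, ok := c.accounts[name]
	if !ok {
		return "", "", errors.New("could not get storage key for storage account " + name)
	}
	return name, key, nil
}

func main() {
	c := &cloud{accounts: map[string]string{}}
	name, key, err := c.ensureStorageAccount("", "f")
	fmt.Println(name, key, err) // fgenerated new-key <nil>
}
```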
@ -341,19 +341,17 @@ func (o *BuiltInAuthenticationOptions) ApplyTo(c *genericapiserver.Config) error

var err error
if o.ClientCert != nil {
c, err = c.ApplyClientCert(o.ClientCert.ClientCA)
if err != nil {
if err = c.Authentication.ApplyClientCert(o.ClientCert.ClientCA, c.SecureServing); err != nil {
return fmt.Errorf("unable to load client CA file: %v", err)
}
}
if o.RequestHeader != nil {
c, err = c.ApplyClientCert(o.RequestHeader.ClientCAFile)
if err != nil {
if err = c.Authentication.ApplyClientCert(o.RequestHeader.ClientCAFile, c.SecureServing); err != nil {
return fmt.Errorf("unable to load client CA file: %v", err)
}
}

c.SupportsBasicAuth = o.PasswordFile != nil && len(o.PasswordFile.BasicAuthFile) > 0
c.Authentication.SupportsBasicAuth = o.PasswordFile != nil && len(o.PasswordFile.BasicAuthFile) > 0

return nil
}
@ -33,15 +33,15 @@ import (

// NewSecureServingOptions gives default values for the kube-apiserver which are not the options wanted by
// "normal" API servers running on the platform
func NewSecureServingOptions() *genericoptions.SecureServingOptions {
return &genericoptions.SecureServingOptions{
func NewSecureServingOptions() *genericoptions.SecureServingOptionsWithLoopback {
return genericoptions.WithLoopback(&genericoptions.SecureServingOptions{
BindAddress: net.ParseIP("0.0.0.0"),
BindPort: 6443,
ServerCert: genericoptions.GeneratableKeyCert{
PairName: "apiserver",
CertDirectory: "/var/run/kubernetes",
},
}
})
}
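WithLoopback, as used above, decorates the plain secure-serving options so a loopback client config can be derived for in-process clients. A toy illustration of that decorator shape, assuming only what the hunk above shows (this is not the actual apiserver code):

```go
package main

import "fmt"

type SecureServingOptions struct{ BindPort int }

// Decorator in the WithLoopback style: embed the original options and extend behavior.
type SecureServingOptionsWithLoopback struct{ *SecureServingOptions }

func WithLoopback(o *SecureServingOptions) *SecureServingOptionsWithLoopback {
	return &SecureServingOptionsWithLoopback{o}
}

func (s *SecureServingOptionsWithLoopback) ApplyTo() {
	// the real wrapper additionally wires a self-signed cert and loopback client config
	fmt.Println("serving on port", s.BindPort)
}

func main() {
	WithLoopback(&SecureServingOptions{BindPort: 6443}).ApplyTo()
}
```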

// DefaultAdvertiseAddress sets the field AdvertiseAddress if
@ -23,6 +23,7 @@ import (
|
||||
"net/url"
|
||||
"os"
|
||||
"os/signal"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
@ -51,15 +52,32 @@ type PortForwardOptions struct {
|
||||
}
|
||||
|
||||
var (
|
||||
portforwardLong = templates.LongDesc(i18n.T(`
|
||||
Forward one or more local ports to a pod.
|
||||
|
||||
Use resource type/name such as deployment/mydeployment to select a pod. Resource type defaults to 'pod' if omitted.
|
||||
|
||||
If there are multiple pods matching the criteria, a pod will be selected automatically. The
|
||||
forwarding session ends when the selected pod terminates, and rerun of the command is needed
|
||||
to resume forwarding.`))
|
||||
|
||||
portforwardExample = templates.Examples(i18n.T(`
|
||||
# Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in the pod
|
||||
kubectl port-forward mypod 5000 6000
|
||||
kubectl port-forward pod/mypod 5000 6000
|
||||
|
||||
# Listen on ports 5000 and 6000 locally, forwarding data to/from ports 5000 and 6000 in a pod selected by the deployment
|
||||
kubectl port-forward deployment/mydeployment 5000 6000
|
||||
|
||||
# Listen on port 8888 locally, forwarding to 5000 in the pod
|
||||
kubectl port-forward mypod 8888:5000
|
||||
kubectl port-forward pod/mypod 8888:5000
|
||||
|
||||
# Listen on a random port locally, forwarding to 5000 in the pod
|
||||
kubectl port-forward mypod :5000`))
|
||||
kubectl port-forward pod/mypod :5000`))
|
||||
)
|
||||
|
||||
const (
|
||||
// Amount of time to wait until at least one pod is running
|
||||
defaultPodPortForwardWaitTimeout = 60 * time.Second
|
||||
)
|
||||
|
||||
func NewCmdPortForward(f cmdutil.Factory, cmdOut, cmdErr io.Writer) *cobra.Command {
|
||||
@ -70,10 +88,10 @@ func NewCmdPortForward(f cmdutil.Factory, cmdOut, cmdErr io.Writer) *cobra.Comma
|
||||
},
|
||||
}
|
||||
cmd := &cobra.Command{
|
||||
Use: "port-forward POD [LOCAL_PORT:]REMOTE_PORT [...[LOCAL_PORT_N:]REMOTE_PORT_N]",
|
||||
Use: "port-forward TYPE/NAME [LOCAL_PORT:]REMOTE_PORT [...[LOCAL_PORT_N:]REMOTE_PORT_N]",
|
||||
DisableFlagsInUseLine: true,
|
||||
Short: i18n.T("Forward one or more local ports to a pod"),
|
||||
Long: "Forward one or more local ports to a pod.",
|
||||
Long: portforwardLong,
|
||||
Example: portforwardExample,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
if err := opts.Complete(f, cmd, args); err != nil {
|
||||
@ -87,7 +105,7 @@ func NewCmdPortForward(f cmdutil.Factory, cmdOut, cmdErr io.Writer) *cobra.Comma
|
||||
}
|
||||
},
|
||||
}
|
||||
cmd.Flags().StringP("pod", "p", "", "Pod name")
|
||||
cmdutil.AddPodRunningTimeoutFlag(cmd, defaultPodPortForwardWaitTimeout)
|
||||
// TODO support UID
|
||||
return cmd
|
||||
}
|
||||
@ -116,17 +134,8 @@ func (f *defaultPortForwarder) ForwardPorts(method string, url *url.URL, opts PortForwardOptions) error {
// Complete completes all the required options for port-forward cmd.
func (o *PortForwardOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error {
	var err error
	o.PodName = cmdutil.GetFlagString(cmd, "pod")
	if len(o.PodName) == 0 && len(args) == 0 {
		return cmdutil.UsageErrorf(cmd, "POD is required for port-forward")
	}

	if len(o.PodName) != 0 {
		printDeprecationWarning("port-forward POD", "-p POD")
		o.Ports = args
	} else {
		o.PodName = args[0]
		o.Ports = args[1:]
	if len(args) < 2 {
		return cmdutil.UsageErrorf(cmd, "TYPE/NAME and list of ports are required for port-forward")
	}

	o.Namespace, _, err = f.DefaultNamespace()
@ -134,6 +143,32 @@ func (o *PortForwardOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error {
		return err
	}

	builder := f.NewBuilder().
		Internal().
		ContinueOnError().
		NamespaceParam(o.Namespace).DefaultNamespace()

	getPodTimeout, err := cmdutil.GetPodRunningTimeoutFlag(cmd)
	if err != nil {
		return cmdutil.UsageErrorf(cmd, err.Error())
	}

	resourceName := args[0]
	builder.ResourceNames("pods", resourceName)

	obj, err := builder.Do().Object()
	if err != nil {
		return err
	}

	forwardablePod, err := f.AttachablePodForObject(obj, getPodTimeout)
	if err != nil {
		return err
	}

	o.PodName = forwardablePod.Name
	o.Ports = args[1:]

	clientset, err := f.ClientSet()
	if err != nil {
		return err
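With this change, args[0] is no longer assumed to be a pod name: the resource builder resolves it to an arbitrary API object (passing "pods" as the default type keeps bare pod names working), and AttachablePodForObject then walks from that object down to one concrete pod. A hypothetical helper condensing the same flow; the name resolvePod, the *api.Pod return type, and the surrounding imports are assumptions for illustration, not code from this commit:

// Hypothetical helper, not part of this commit; condenses the flow above.
func resolvePod(f cmdutil.Factory, namespace, resourceName string, timeout time.Duration) (*api.Pod, error) {
	obj, err := f.NewBuilder().
		Internal().
		ContinueOnError().
		NamespaceParam(namespace).DefaultNamespace().
		ResourceNames("pods", resourceName). // "pods" applies when no TYPE/ prefix is given
		Do().Object()
	if err != nil {
		return nil, err
	}
	// Deployment, ReplicaSet, Pod, ...: resolve to one attachable pod.
	return f.AttachablePodForObject(obj, timeout)
}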
@ -157,7 +192,7 @@ func (o *PortForwardOptions) Complete(f cmdutil.Factory, cmd *cobra.Command, args []string) error {
// Validate validates all the required options for port-forward cmd.
func (o PortForwardOptions) Validate() error {
	if len(o.PodName) == 0 {
		return fmt.Errorf("pod name must be specified")
		return fmt.Errorf("pod name or resource type/name must be specified")
	}

	if len(o.Ports) < 1 {
@ -70,6 +70,7 @@ func testPortForward(t *testing.T, flags map[string]string, args []string) {
	var err error
	f, tf, codec, ns := cmdtesting.NewAPIFactory()
	tf.Client = &fake.RESTClient{
		VersionedAPIPath:     "/api/v1",
		GroupVersion:         schema.GroupVersion{Group: ""},
		NegotiatedSerializer: ns,
		Client: fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {
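The fake client above is simply an *http.Client whose transport is the supplied function. A stripped-down stand-in for that function body; the canned empty-JSON response is illustrative only (the real test switches on req.URL.Path and returns a serialized Pod), and net/http, io/ioutil, and strings are assumed imported:

// Illustrative stub, not code from this commit.
stub := func(req *http.Request) (*http.Response, error) {
	body := ioutil.NopCloser(strings.NewReader(`{}`))
	return &http.Response{StatusCode: http.StatusOK, Body: body}, nil
}
client := fake.CreateHTTPClient(stub)
_ = client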
@ -131,7 +132,3 @@ func testPortForward(t *testing.T, flags map[string]string, args []string) {
func TestPortForward(t *testing.T) {
	testPortForward(t, nil, []string{"foo", ":5000", ":1000"})
}

func TestPortForwardWithPFlag(t *testing.T) {
	testPortForward(t, map[string]string{"pod": "foo"}, []string{":5000", ":1000"})
}
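TestPortForwardWithPFlag disappears along with the -p flag itself. A hypothetical test covering the new TYPE/NAME spelling through the same helper could look as follows; whether this fake factory resolves the prefixed form is an assumption, not something this diff shows:

// Hypothetical test, not part of this commit.
func TestPortForwardWithTypeSlashName(t *testing.T) {
	testPortForward(t, nil, []string{"pod/foo", ":5000", ":1000"})
}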
@ -209,10 +209,10 @@ type KubeletConfiguration struct {
	SerializeImagePulls *bool `json:"serializeImagePulls"`
	// Map of signal names to quantities that defines hard eviction thresholds. For example: {"memory.available": "300Mi"}.
	// +optional
	EvictionHard map[string]string `json:"evictionHard"`
	EvictionHard map[string]string `json:"evictionHard,omitempty"`
	// Map of signal names to quantities that defines soft eviction thresholds. For example: {"memory.available": "300Mi"}.
	// +optional
	EvictionSoft map[string]string `json:"evictionSoft"`
	EvictionSoft map[string]string `json:"evictionSoft,omitempty"`
	// Map of signal names to quantities that defines grace periods for each soft eviction signal. For example: {"memory.available": "30s"}.
	// +optional
	EvictionSoftGracePeriod map[string]string `json:"evictionSoftGracePeriod"`
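Adding ,omitempty means a nil map is dropped from the serialized configuration instead of being emitted as "evictionHard": null, which matters when round-tripping partially specified configs. A small, self-contained illustration with stand-in types (not the real KubeletConfiguration):

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in types for illustration only.
type before struct {
	EvictionHard map[string]string `json:"evictionHard"`
}

type after struct {
	EvictionHard map[string]string `json:"evictionHard,omitempty"`
}

func main() {
	a, _ := json.Marshal(before{}) // nil map is serialized as null
	b, _ := json.Marshal(after{})  // nil map is omitted entirely
	fmt.Println(string(a)) // {"evictionHard":null}
	fmt.Println(string(b)) // {}
}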
Some files were not shown because too many files have changed in this diff.