diff --git a/cluster/gce/config-default.sh b/cluster/gce/config-default.sh
index 108250847f9..0fbd89da041 100755
--- a/cluster/gce/config-default.sh
+++ b/cluster/gce/config-default.sh
@@ -125,6 +125,8 @@ NODE_SCOPES="${NODE_SCOPES:-monitoring,logging-write,storage-ro}"
 # Extra docker options for nodes.
 EXTRA_DOCKER_OPTS="${EXTRA_DOCKER_OPTS:-}"

+VOLUME_PLUGIN_DIR="${VOLUME_PLUGIN_DIR:-/etc/srv/kubernetes/kubelet-plugins/volume/exec}"
+
 SERVICE_CLUSTER_IP_RANGE="${SERVICE_CLUSTER_IP_RANGE:-10.0.0.0/16}"  # formerly PORTAL_NET
 ALLOCATE_NODE_CIDRS=true
diff --git a/cluster/gce/config-test.sh b/cluster/gce/config-test.sh
index e915ed3494c..7d83125bffe 100755
--- a/cluster/gce/config-test.sh
+++ b/cluster/gce/config-test.sh
@@ -175,6 +175,8 @@ CONTROLLER_MANAGER_TEST_LOG_LEVEL="${CONTROLLER_MANAGER_TEST_LOG_LEVEL:-$TEST_CL
 SCHEDULER_TEST_LOG_LEVEL="${SCHEDULER_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"
 KUBEPROXY_TEST_LOG_LEVEL="${KUBEPROXY_TEST_LOG_LEVEL:-$TEST_CLUSTER_LOG_LEVEL}"

+VOLUME_PLUGIN_DIR="${VOLUME_PLUGIN_DIR:-/etc/srv/kubernetes/kubelet-plugins/volume/exec}"
+
 # TODO: change this and flex e2e test when default flex volume install path is changed for GCI
 # Set flex dir to one that's readable from controller-manager container and writable by the flex e2e test.
 if [[ "${MASTER_OS_DISTRIBUTION}" == "gci" ]]; then
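Review note: both config files rely on bash's `${VAR:-default}` expansion, so the new FlexVolume directory only applies when the deployer has not already exported `VOLUME_PLUGIN_DIR`. A minimal sketch of the pattern (the override path and kube-up invocation below are illustrative, not part of this change):

```bash
#!/usr/bin/env bash
# ${VAR:-default} keeps an already-exported value and falls back to the default otherwise.
VOLUME_PLUGIN_DIR="${VOLUME_PLUGIN_DIR:-/etc/srv/kubernetes/kubelet-plugins/volume/exec}"
echo "FlexVolume plugin dir: ${VOLUME_PLUGIN_DIR}"

# Exporting a value before the config is sourced wins over the default, e.g. (hypothetical):
#   VOLUME_PLUGIN_DIR=/home/kubernetes/flexvolume cluster/kube-up.sh
```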
- flags+=" --pod-cidr=${MASTER_IP_RANGE}" - fi - else # For nodes - flags+=" ${NODE_KUBELET_TEST_ARGS:-}" - flags+=" --enable-debugging-handlers=true" - flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig" - flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig" - if [[ "${HAIRPIN_MODE:-}" == "promiscuous-bridge" ]] || \ - [[ "${HAIRPIN_MODE:-}" == "hairpin-veth" ]] || \ - [[ "${HAIRPIN_MODE:-}" == "none" ]]; then - flags+=" --hairpin-mode=${HAIRPIN_MODE}" - fi - flags+=" --anonymous-auth=false --authorization-mode=Webhook --client-ca-file=${CA_CERT_BUNDLE_PATH}" - fi - # Network plugin - if [[ -n "${NETWORK_PROVIDER:-}" || -n "${NETWORK_POLICY_PROVIDER:-}" ]]; then - flags+=" --cni-bin-dir=/home/kubernetes/bin" - if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then - # Calico uses CNI always. - # Keep KUBERNETES_PRIVATE_MASTER for backward compatibility. - # Note that network policy won't work for master node. - if [[ "${KUBERNETES_PRIVATE_MASTER:-}" == "true" || "${KUBERNETES_MASTER:-}" == "true" ]]; then - flags+=" --network-plugin=${NETWORK_PROVIDER}" - else - flags+=" --network-plugin=cni" - fi - else - # Otherwise use the configured value. - flags+=" --network-plugin=${NETWORK_PROVIDER}" - fi - fi - if [[ -n "${NON_MASQUERADE_CIDR:-}" ]]; then - flags+=" --non-masquerade-cidr=${NON_MASQUERADE_CIDR}" - fi - # FlexVolume plugin - if [[ -n "${VOLUME_PLUGIN_DIR:-}" ]]; then - flags+=" --volume-plugin-dir=${VOLUME_PLUGIN_DIR}" - fi - if [[ "${ENABLE_MANIFEST_URL:-}" == "true" ]]; then - flags+=" --manifest-url=${MANIFEST_URL}" - flags+=" --manifest-url-header=${MANIFEST_URL_HEADER}" - fi - if [[ -n "${ENABLE_CUSTOM_METRICS:-}" ]]; then - flags+=" --enable-custom-metrics=${ENABLE_CUSTOM_METRICS}" - fi - local node_labels="" - if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" && "${KUBERNETES_MASTER:-}" != "true" ]]; then - # Add kube-proxy daemonset label to node to avoid situation during cluster - # upgrade/downgrade when there are two instances of kube-proxy running on a node. - node_labels="beta.kubernetes.io/kube-proxy-ds-ready=true" - fi - if [[ -n "${NODE_LABELS:-}" ]]; then - node_labels="${node_labels:+${node_labels},}${NODE_LABELS}" - fi - if [[ -n "${NON_MASTER_NODE_LABELS:-}" && "${KUBERNETES_MASTER:-}" != "true" ]]; then - node_labels="${node_labels:+${node_labels},}${NON_MASTER_NODE_LABELS}" - fi - if [[ -n "${node_labels:-}" ]]; then - flags+=" --node-labels=${node_labels}" - fi - if [[ -n "${NODE_TAINTS:-}" ]]; then - flags+=" --register-with-taints=${NODE_TAINTS}" - fi - if [[ -n "${EVICTION_HARD:-}" ]]; then - flags+=" --eviction-hard=${EVICTION_HARD}" - fi - if [[ -n "${FEATURE_GATES:-}" ]]; then - flags+=" --feature-gates=${FEATURE_GATES}" - fi - if [[ -n "${ROTATE_CERTIFICATES:-}" ]]; then - flags+=" --rotate-certificates=true" - fi - if [[ -n "${CONTAINER_RUNTIME:-}" ]]; then - flags+=" --container-runtime=${CONTAINER_RUNTIME}" - fi - if [[ -n "${CONTAINER_RUNTIME_ENDPOINT:-}" ]]; then - flags+=" --container-runtime-endpoint=${CONTAINER_RUNTIME_ENDPOINT}" - fi - local -r kubelet_env_file="/etc/default/kubelet" - echo "KUBELET_OPTS=\"${flags}\"" > "${kubelet_env_file}" + echo "KUBELET_OPTS=\"${KUBELET_ARGS}\"" > "${kubelet_env_file}" # Write the systemd service file for kubelet. 
diff --git a/cluster/gce/util.sh b/cluster/gce/util.sh
index c462c7895c7..e7d87893623 100755
--- a/cluster/gce/util.sh
+++ b/cluster/gce/util.sh
@@ -500,6 +500,7 @@ function write-master-env {
     KUBERNETES_MASTER_NAME="${MASTER_NAME}"
   fi

+  construct-kubelet-flags true
   build-kube-env true "${KUBE_TEMP}/master-kube-env.yaml"
   build-kube-master-certs "${KUBE_TEMP}/kube-master-certs.yaml"
 }
@@ -509,9 +510,120 @@ function write-node-env {
     KUBERNETES_MASTER_NAME="${MASTER_NAME}"
   fi

+  construct-kubelet-flags false
   build-kube-env false "${KUBE_TEMP}/node-kube-env.yaml"
 }

+# $1: if 'true', we're rendering flags for a master, else a node
+function construct-kubelet-flags {
+  local master=$1
+  local flags="${KUBELET_TEST_LOG_LEVEL:-"--v=2"} ${KUBELET_TEST_ARGS:-}"
+  flags+=" --allow-privileged=true"
+  flags+=" --cgroup-root=/"
+  flags+=" --cloud-provider=gce"
+  flags+=" --cluster-dns=${DNS_SERVER_IP}"
+  flags+=" --cluster-domain=${DNS_DOMAIN}"
+  flags+=" --pod-manifest-path=/etc/kubernetes/manifests"
+  # Keep in sync with CONTAINERIZED_MOUNTER_HOME in configure-helper.sh
+  flags+=" --experimental-mounter-path=/home/kubernetes/containerized_mounter/mounter"
+  flags+=" --experimental-check-node-capabilities-before-mount=true"
+  # Keep in sync with the mkdir command in configure-helper.sh (until the TODO is resolved)
+  flags+=" --cert-dir=/var/lib/kubelet/pki/"
+
+  if [[ "${master}" == "true" ]]; then
+    flags+=" ${MASTER_KUBELET_TEST_ARGS:-}"
+    flags+=" --enable-debugging-handlers=false"
+    flags+=" --hairpin-mode=none"
+    if [[ "${REGISTER_MASTER_KUBELET:-false}" == "true" ]]; then
+      #TODO(mikedanese): allow static pods to start before creating a client
+      #flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
+      #flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig"
+      flags+=" --kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
+      flags+=" --register-schedulable=false"
+    else
+      # Note: Standalone mode is used by GKE
+      flags+=" --pod-cidr=${MASTER_IP_RANGE}"
+    fi
+  else # For nodes
+    flags+=" ${NODE_KUBELET_TEST_ARGS:-}"
+    flags+=" --enable-debugging-handlers=true"
+    flags+=" --bootstrap-kubeconfig=/var/lib/kubelet/bootstrap-kubeconfig"
+    flags+=" --kubeconfig=/var/lib/kubelet/kubeconfig"
+    if [[ "${HAIRPIN_MODE:-}" == "promiscuous-bridge" ]] || \
+       [[ "${HAIRPIN_MODE:-}" == "hairpin-veth" ]] || \
+       [[ "${HAIRPIN_MODE:-}" == "none" ]]; then
+      flags+=" --hairpin-mode=${HAIRPIN_MODE}"
+    fi
+    # Keep client-ca-file in sync with CA_CERT_BUNDLE_PATH in configure-helper.sh
+    flags+=" --anonymous-auth=false --authorization-mode=Webhook --client-ca-file=/etc/srv/kubernetes/pki/ca-certificates.crt"
+  fi
+  # Network plugin
+  if [[ -n "${NETWORK_PROVIDER:-}" || -n "${NETWORK_POLICY_PROVIDER:-}" ]]; then
+    flags+=" --cni-bin-dir=/home/kubernetes/bin"
+    if [[ "${NETWORK_POLICY_PROVIDER:-}" == "calico" ]]; then
+      # Calico uses CNI always.
+      # Note that network policy won't work for master node.
+      if [[ "${master}" == "true" ]]; then
+        flags+=" --network-plugin=${NETWORK_PROVIDER}"
+      else
+        flags+=" --network-plugin=cni"
+      fi
+    else
+      # Otherwise use the configured value.
+      flags+=" --network-plugin=${NETWORK_PROVIDER}"
+    fi
+  fi
+  if [[ -n "${NON_MASQUERADE_CIDR:-}" ]]; then
+    flags+=" --non-masquerade-cidr=${NON_MASQUERADE_CIDR}"
+  fi
+  flags+=" --volume-plugin-dir=${VOLUME_PLUGIN_DIR}"
+  # Note: ENABLE_MANIFEST_URL is used by GKE
+  if [[ "${ENABLE_MANIFEST_URL:-}" == "true" ]]; then
+    flags+=" --manifest-url=${MANIFEST_URL}"
+    flags+=" --manifest-url-header=${MANIFEST_URL_HEADER}"
+  fi
+  if [[ -n "${ENABLE_CUSTOM_METRICS:-}" ]]; then
+    flags+=" --enable-custom-metrics=${ENABLE_CUSTOM_METRICS}"
+  fi
+  local node_labels=""
+  if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" && "${master}" != "true" ]]; then
+    # Add kube-proxy daemonset label to node to avoid situation during cluster
+    # upgrade/downgrade when there are two instances of kube-proxy running on a node.
+    node_labels="beta.kubernetes.io/kube-proxy-ds-ready=true"
+  fi
+  if [[ -n "${NODE_LABELS:-}" ]]; then
+    node_labels="${node_labels:+${node_labels},}${NODE_LABELS}"
+  fi
+  if [[ -n "${NON_MASTER_NODE_LABELS:-}" && "${master}" != "true" ]]; then
+    node_labels="${node_labels:+${node_labels},}${NON_MASTER_NODE_LABELS}"
+  fi
+  if [[ -n "${node_labels:-}" ]]; then
+    flags+=" --node-labels=${node_labels}"
+  fi
+  if [[ -n "${NODE_TAINTS:-}" ]]; then
+    flags+=" --register-with-taints=${NODE_TAINTS}"
+  fi
+  if [[ -n "${EVICTION_HARD:-}" ]]; then
+    flags+=" --eviction-hard=${EVICTION_HARD}"
+  fi
+  if [[ -n "${FEATURE_GATES:-}" ]]; then
+    flags+=" --feature-gates=${FEATURE_GATES}"
+  fi
+  # TODO(mtaufen): ROTATE_CERTIFICATES seems unused; delete it?
+  if [[ -n "${ROTATE_CERTIFICATES:-}" ]]; then
+    flags+=" --rotate-certificates=true"
+  fi
+  if [[ -n "${CONTAINER_RUNTIME:-}" ]]; then
+    flags+=" --container-runtime=${CONTAINER_RUNTIME}"
+  fi
+  # TODO(mtaufen): CONTAINER_RUNTIME_ENDPOINT seems unused; delete it?
+  if [[ -n "${CONTAINER_RUNTIME_ENDPOINT:-}" ]]; then
+    flags+=" --container-runtime-endpoint=${CONTAINER_RUNTIME_ENDPOINT}"
+  fi
+
+  KUBELET_ARGS="${flags}"
+}
+
 function build-kube-master-certs {
   local file=$1
   rm -f ${file}
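Review note: `construct-kubelet-flags` publishes its result through the global `KUBELET_ARGS`, which `build-kube-env` then serializes into the kube-env file. A rough usage sketch with placeholder inputs; the real callers are `write-master-env`/`write-node-env` after the config scripts have been sourced, and the values below are illustrative only:

```bash
#!/usr/bin/env bash
# Placeholder inputs normally provided by config-default.sh / config-test.sh:
DNS_SERVER_IP="10.0.0.10"
DNS_DOMAIN="cluster.local"
MASTER_IP_RANGE="10.246.0.0/24"
VOLUME_PLUGIN_DIR="/etc/srv/kubernetes/kubelet-plugins/volume/exec"

source cluster/gce/util.sh        # assumption: run from the kubernetes repo root

construct-kubelet-flags true      # master flags (standalone path, since REGISTER_MASTER_KUBELET is unset)
echo "master: ${KUBELET_ARGS}"

construct-kubelet-flags false     # node flags
echo "node:   ${KUBELET_ARGS}"
```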
+ flags+=" --network-plugin=${NETWORK_PROVIDER}" + fi + fi + if [[ -n "${NON_MASQUERADE_CIDR:-}" ]]; then + flags+=" --non-masquerade-cidr=${NON_MASQUERADE_CIDR}" + fi + flags+=" --volume-plugin-dir=${VOLUME_PLUGIN_DIR}" + # Note: ENABLE_MANIFEST_URL is used by GKE + if [[ "${ENABLE_MANIFEST_URL:-}" == "true" ]]; then + flags+=" --manifest-url=${MANIFEST_URL}" + flags+=" --manifest-url-header=${MANIFEST_URL_HEADER}" + fi + if [[ -n "${ENABLE_CUSTOM_METRICS:-}" ]]; then + flags+=" --enable-custom-metrics=${ENABLE_CUSTOM_METRICS}" + fi + local node_labels="" + if [[ "${KUBE_PROXY_DAEMONSET:-}" == "true" && "${master}" != "true" ]]; then + # Add kube-proxy daemonset label to node to avoid situation during cluster + # upgrade/downgrade when there are two instances of kube-proxy running on a node. + node_labels="beta.kubernetes.io/kube-proxy-ds-ready=true" + fi + if [[ -n "${NODE_LABELS:-}" ]]; then + node_labels="${node_labels:+${node_labels},}${NODE_LABELS}" + fi + if [[ -n "${NON_MASTER_NODE_LABELS:-}" && "${master}" != "true" ]]; then + node_labels="${node_labels:+${node_labels},}${NON_MASTER_NODE_LABELS}" + fi + if [[ -n "${node_labels:-}" ]]; then + flags+=" --node-labels=${node_labels}" + fi + if [[ -n "${NODE_TAINTS:-}" ]]; then + flags+=" --register-with-taints=${NODE_TAINTS}" + fi + if [[ -n "${EVICTION_HARD:-}" ]]; then + flags+=" --eviction-hard=${EVICTION_HARD}" + fi + if [[ -n "${FEATURE_GATES:-}" ]]; then + flags+=" --feature-gates=${FEATURE_GATES}" + fi + # TODO(mtaufen): ROTATE_CERTIFICATES seems unused; delete it? + if [[ -n "${ROTATE_CERTIFICATES:-}" ]]; then + flags+=" --rotate-certificates=true" + fi + if [[ -n "${CONTAINER_RUNTIME:-}" ]]; then + flags+=" --container-runtime=${CONTAINER_RUNTIME}" + fi + # TODO(mtaufen): CONTAINER_RUNTIME_ENDPOINT seems unused; delete it? + if [[ -n "${CONTAINER_RUNTIME_ENDPOINT:-}" ]]; then + flags+=" --container-runtime-endpoint=${CONTAINER_RUNTIME_ENDPOINT}" + fi + + KUBELET_ARGS="${flags}" +} + function build-kube-master-certs { local file=$1 rm -f ${file} @@ -622,12 +734,9 @@ CONTAINER_RUNTIME_NAME: $(yaml-quote ${CONTAINER_RUNTIME_NAME:-}) NODE_LOCAL_SSDS_EXT: $(yaml-quote ${NODE_LOCAL_SSDS_EXT:-}) LOAD_IMAGE_COMMAND: $(yaml-quote ${LOAD_IMAGE_COMMAND:-}) ZONE: $(yaml-quote ${ZONE}) +VOLUME_PLUGIN_DIR: $(yaml-quote ${VOLUME_PLUGIN_DIR}) +KUBELET_ARGS: $(yaml-quote ${KUBELET_ARGS}) EOF - if [ -n "${KUBELET_PORT:-}" ]; then - cat >>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <>$file <