mirror of https://github.com/k3s-io/kubernetes.git
synced 2025-10-30 21:30:16 +00:00

725 lines | 22 KiB | Bash | Executable File
#!/usr/bin/env bash

# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

function kube::util::sourced_variable {
  # Call this function to tell shellcheck that a variable is supposed to
  # be used from other calling contexts. This helps quiet an "unused
  # variable" warning from shellcheck and also documents your code.
  true
}

kube::util::sortable_date() {
  date "+%Y%m%d-%H%M%S"
}

# arguments: target, item1, item2, item3, ...
# returns 0 if target is in the given items, 1 otherwise.
kube::util::array_contains() {
  local search="$1"
  local element
  shift
  for element; do
    if [[ "${element}" == "${search}" ]]; then
      return 0
    fi
  done
  return 1
}

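# Illustrative usage sketch for kube::util::array_contains (not part of the
# upstream file; the values below are made-up examples):
#
#   supported=("linux/amd64" "linux/arm64" "darwin/amd64")
#   if kube::util::array_contains "$(kube::util::host_platform)" "${supported[@]}"; then
#     echo "platform is supported"
#   fi
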
kube::util::wait_for_url() {
  local url=$1
  local prefix=${2:-}
  local wait=${3:-1}
  local times=${4:-30}
  local maxtime=${5:-1}

  command -v curl >/dev/null || {
    kube::log::usage "curl must be installed"
    exit 1
  }

  local i
  for i in $(seq 1 "${times}"); do
    local out
    if out=$(curl --max-time "${maxtime}" -gkfs "${url}" 2>/dev/null); then
      kube::log::status "On try ${i}, ${prefix}: ${out}"
      return 0
    fi
    sleep "${wait}"
  done
  kube::log::error "Timed out waiting for ${prefix} to answer at ${url}; tried ${times} times, waiting ${wait}s between each attempt"
  return 1
}

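# Illustrative usage sketch for kube::util::wait_for_url (args: url, log prefix,
# seconds between tries, number of tries, per-request timeout; the values below
# are example assumptions, not taken from the upstream file):
#
#   kube::util::wait_for_url "http://127.0.0.1:8080/healthz" "apiserver: " 1 30 1
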
# Example:  kube::util::trap_add 'echo "in trap DEBUG"' DEBUG
# See: http://stackoverflow.com/questions/3338030/multiple-bash-traps-for-the-same-signal
kube::util::trap_add() {
  local trap_add_cmd
  trap_add_cmd=$1
  shift

  for trap_add_name in "$@"; do
    local existing_cmd
    local new_cmd

    # Grab the currently defined trap commands for this trap
    existing_cmd=$(trap -p "${trap_add_name}" |  awk -F"'" '{print $2}')

    if [[ -z "${existing_cmd}" ]]; then
      new_cmd="${trap_add_cmd}"
    else
      new_cmd="${trap_add_cmd};${existing_cmd}"
    fi

    # Assign the trap. Disable the shellcheck warning that says trap
    # commands should be single quoted so they are evaluated at run time
    # rather than at this point. The logic of adding new commands to a
    # single trap requires them to be evaluated right away.
    # shellcheck disable=SC2064
    trap "${new_cmd}" "${trap_add_name}"
  done
}

# Opposite of kube::util::ensure-temp-dir()
kube::util::cleanup-temp-dir() {
  rm -rf "${KUBE_TEMP}"
}

# Create a temp dir that'll be deleted at the end of this bash session.
#
# Vars set:
#   KUBE_TEMP
kube::util::ensure-temp-dir() {
  if [[ -z ${KUBE_TEMP-} ]]; then
    KUBE_TEMP=$(mktemp -d 2>/dev/null || mktemp -d -t kubernetes.XXXXXX)
    kube::util::trap_add kube::util::cleanup-temp-dir EXIT
  fi
}

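# Illustrative usage sketch for kube::util::ensure-temp-dir (the file name is an
# example assumption): after the call, ${KUBE_TEMP} is set and the directory is
# removed on EXIT via kube::util::trap_add.
#
#   kube::util::ensure-temp-dir
#   echo "scratch data" > "${KUBE_TEMP}/scratch.txt"
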
kube::util::host_os() {
  local host_os
  case "$(uname -s)" in
    Darwin)
      host_os=darwin
      ;;
    Linux)
      host_os=linux
      ;;
    *)
      kube::log::error "Unsupported host OS.  Must be Linux or Mac OS X."
      exit 1
      ;;
  esac
  echo "${host_os}"
}

kube::util::host_arch() {
  local host_arch
  case "$(uname -m)" in
    x86_64*)
      host_arch=amd64
      ;;
    i?86_64*)
      host_arch=amd64
      ;;
    amd64*)
      host_arch=amd64
      ;;
    aarch64*)
      host_arch=arm64
      ;;
    arm64*)
      host_arch=arm64
      ;;
    arm*)
      host_arch=arm
      ;;
    i?86*)
      host_arch=x86
      ;;
    s390x*)
      host_arch=s390x
      ;;
    ppc64le*)
      host_arch=ppc64le
      ;;
    *)
      kube::log::error "Unsupported host arch. Must be x86_64, 386, arm, arm64, s390x or ppc64le."
      exit 1
      ;;
  esac
  echo "${host_arch}"
}

# This figures out the host platform without relying on golang.  We need this as
# we don't want a golang install to be a prerequisite to building yet we need
# this info to figure out where the final binaries are placed.
kube::util::host_platform() {
  echo "$(kube::util::host_os)/$(kube::util::host_arch)"
}

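# Illustrative usage sketch for kube::util::host_platform: on a typical x86_64
# Linux machine this prints "linux/amd64" (the exact value depends on the host).
#
#   platform=$(kube::util::host_platform)
#   echo "building for ${platform}"
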
kube::util::find-binary-for-platform() {
  local -r lookfor="$1"
  local -r platform="$2"
  local locations=(
    "${KUBE_ROOT}/_output/bin/${lookfor}"
    "${KUBE_ROOT}/_output/dockerized/bin/${platform}/${lookfor}"
    "${KUBE_ROOT}/_output/local/bin/${platform}/${lookfor}"
    "${KUBE_ROOT}/platforms/${platform}/${lookfor}"
  )
  # Also search for binary in bazel build tree.
  # The bazel go rules place some binaries in subtrees like
  # "bazel-bin/source/path/linux_amd64_pure_stripped/binaryname", so make sure
  # the platform name is matched in the path.
  while IFS=$'\n' read -r location; do
    locations+=("$location");
  done < <(find "${KUBE_ROOT}/bazel-bin/" -type f -executable \
    \( -path "*/${platform/\//_}*/${lookfor}" -o -path "*/${lookfor}" \) 2>/dev/null || true)

  # List most recently-updated location.
  local -r bin=$( (ls -t "${locations[@]}" 2>/dev/null || true) | head -1 )
  echo -n "${bin}"
}

kube::util::find-binary() {
  kube::util::find-binary-for-platform "$1" "$(kube::util::host_platform)"
}

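# Illustrative usage sketch for kube::util::find-binary (assumes KUBE_ROOT is
# set and kubectl has already been built into one of the searched locations):
#
#   kubectl_bin=$(kube::util::find-binary "kubectl")
#   [[ -n "${kubectl_bin}" ]] && "${kubectl_bin}" version --client
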
# Run all known doc generators (today gendocs and genman for kubectl)
# $1 is the directory to put those generated documents
kube::util::gen-docs() {
  local dest="$1"

  # Find binary
  gendocs=$(kube::util::find-binary "gendocs")
  genkubedocs=$(kube::util::find-binary "genkubedocs")
  genman=$(kube::util::find-binary "genman")
  genyaml=$(kube::util::find-binary "genyaml")
  genfeddocs=$(kube::util::find-binary "genfeddocs")

  # TODO: If ${genfeddocs} is not used from anywhere (it isn't used at
  # least from k/k tree), remove it completely.
  kube::util::sourced_variable "${genfeddocs}"

  mkdir -p "${dest}/docs/user-guide/kubectl/"
  "${gendocs}" "${dest}/docs/user-guide/kubectl/"
  mkdir -p "${dest}/docs/admin/"
  "${genkubedocs}" "${dest}/docs/admin/" "kube-apiserver"
  "${genkubedocs}" "${dest}/docs/admin/" "kube-controller-manager"
  "${genkubedocs}" "${dest}/docs/admin/" "cloud-controller-manager"
  "${genkubedocs}" "${dest}/docs/admin/" "kube-proxy"
  "${genkubedocs}" "${dest}/docs/admin/" "kube-scheduler"
  "${genkubedocs}" "${dest}/docs/admin/" "kubelet"
  "${genkubedocs}" "${dest}/docs/admin/" "kubeadm"

  mkdir -p "${dest}/docs/man/man1/"
  "${genman}" "${dest}/docs/man/man1/" "kube-apiserver"
  "${genman}" "${dest}/docs/man/man1/" "kube-controller-manager"
  "${genman}" "${dest}/docs/man/man1/" "cloud-controller-manager"
  "${genman}" "${dest}/docs/man/man1/" "kube-proxy"
  "${genman}" "${dest}/docs/man/man1/" "kube-scheduler"
  "${genman}" "${dest}/docs/man/man1/" "kubelet"
  "${genman}" "${dest}/docs/man/man1/" "kubectl"
  "${genman}" "${dest}/docs/man/man1/" "kubeadm"

  mkdir -p "${dest}/docs/yaml/kubectl/"
  "${genyaml}" "${dest}/docs/yaml/kubectl/"

  # create the list of generated files
  pushd "${dest}" > /dev/null || return 1
  touch docs/.generated_docs
  find . -type f | cut -sd / -f 2- | LC_ALL=C sort > docs/.generated_docs
  popd > /dev/null || return 1
}

# Removes previously generated docs; we don't want to check them in. $KUBE_ROOT
# must be set.
kube::util::remove-gen-docs() {
  if [ -e "${KUBE_ROOT}/docs/.generated_docs" ]; then
    # remove all of the old docs; we don't want to check them in.
    while read -r file; do
      rm "${KUBE_ROOT}/${file}" 2>/dev/null || true
    done <"${KUBE_ROOT}/docs/.generated_docs"
    # The docs/.generated_docs file lists itself, so we don't need to explicitly
    # delete it.
  fi
}

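# Illustrative usage sketches for the doc helpers (the destination directory is
# an example assumption; both calls assume the generator binaries are built and
# KUBE_ROOT is set):
#
#   kube::util::gen-docs "${KUBE_TEMP}"   # generate docs into a scratch dir
#   kube::util::remove-gen-docs           # remove files listed in ${KUBE_ROOT}/docs/.generated_docs
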
# Takes a group/version and returns the path to its location on disk, sans
# "pkg". E.g.:
# * default behavior: extensions/v1beta1 -> apis/extensions/v1beta1
# * default behavior for only a group: experimental -> apis/experimental
# * Special handling for empty group: v1 -> api/v1, unversioned -> api/unversioned
# * Special handling for groups suffixed with ".k8s.io": foo.k8s.io/v1 -> apis/foo/v1
# * Very special handling for when both group and version are "": / -> api
kube::util::group-version-to-pkg-path() {
  local group_version="$1"

  while IFS=$'\n' read -r api; do
    if [[ "${api}" = "${group_version/.*k8s.io/}" ]]; then
      echo "vendor/k8s.io/api/${group_version/.*k8s.io/}"
      return
    fi
  done < <(cd "${KUBE_ROOT}/staging/src/k8s.io/api" && find . -name types.go -exec dirname {} \; | sed "s|\./||g" | sort)

  # "v1" is the API GroupVersion
  if [[ "${group_version}" == "v1" ]]; then
    echo "vendor/k8s.io/api/core/v1"
    return
  fi

  # Special cases first.
  # TODO(lavalamp): Simplify this by moving pkg/api/v1 and splitting pkg/api,
  # moving the results to pkg/apis/api.
  case "${group_version}" in
    # both group and version are "", this occurs when we generate deep copies for internal objects of the legacy v1 API.
    __internal)
      echo "pkg/apis/core"
      ;;
    meta/v1)
      echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1"
      ;;
    meta/v1beta1)
      echo "vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1"
      ;;
    *.k8s.io)
      echo "pkg/apis/${group_version%.*k8s.io}"
      ;;
    *.k8s.io/*)
      echo "pkg/apis/${group_version/.*k8s.io/}"
      ;;
    *)
      echo "pkg/apis/${group_version%__internal}"
      ;;
  esac
}

# Takes a group/version and returns the swagger-spec file name.
# default behavior: extensions/v1beta1 -> extensions_v1beta1
# special case for v1: v1 -> v1
kube::util::gv-to-swagger-name() {
  local group_version="$1"
  case "${group_version}" in
    v1)
      echo "v1"
      ;;
    *)
      echo "${group_version%/*}_${group_version#*/}"
      ;;
  esac
}

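# Illustrative usage sketch for the group/version helpers (the group/versions
# below are example inputs; the outputs follow from the logic above):
#
#   kube::util::gv-to-swagger-name "extensions/v1beta1"   # -> extensions_v1beta1
#   kube::util::gv-to-swagger-name "v1"                   # -> v1
#   kube::util::group-version-to-pkg-path "meta/v1"       # -> vendor/k8s.io/apimachinery/pkg/apis/meta/v1
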
# Returns the name of the upstream remote repository for the local git
# repo, e.g. "upstream" or "origin".
kube::util::git_upstream_remote_name() {
  git remote -v | grep fetch |\
    grep -E 'github.com[/:]kubernetes/kubernetes|k8s.io/kubernetes' |\
    head -n 1 | awk '{print $1}'
}

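# Illustrative usage sketch (the remote name printed depends on how the local
# clone is configured):
#
#   upstream=$(kube::util::git_upstream_remote_name)
#   git fetch "${upstream}"
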
# Exits the script if the working directory is dirty. If run interactively in a
# terminal, the user can commit changes in a second terminal and this function will wait.
kube::util::ensure_clean_working_dir() {
  while ! git diff HEAD --exit-code &>/dev/null; do
    echo -e "\nUnexpected dirty working directory:\n"
    if tty -s; then
        git status -s
    else
        git diff -a # be more verbose in log files without tty
        exit 1
    fi | sed 's/^/  /'
    echo -e "\nCommit your changes in another terminal and then continue here by pressing enter."
    read -r
  done 1>&2
}

# Find the base commit using:
# $PULL_BASE_SHA if set (from Prow)
# current ref from the remote upstream branch
kube::util::base_ref() {
  local -r git_branch=$1

  if [[ -n ${PULL_BASE_SHA:-} ]]; then
    echo "${PULL_BASE_SHA}"
    return
  fi

  full_branch="$(kube::util::git_upstream_remote_name)/${git_branch}"

  # make sure the branch is valid, otherwise the check will pass erroneously.
  if ! git describe "${full_branch}" >/dev/null; then
    # abort!
    exit 1
  fi

  echo "${full_branch}"
}

# Checks whether there are any files matching pattern $2 changed between the
# current branch and upstream branch named by $1.
# Returns 1 (false) if there are no changes
#         0 (true) if there are changes detected.
kube::util::has_changes() {
  local -r git_branch=$1
  local -r pattern=$2
  local -r not_pattern=${3:-totallyimpossiblepattern}

  local base_ref
  base_ref=$(kube::util::base_ref "${git_branch}")
  echo "Checking for '${pattern}' changes against '${base_ref}'"

  # notice this uses ... to find the first shared ancestor
  if git diff --name-only "${base_ref}...HEAD" | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
    return 0
  fi
  # also check for pending changes
  if git status --porcelain | grep -v -E "${not_pattern}" | grep "${pattern}" > /dev/null; then
    echo "Detected '${pattern}' uncommitted changes."
    return 0
  fi
  echo "No '${pattern}' changes detected."
  return 1
}

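# Illustrative usage sketch for kube::util::has_changes (the branch and patterns
# are example assumptions): check whether any Go files changed relative to
# master, ignoring generated zz_* files.
#
#   if kube::util::has_changes "master" "\.go$" "zz_generated"; then
#     echo "Go changes detected"
#   fi
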
kube::util::download_file() {
  local -r url=$1
  local -r destination_file=$2

  rm "${destination_file}" &> /dev/null || true

  for i in $(seq 5)
  do
    if ! curl -fsSL --retry 3 --keepalive-time 2 "${url}" -o "${destination_file}"; then
      echo "Downloading ${url} failed. $((5-i)) retries left."
      sleep 1
    else
      echo "Downloading ${url} succeeded"
      return 0
    fi
  done
  return 1
}

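# Illustrative usage sketch for kube::util::download_file (the URL and
# destination are example assumptions):
#
#   kube::util::download_file "https://dl.k8s.io/release/stable.txt" "${KUBE_TEMP}/stable.txt" \
#     || { echo "download failed"; exit 1; }
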
# Test whether openssl is installed.
# Sets:
#  OPENSSL_BIN: The path to the openssl binary to use
function kube::util::test_openssl_installed {
    if ! openssl version >& /dev/null; then
      echo "Failed to run openssl. Please ensure openssl is installed"
      exit 1
    fi

    OPENSSL_BIN=$(command -v openssl)
}

# creates a client CA, args are sudo, dest-dir, ca-id, purpose
# purpose is dropped in after "key encipherment", you usually want
# '"client auth"'
# '"server auth"'
# '"client auth","server auth"'
function kube::util::create_signing_certkey {
    local sudo=$1
    local dest_dir=$2
    local id=$3
    local purpose=$4
    # Create client ca
    ${sudo} /usr/bin/env bash -e <<EOF
    rm -f "${dest_dir}/${id}-ca.crt" "${dest_dir}/${id}-ca.key"
    ${OPENSSL_BIN} req -x509 -sha256 -new -nodes -days 365 -newkey rsa:2048 -keyout "${dest_dir}/${id}-ca.key" -out "${dest_dir}/${id}-ca.crt" -subj "/C=xx/ST=x/L=x/O=x/OU=x/CN=ca/emailAddress=x/"
    echo '{"signing":{"default":{"expiry":"43800h","usages":["signing","key encipherment",${purpose}]}}}' > "${dest_dir}/${id}-ca-config.json"
EOF
}

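# Illustrative usage sketch for kube::util::create_signing_certkey (requires
# kube::util::test_openssl_installed to have set OPENSSL_BIN; the empty sudo,
# destination directory, and CA id "client" are example assumptions):
#
#   kube::util::test_openssl_installed
#   kube::util::create_signing_certkey "" "${KUBE_TEMP}/certs" "client" '"client auth"'
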
# signs a client certificate: args are sudo, dest-dir, CA, filename (roughly), username, groups...
function kube::util::create_client_certkey {
    local sudo=$1
    local dest_dir=$2
    local ca=$3
    local id=$4
    local cn=${5:-$4}
    local groups=""
    local SEP=""
    shift 5
    while [ -n "${1:-}" ]; do
        groups+="${SEP}{\"O\":\"$1\"}"
        SEP=","
        shift 1
    done
    ${sudo} /usr/bin/env bash -e <<EOF
    cd ${dest_dir}
    echo '{"CN":"${cn}","names":[${groups}],"hosts":[""],"key":{"algo":"rsa","size":2048}}' | ${CFSSL_BIN} gencert -ca=${ca}.crt -ca-key=${ca}.key -config=${ca}-config.json - | ${CFSSLJSON_BIN} -bare client-${id}
    mv "client-${id}-key.pem" "client-${id}.key"
    mv "client-${id}.pem" "client-${id}.crt"
    rm -f "client-${id}.csr"
EOF
}

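# Illustrative usage sketch for kube::util::create_client_certkey (assumes
# CFSSL_BIN/CFSSLJSON_BIN were set by kube::util::ensure-cfssl and that a
# signing CA created with id "client" (files client-ca.crt/.key) already exists
# in the directory; the user and group names are example assumptions):
#
#   kube::util::create_client_certkey "" "${KUBE_TEMP}/certs" "client-ca" \
#     admin admin system:masters
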
# signs a serving certificate: args are sudo, dest-dir, ca, filename (roughly), subject, hosts...
function kube::util::create_serving_certkey {
    local sudo=$1
    local dest_dir=$2
    local ca=$3
    local id=$4
    local cn=${5:-$4}
    local hosts=""
    local SEP=""
    shift 5
    while [ -n "${1:-}" ]; do
        hosts+="${SEP}\"$1\""
        SEP=","
        shift 1
    done
    ${sudo} /usr/bin/env bash -e <<EOF
    cd ${dest_dir}
    echo '{"CN":"${cn}","hosts":[${hosts}],"key":{"algo":"rsa","size":2048}}' | ${CFSSL_BIN} gencert -ca=${ca}.crt -ca-key=${ca}.key -config=${ca}-config.json - | ${CFSSLJSON_BIN} -bare serving-${id}
    mv "serving-${id}-key.pem" "serving-${id}.key"
    mv "serving-${id}.pem" "serving-${id}.crt"
    rm -f "serving-${id}.csr"
EOF
}

# creates a self-contained kubeconfig: args are sudo, dest-dir, ca file, host, port, client id, token(optional)
function kube::util::write_client_kubeconfig {
    local sudo=$1
    local dest_dir=$2
    local ca_file=$3
    local api_host=$4
    local api_port=$5
    local client_id=$6
    local token=${7:-}
    cat <<EOF | ${sudo} tee "${dest_dir}"/"${client_id}".kubeconfig > /dev/null
apiVersion: v1
kind: Config
clusters:
  - cluster:
      certificate-authority: ${ca_file}
      server: https://${api_host}:${api_port}/
    name: local-up-cluster
users:
  - user:
      token: ${token}
      client-certificate: ${dest_dir}/client-${client_id}.crt
      client-key: ${dest_dir}/client-${client_id}.key
    name: local-up-cluster
contexts:
  - context:
      cluster: local-up-cluster
      user: local-up-cluster
    name: local-up-cluster
current-context: local-up-cluster
EOF

    # flatten the kubeconfig files to make them self contained
    username=$(whoami)
    ${sudo} /usr/bin/env bash -e <<EOF
    $(kube::util::find-binary kubectl) --kubeconfig="${dest_dir}/${client_id}.kubeconfig" config view --minify --flatten > "/tmp/${client_id}.kubeconfig"
    mv -f "/tmp/${client_id}.kubeconfig" "${dest_dir}/${client_id}.kubeconfig"
    chown ${username} "${dest_dir}/${client_id}.kubeconfig"
EOF
}

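# Illustrative usage sketch for kube::util::write_client_kubeconfig (the CA
# file, host, port, and client id are example assumptions; the optional token
# argument is omitted, so the client certificate/key pair is used):
#
#   kube::util::write_client_kubeconfig "" "${KUBE_TEMP}/certs" \
#     "${KUBE_TEMP}/certs/client-ca.crt" "127.0.0.1" "6443" "admin"
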
# Determines if docker can be run; failures may simply require that the user be added to the docker group.
function kube::util::ensure_docker_daemon_connectivity {
  IFS=" " read -ra DOCKER <<< "${DOCKER_OPTS}"
  # Expand ${DOCKER[@]} only if it's not unset. This is to work around
  # Bash 3 issue with unbound variable.
  DOCKER=(docker ${DOCKER[@]:+"${DOCKER[@]}"})
  if ! "${DOCKER[@]}" info > /dev/null 2>&1 ; then
    cat <<'EOF' >&2
Can't connect to 'docker' daemon. Please fix and retry.

Possible causes:
  - Docker Daemon not started
    - Linux: confirm via your init system
    - macOS w/ docker-machine: run `docker-machine ls` and `docker-machine start <name>`
    - macOS w/ Docker for Mac: Check the menu bar and start the Docker application
  - DOCKER_HOST hasn't been set or is set incorrectly
    - Linux: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
    - macOS w/ docker-machine: run `eval "$(docker-machine env <name>)"`
    - macOS w/ Docker for Mac: domain socket is used, DOCKER_* should be unset. In Bash run `unset ${!DOCKER_*}`
  - Other things to check:
    - Linux: User isn't in 'docker' group.  Add and relogin.
      - Something like 'sudo usermod -a -G docker ${USER}'
      - RHEL7 bug and workaround: https://bugzilla.redhat.com/show_bug.cgi?id=1119282#c8
EOF
    return 1
  fi
}

# Wait for background jobs to finish. Return with
# an error status if any of the jobs failed.
kube::util::wait-for-jobs() {
  local fail=0
  local job
  for job in $(jobs -p); do
    wait "${job}" || fail=$((fail + 1))
  done
  return ${fail}
}

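# Illustrative usage sketch for kube::util::wait-for-jobs (the background
# commands are example stand-ins for real work):
#
#   make target-a &
#   make target-b &
#   kube::util::wait-for-jobs || { echo "one or more jobs failed"; exit 1; }
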
# kube::util::join <delim> <list...>
# Concatenates the list elements with the delimiter passed as first parameter
#
# Ex: kube::util::join , a b c
#  -> a,b,c
function kube::util::join {
  local IFS="$1"
  shift
  echo "$*"
}

# Downloads cfssl/cfssljson into $1 directory if they do not already exist in PATH
#
# Assumed vars:
#   $1 (cfssl directory) (optional)
#
# Sets:
#  CFSSL_BIN: The path of the installed cfssl binary
#  CFSSLJSON_BIN: The path of the installed cfssljson binary
#
function kube::util::ensure-cfssl {
  if command -v cfssl &>/dev/null && command -v cfssljson &>/dev/null; then
    CFSSL_BIN=$(command -v cfssl)
    CFSSLJSON_BIN=$(command -v cfssljson)
    return 0
  fi

  host_arch=$(kube::util::host_arch)

  if [[ "${host_arch}" != "amd64" ]]; then
    echo "Cannot download cfssl on non-amd64 hosts and cfssl does not appear to be installed."
    echo "Please install cfssl and cfssljson and verify they are in \$PATH."
    echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..."
    exit 1
  fi

  # Create a temp dir for cfssl if no directory was given
  local cfssldir=${1:-}
  if [[ -z "${cfssldir}" ]]; then
    kube::util::ensure-temp-dir
    cfssldir="${KUBE_TEMP}/cfssl"
  fi

  mkdir -p "${cfssldir}"
  pushd "${cfssldir}" > /dev/null || return 1

    echo "Unable to successfully run 'cfssl' from ${PATH}; downloading instead..."
    kernel=$(uname -s)
    case "${kernel}" in
      Linux)
        curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_linux-amd64
        curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64
        ;;
      Darwin)
        curl --retry 10 -L -o cfssl https://pkg.cfssl.org/R1.2/cfssl_darwin-amd64
        curl --retry 10 -L -o cfssljson https://pkg.cfssl.org/R1.2/cfssljson_darwin-amd64
        ;;
      *)
        echo "Unknown, unsupported platform: ${kernel}." >&2
        echo "Supported platforms: Linux, Darwin." >&2
        exit 2
    esac

    chmod +x cfssl || true
    chmod +x cfssljson || true

    CFSSL_BIN="${cfssldir}/cfssl"
    CFSSLJSON_BIN="${cfssldir}/cfssljson"
    if [[ ! -x ${CFSSL_BIN} || ! -x ${CFSSLJSON_BIN} ]]; then
      echo "Failed to download 'cfssl'. Please install cfssl and cfssljson and verify they are in \$PATH."
      echo "Hint: export PATH=\$PATH:\$GOPATH/bin; go get -u github.com/cloudflare/cfssl/cmd/..."
      exit 1
    fi
  popd > /dev/null || return 1
}

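# Illustrative usage sketch for kube::util::ensure-cfssl: after the call,
# CFSSL_BIN and CFSSLJSON_BIN point at usable binaries (either found on PATH or
# downloaded into the given directory, which here is an example assumption):
#
#   kube::util::ensure-cfssl "${KUBE_TEMP}/cfssl"
#   "${CFSSL_BIN}" version
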
# kube::util::ensure_dockerized
# Confirms that the script is being run inside a kube-build image
#
function kube::util::ensure_dockerized {
  if [[ -f /kube-build-image ]]; then
    return 0
  else
    echo "ERROR: This script is designed to be run inside a kube-build container"
    exit 1
  fi
}

# kube::util::ensure-gnu-sed
# Determines which sed binary is gnu-sed on linux/darwin
#
# Sets:
#  SED: The name of the gnu-sed binary
#
function kube::util::ensure-gnu-sed {
  if LANG=C sed --help 2>&1 | grep -q GNU; then
    SED="sed"
  elif command -v gsed &>/dev/null; then
    SED="gsed"
  else
    kube::log::error "Failed to find GNU sed as sed or gsed. If you are on Mac: brew install gnu-sed." >&2
    return 1
  fi
  kube::util::sourced_variable "${SED}"
}

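# Illustrative usage sketch for kube::util::ensure-gnu-sed (the in-place edit
# below is an example; GNU sed's -i behavior is the reason the helper exists,
# since BSD sed on macOS handles -i differently):
#
#   kube::util::ensure-gnu-sed
#   "${SED}" -i 's/foo/bar/g' "${KUBE_TEMP}/example.txt"
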
# kube::util::check-file-in-alphabetical-order <file>
# Check that the file is in alphabetical order
#
function kube::util::check-file-in-alphabetical-order {
  local failure_file="$1"
  if ! diff -u "${failure_file}" <(LC_ALL=C sort "${failure_file}"); then
    {
      echo
      echo "${failure_file} is not in alphabetical order. Please sort it:"
      echo
      echo "  LC_ALL=C sort -o ${failure_file} ${failure_file}"
      echo
    } >&2
    false
  fi
}

# kube::util::require-jq
# Checks whether jq is installed.
function kube::util::require-jq {
  if ! command -v jq &>/dev/null; then
    echo "jq not found. Please install." 1>&2
    return 1
  fi
}

# Some useful colors.
if [[ -z "${color_start-}" ]]; then
  declare -r color_start="\033["
  declare -r color_red="${color_start}0;31m"
  declare -r color_yellow="${color_start}0;33m"
  declare -r color_green="${color_start}0;32m"
  declare -r color_blue="${color_start}1;34m"
  declare -r color_cyan="${color_start}1;36m"
  declare -r color_norm="${color_start}0m"

  kube::util::sourced_variable "${color_start}"
  kube::util::sourced_variable "${color_red}"
  kube::util::sourced_variable "${color_yellow}"
  kube::util::sourced_variable "${color_green}"
  kube::util::sourced_variable "${color_blue}"
  kube::util::sourced_variable "${color_cyan}"
  kube::util::sourced_variable "${color_norm}"
fi

# ex: ts=2 sw=2 et filetype=sh