Vagrantfile:      node_ip = $node_ips[n]
cluster/addons/addon-manager/kube-addons.sh:# Create admission_control objects if defined before any other addon services. If the limits
cluster/addons/registry/images/Dockerfile:ADD run_proxy.sh /usr/bin/run_proxy
cluster/addons/registry/images/Dockerfile:CMD ["/usr/bin/run_proxy"]
cluster/aws/templates/configure-vm-aws.sh:  # We set the hostname_override to the full EC2 private dns name
cluster/aws/templates/configure-vm-aws.sh:  api_servers: '${API_SERVERS}'
cluster/aws/templates/configure-vm-aws.sh:  env-to-grains "hostname_override"
cluster/aws/templates/configure-vm-aws.sh:  env-to-grains "runtime_config"
cluster/aws/templates/configure-vm-aws.sh:  kubelet_api_servers: '${KUBELET_APISERVER}'
cluster/centos/util.sh:  local node_ip=${node#*@}
cluster/gce/configure-vm.sh:  advertise_address: '${EXTERNAL_IP}'
cluster/gce/configure-vm.sh:  api_servers: '${KUBERNETES_MASTER_NAME}'
cluster/gce/configure-vm.sh:  cloud_config: ${CLOUD_CONFIG}
cluster/gce/configure-vm.sh:  env-to-grains "runtime_config"
cluster/gce/configure-vm.sh:  kubelet_api_servers: '${KUBELET_APISERVER}'
cluster/gce/gci/configure-helper.sh:      reconcile_cidr="false"
cluster/gce/gci/configure-helper.sh:  local api_servers="--master=https://${KUBERNETES_MASTER_NAME}"
cluster/gce/gci/configure-helper.sh:  local reconcile_cidr="true"
cluster/gce/gci/configure-helper.sh:  sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
cluster/gce/trusty/configure-helper.sh:  sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
cluster/gce/util.sh:    local node_ip=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \
cluster/juju/layers/kubernetes/reactive/k8s.py:    check_call(split(cmd.format(kubeconfig, cluster_name, server, ca)))
cluster/juju/layers/kubernetes/reactive/k8s.py:    check_call(split(cmd.format(kubeconfig, context, cluster_name, user)))
cluster/juju/layers/kubernetes/reactive/k8s.py:    client_key = '/srv/kubernetes/client.key'
cluster/juju/layers/kubernetes/reactive/k8s.py:    cluster_name = 'kubernetes'
cluster/juju/layers/kubernetes/reactive/k8s.py:    tlslib.client_key(None, client_key, user='ubuntu', group='ubuntu')
cluster/lib/logging.sh:      local source_file=${BASH_SOURCE[$frame_no]}
cluster/lib/logging.sh:    local source_file=${BASH_SOURCE[$stack_skip]}
cluster/log-dump.sh:    for node_name in "${NODE_NAMES[@]}"; do
cluster/log-dump.sh:    local -r node_name="${1}"
cluster/log-dump.sh:readonly report_dir="${1:-_artifacts}"
cluster/mesos/docker/km/build.sh:  km_path=$(find-binary km darwin/amd64)
cluster/photon-controller/templates/salt-master.sh:  api_servers: $MASTER_NAME
cluster/photon-controller/templates/salt-minion.sh:  hostname_override: $(ip route get 1.1.1.1 | awk '{print $7}')
cluster/photon-controller/util.sh:    node_ip=$(${PHOTON} vm networks "${node_id}" | grep -i $'\t'"00:0C:29" | grep -E '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -1 | awk -F'\t' '{print $3}')
cluster/photon-controller/util.sh:  local cert_dir="/srv/kubernetes"
cluster/photon-controller/util.sh:  node_name=${1}
cluster/rackspace/util.sh:    local node_ip=$(nova show --minimal ${NODE_NAMES[$i]} \
cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest:{% set params = pillar['autoscaler_mig_config'] + " " + cloud_config -%}
cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set enable_garbage_collector = pillar['enable_garbage_collector'] -%}
cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + storage_backend + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + admission_control + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set enable_garbage_collector = pillar['enable_garbage_collector'] -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + enable_garbage_collector + " " + cloud_provider  + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest:  {% set api_servers_with_port = api_servers + ":6443" -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest:  {% set api_servers_with_port = api_servers -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest:  {% set cluster_cidr=" --cluster-cidr=" + pillar['cluster_cidr'] %}
cluster/saltbase/salt/kubelet/default:  {% set api_servers_with_port = api_servers + ":6443" -%}
cluster/saltbase/salt/kubelet/default:  {% set api_servers_with_port = api_servers -%}
cluster/saltbase/salt/kubelet/default:  {% set enable_custom_metrics="--enable-custom-metrics=" + pillar['enable_custom_metrics'] %}
cluster/saltbase/salt/kubelet/default:  {% set eviction_hard="--eviction-hard=" + pillar['eviction_hard'] %}
cluster/saltbase/salt/kubelet/default:  {% set kubelet_port="--port=" + pillar['kubelet_port'] %}
cluster/saltbase/salt/kubelet/default:  {% set node_labels="--node-labels=" + pillar['node_labels'] %}
cluster/saltbase/salt/kubelet/default:{% if pillar.get('non_masquerade_cidr','') -%}
cluster/saltbase/salt/opencontrail-networking-master/init.sls:      - 'SERVICE_CLUSTER_IP_RANGE': '{{ pillar.get('service_cluster_ip_range') }}'
cluster/saltbase/salt/opencontrail-networking-minion/init.sls:      - 'SERVICE_CLUSTER_IP_RANGE': '{{ pillar.get('service_cluster_ip_range') }}'
cluster/saltbase/salt/supervisor/kubelet-checker.sh:	{% set kubelet_port = pillar['kubelet_port'] -%}
cluster/saltbase/salt/supervisor/supervisor_watcher.sh:# Apply oom_score_adj: -901 to processes
cluster/ubuntu/util.sh:  local node_ip=${1}
cluster/vagrant/provision-utils.sh:  api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
cluster/vagrant/provision-utils.sh:  node_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
cluster/vagrant/provision-utils.sh:  runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
cluster/vsphere/templates/salt-minion.sh:  hostname_override: $(ip route get 1.1.1.1 | awk '{print $7}')
examples/cluster-dns/images/frontend/client.py:  service_address = socket.gethostbyname(hostname)
examples/storage/cassandra/image/run.sh:	cluster_name \
examples/storage/vitess/env.sh:    node_ip=$(get_node_ip)
federation/config.default.json:      "cloud_provider": "gce",
federation/config.default.json:      "cloud_provider": "gce",
federation/config.default.json:      "cloud_provider": "gce",
federation/config.default.json:      "cluster_cidr": "10.180.0.0/14",
federation/config.default.json:      "cluster_cidr": "10.184.0.0/14",
federation/config.default.json:      "cluster_cidr": "10.188.0.0/14",
federation/config.default.json:      "cluster_name": "cluster1-kubernetes",
federation/config.default.json:      "cluster_name": "cluster2-kubernetes",
federation/config.default.json:      "cluster_name": "cluster3-kubernetes",
federation/config.default.json:      "num_nodes": 3,
federation/config.default.json:      "num_nodes": 3,
federation/config.default.json:      "num_nodes": 3,
hack/local-up-cluster.sh:        advertise_address="--advertise_address=${API_HOST}"
hack/local-up-cluster.sh:      runtime_config="--runtime-config=${RUNTIME_CONFIG}"
hack/local-up-cluster.sh:    advertise_address=""
hack/local-up-cluster.sh:    runtime_config=""
hack/test-update-storage-objects.sh:  local storage_media_type=${2:-""}
hack/test-update-storage-objects.sh:  local storage_versions=${1:-""}
hack/test-update-storage-objects.sh:  source_file=${test_data[0]}
hack/test-update-storage-objects.sh:# source_file,resource,namespace,name,old_version,new_version
pkg/kubelet/api/v1alpha1/runtime/api.pb.go:	ContainerPort *int32 `protobuf:"varint,3,opt,name=container_port,json=containerPort" json:"container_port,omitempty"`
pkg/kubelet/api/v1alpha1/runtime/api.pb.go:	OomScoreAdj      *int64 `protobuf:"varint,5,opt,name=oom_score_adj,json=oomScoreAdj" json:"oom_score_adj,omitempty"`
pkg/kubelet/api/v1alpha1/runtime/api.proto:    optional int32 container_port = 3;
pkg/kubelet/api/v1alpha1/runtime/api.proto:    optional int64 oom_score_adj = 5;
pkg/kubelet/network/hairpin/hairpin.go:	hairpinModeRelativePath = "hairpin_mode"
pkg/kubelet/qos/policy_test.go:			t.Errorf("oom_score_adj should be between %d and %d, but was %d", test.lowOOMScoreAdj, test.highOOMScoreAdj, oomScoreAdj)
pkg/kubelet/qos/policy_test.go:	highOOMScoreAdj int // The min oom_score_adj score the container should be assigned.
pkg/kubelet/qos/policy_test.go:	lowOOMScoreAdj  int // The max oom_score_adj score the container should be assigned.
pkg/util/oom/oom_linux.go:		return fmt.Errorf("invalid PID %d specified for oom_score_adj", pid)
pkg/util/oom/oom_linux.go:	oomScoreAdjPath := path.Join("/proc", pidStr, "oom_score_adj")
pkg/util/oom/oom_linux.go:// Writes 'value' to /proc/<pid>/oom_score_adj for all processes in cgroup cgroupName.
pkg/util/oom/oom_linux.go:// Writes 'value' to /proc/<pid>/oom_score_adj. PID = 0 means self
test/e2e/common/configmap.go:						Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volume/data-1"},
test/e2e/common/downwardapi_volume.go:			Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath},
test/e2e/es_cluster_logging.go:		framework.Failf("No cluster_name field in Elasticsearch response: %v", esResponse)
test/e2e/es_cluster_logging.go:	// Check to see if have a cluster_name field.
test/e2e/es_cluster_logging.go:	clusterName, ok := esResponse["cluster_name"]
test/e2e/common/host_path.go:			fmt.Sprintf("--file_content_in_loop=%v", filePath),
test/e2e/common/host_path.go:			fmt.Sprintf("--file_content_in_loop=%v", filePathInReader),
test/e2e/common/host_path.go:			fmt.Sprintf("--retry_time=%d", retryDuration),
test/e2e/common/host_path.go:			fmt.Sprintf("--retry_time=%d", retryDuration),
test/e2e_node/configmap_test.go:						Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volume/data-1"},
test/images/mount-tester/mt.go:	flag.BoolVar(&breakOnExpectedContent, "break_on_expected_content", true, "Break out of loop on expected content, (use with --file_content_in_loop flag only)")
test/images/mount-tester/mt.go:	flag.IntVar(&retryDuration, "retry_time", 180, "Retry time during the loop")
test/images/mount-tester/mt.go:	flag.StringVar(&readFileContentInLoopPath, "file_content_in_loop", "", "Path to read the file content in loop from")