Merge pull request #50355 from shashidharatd/verify-flags

Automatic merge from submit-queue (batch tested with PRs 49615, 49321, 49982, 49788, 50355)

Simplify hack/verify-flags-underscore.py

**What this PR does / why we need it**:
This PR removes the need for `hack/verify-flags/known-flags.txt`; `verify-flags-underscore.py` now always parses the flags from Go files and checks whether they contain underscores.
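
At a high level the check now works roughly like this (a minimal, self-contained sketch of the idea, not the exact script — the real regexes and the `hack/verify-flags/excluded-flags.txt` handling are in `hack/verify-flags-underscore.py` below):

```
import re
import sys

# Simplified pattern for pflag-style declarations such as fs.String("flag-name", ...).
# The real script uses several regexes (String/Int/Bool/Duration/StringSlice/Var variants).
FLAG_DECL = re.compile(r'\.(?:String|Int|Bool|Duration|StringSlice)P?\("([^"]*)"')

def find_underscore_flags(go_files):
    """Return the sorted list of declared flag names that contain an underscore."""
    bad = set()
    for path in go_files:
        with open(path) as f:
            for name in FLAG_DECL.findall(f.read()):
                if "_" in name:
                    bad.add(name)
    return sorted(bad)

if __name__ == "__main__":
    offenders = find_underscore_flags(sys.argv[1:])
    if offenders:
        print("flags declared with an underscore:\n" + "\n".join(offenders))
        sys.exit(1)
```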

It is also much faster than the earlier check and still does its job of catching underscores in flags.
Now:
```
# time ./hack/verify-flags-underscore.py 
real	0m1.638s
user	0m1.560s
sys	0m0.076s
```
Before:
```
# time ./hack/verify-flags-underscore.py 
real	0m22.585s
user	0m22.464s
sys	0m0.112s
```

It has become a pain to keep adding every new flag to `known-flags.txt` whenever one is introduced. With this PR, that step is no longer required.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #40329  #50319

**Special notes for your reviewer**:

**Release note**:
```
NONE
```
/cc @fejta @mtaufen
commit b94f7eabd9 by Kubernetes Submit Queue (committed by GitHub), 2017-08-09 23:56:07 -07:00
3 changed files with 5 additions and 1056 deletions

hack/verify-flags-underscore.py

@@ -25,7 +25,6 @@ import argparse
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
parser.add_argument("-e", "--skip-exceptions", help="ignore hack/verify-flags/exceptions.txt and print all output", action="store_true")
args = parser.parse_args()
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
@@ -81,77 +80,10 @@ def get_all_files(rootdir):
            all_files.append(pathname)
    return all_files
def normalize_files(rootdir, files):
    newfiles = []
    a = ['Godeps', '_gopath', 'third_party', '.git', 'exceptions.txt', 'known-flags.txt']
    for f in files:
        if any(x in f for x in a):
            continue
        if f.endswith(".svg"):
            continue
        if f.endswith(".gliffy"):
            continue
        if f.endswith(".md"):
            continue
        if f.endswith(".yaml"):
            continue
        newfiles.append(f)
    for i, f in enumerate(newfiles):
        if not os.path.isabs(f):
            newfiles[i] = os.path.join(rootdir, f)
    return newfiles
def line_has_bad_flag(line, flagre):
    results = flagre.findall(line)
    for result in results:
        if not "_" in result:
            return False
        # this should exclude many cases where jinja2 templates use kube flags
        # as variables, except it uses _ for the variable name
        if "{% set" + result + "= \"" in line:
            return False
        if "pillar[" + result + "]" in line:
            return False
        if "grains" + result in line:
            return False
        # something common in juju variables...
        if "template_data[" + result + "]" in line:
            return False
        return True
    return False
def check_known_flags(rootdir):
    pathname = os.path.join(rootdir, "hack/verify-flags/known-flags.txt")
    f = open(pathname, 'r')
    flags = set(f.read().splitlines())
    f.close()
    illegal_known_flags = set()
    for flag in flags:
        if len(flag) > 0:
            if not "-" in flag:
                illegal_known_flags.add(flag)
    if len(illegal_known_flags) != 0:
        print("All flags in hack/verify-flags/known-flags.txt should contain character -, found these flags without -")
        l = list(illegal_known_flags)
        l.sort()
        print("%s" % "\n".join(l))
        sys.exit(1)
# The list of files might not be the whole repo. If someone only changed a
# couple of files we don't want to run all of the golang files looking for
# flags. Instead load the list of flags from hack/verify-flags/known-flags.txt
# If running the golang files finds a new flag not in that file, return an
# error and tell the user to add the flag to the flag list.
def get_flags(rootdir, files):
    # preload the 'known' flags
    pathname = os.path.join(rootdir, "hack/verify-flags/known-flags.txt")
    f = open(pathname, 'r')
    flags = set(f.read().splitlines())
    f.close()
# Collects all the flags used in golang files and verifies the flags do
# not contain underscore. If any flag needs to be excluded from this check,
# need to add that flag in hack/verify-flags/excluded-flags.txt.
def check_underscore_in_flags(rootdir, files):
    # preload the 'known' flags which don't follow the - standard
    pathname = os.path.join(rootdir, "hack/verify-flags/excluded-flags.txt")
    f = open(pathname, 'r')
@@ -165,7 +97,6 @@ def get_flags(rootdir, files):
               re.compile('.Duration[P]?\("([^"]*)",[^,]+,[^)]+\)'),
               re.compile('.StringSlice[P]?\("([^"]*)",[^,]+,[^)]+\)') ]
    new_flags = set()
    new_excluded_flags = set()
    # walk all the files looking for any flags being declared
    for pathname in files:
@@ -182,10 +113,6 @@ def get_flags(rootdir, files):
                continue
            if "_" in flag:
                new_excluded_flags.add(flag)
            if not "-" in flag:
                continue
            if flag not in flags:
                new_flags.add(flag)
    if len(new_excluded_flags) != 0:
        print("Found a flag declared with an _ but which is not explicitly listed as a valid flag name in hack/verify-flags/excluded-flags.txt")
        print("Are you certain this flag should not have been declared with an - instead?")
@@ -193,79 +120,17 @@ def get_flags(rootdir, files):
        l.sort()
        print("%s" % "\n".join(l))
        sys.exit(1)
    if len(new_flags) != 0:
        print("Found flags with character - in golang files not in the list of known flags. Please add these to hack/verify-flags/known-flags.txt")
        l = list(new_flags)
        l.sort()
        print("%s" % "\n".join(l))
        sys.exit(1)
    return list(flags)
def flags_to_re(flags):
    """turn the list of all flags we found into a regex find both - and _ versions"""
    dashRE = re.compile('[-_]')
    flagREs = []
    for flag in flags:
        # turn all flag names into regexs which will find both types
        newre = dashRE.sub('[-_]', flag)
        # only match if there is not a leading or trailing alphanumeric character
        flagREs.append("[^\w${]" + newre + "[^\w]")
    # turn that list of regex strings into a single large RE
    flagRE = "|".join(flagREs)
    flagRE = re.compile(flagRE)
    return flagRE
def load_exceptions(rootdir):
    exceptions = set()
    if args.skip_exceptions:
        return exceptions
    exception_filename = os.path.join(rootdir, "hack/verify-flags/exceptions.txt")
    exception_file = open(exception_filename, 'r')
    for exception in exception_file.read().splitlines():
        out = exception.split(":", 1)
        if len(out) != 2:
            print("Invalid line in exceptions file: %s" % exception)
            continue
        filename = out[0]
        line = out[1]
        exceptions.add((filename, line))
    return exceptions
def main():
    rootdir = os.path.dirname(__file__) + "/../"
    rootdir = os.path.abspath(rootdir)
    exceptions = load_exceptions(rootdir)
    if len(args.filenames) > 0:
        files = args.filenames
    else:
        files = get_all_files(rootdir)
    files = normalize_files(rootdir, files)
    check_known_flags(rootdir)
    flags = get_flags(rootdir, files)
    flagRE = flags_to_re(flags)
    bad_lines = []
    # walk all the file looking for any flag that was declared and now has an _
    for pathname in files:
        relname = os.path.relpath(pathname, rootdir)
        f = open(pathname, 'r')
        for line in f.read().splitlines():
            if line_has_bad_flag(line, flagRE):
                if (relname, line) not in exceptions:
                    bad_lines.append((relname, line))
        f.close()
    if len(bad_lines) != 0:
        if not args.skip_exceptions:
            print("Found illegal 'flag' usage. If these are false negatives you should run `hack/verify-flags-underscore.py -e > hack/verify-flags/exceptions.txt` to update the list.")
        bad_lines.sort()
        for (relname, line) in bad_lines:
            print("%s:%s" % (relname, line))
        return 1
    check_underscore_in_flags(rootdir, files)
if __name__ == "__main__":
    sys.exit(main())

hack/verify-flags/exceptions.txt

@@ -1,175 +0,0 @@
Vagrantfile: node_ip = $node_ips[n]
cluster/addons/addon-manager/kube-addons.sh:# Create admission_control objects if defined before any other addon services. If the limits
cluster/aws/templates/configure-vm-aws.sh: # We set the hostname_override to the full EC2 private dns name
cluster/aws/templates/configure-vm-aws.sh: api_servers: '${API_SERVERS}'
cluster/aws/templates/configure-vm-aws.sh: env-to-grains "hostname_override"
cluster/aws/templates/configure-vm-aws.sh: env-to-grains "runtime_config"
cluster/aws/templates/configure-vm-aws.sh: kubelet_api_servers: '${KUBELET_APISERVER}'
cluster/centos/config-default.sh: etcd_servers="${prefix}http://${master_ip}:2379"
cluster/centos/config-default.sh: local etcd_servers=""
cluster/centos/util.sh: local node_ip=${node#*@}
cluster/gce/configure-vm.sh: advertise_address: '${EXTERNAL_IP}'
cluster/gce/configure-vm.sh: api_servers: '${KUBERNETES_MASTER_NAME}'
cluster/gce/configure-vm.sh: cloud_config: ${CLOUD_CONFIG}
cluster/gce/configure-vm.sh: env-to-grains "feature_gates"
cluster/gce/configure-vm.sh: env-to-grains "runtime_config"
cluster/gce/configure-vm.sh: kubelet_api_servers: '${KUBELET_APISERVER}'
cluster/gce/container-linux/configure-helper.sh: authorization_mode+=",ABAC"
cluster/gce/container-linux/configure-helper.sh: authorization_mode+=",Webhook"
cluster/gce/container-linux/configure-helper.sh: grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" | \
cluster/gce/container-linux/configure-helper.sh: sed -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g")
cluster/gce/container-linux/configure-helper.sh: sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@${STORAGE_BACKEND}@g" "${temp_file}"
cluster/gce/container-linux/configure-helper.sh: sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g" "${temp_file}"
cluster/gce/container-linux/configure-helper.sh: local api_servers="--master=https://${KUBERNETES_MASTER_NAME}"
cluster/gce/container-linux/configure-helper.sh: local authorization_mode="RBAC"
cluster/gce/container-linux/configure-helper.sh: sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
cluster/gce/gci/configure-helper.sh: authorization_mode+=",ABAC"
cluster/gce/gci/configure-helper.sh: authorization_mode+=",Webhook"
cluster/gce/gci/configure-helper.sh: grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" | \
cluster/gce/gci/configure-helper.sh: sed -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g")
cluster/gce/gci/configure-helper.sh: sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@${STORAGE_BACKEND}@g" "${temp_file}"
cluster/gce/gci/configure-helper.sh: sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g" "${temp_file}"
cluster/gce/gci/configure-helper.sh: local api_servers="--master=https://${KUBERNETES_MASTER_NAME}"
cluster/gce/gci/configure-helper.sh: local authorization_mode="RBAC"
cluster/gce/gci/configure-helper.sh: sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
cluster/gce/trusty/configure-helper.sh: grep -o "{{ *pillar\.get('storage_backend', '\(.*\)') *}}" | \
cluster/gce/trusty/configure-helper.sh: sed -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g")
cluster/gce/trusty/configure-helper.sh: sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@${STORAGE_BACKEND}@g" "${temp_file}"
cluster/gce/trusty/configure-helper.sh: sed -i -e "s@{{ *pillar\.get('storage_backend', '\(.*\)') *}}@\1@g" "${temp_file}"
cluster/gce/trusty/configure-helper.sh: sed -i -e "s@{{pillar\['allow_privileged'\]}}@true@g" "${src_file}"
cluster/gce/util.sh: local node_ip=$(gcloud compute instances describe --project "${PROJECT}" --zone "${ZONE}" \
cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py: context['pillar'] = {'num_nodes': get_node_count()}
cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py: ca_cert_path = layer_options.get('ca_certificate_path')
cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py: cluster_dns.set_dns_info(53, hookenv.config('dns_domain'), dns_ip)
cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py: ip = service_cidr().split('/')[0]
cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py: ip = service_cidr().split('/')[0]
cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py:def send_cluster_dns_detail(cluster_dns):
cluster/juju/layers/kubernetes-master/reactive/kubernetes_master.py:def service_cidr():
cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py: context.update({'kube_api_endpoint': ','.join(api_servers),
cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py: ca_cert_path = layer_options.get('ca_certificate_path')
cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py:def configure_worker_services(api_servers, dns, cluster_cidr):
cluster/lib/logging.sh: local source_file=${BASH_SOURCE[$frame_no]}
cluster/lib/logging.sh: local source_file=${BASH_SOURCE[$stack_skip]}
cluster/log-dump/log-dump.sh:readonly report_dir="${1:-_artifacts}"
cluster/photon-controller/templates/salt-master.sh: api_servers: $MASTER_NAME
cluster/photon-controller/templates/salt-minion.sh: hostname_override: $(ip route get 1.1.1.1 | awk '{print $7}')
cluster/photon-controller/util.sh: node_ip=$(${PHOTON} vm networks "${node_id}" | grep -i $'\t'"00:0C:29" | grep -E '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | head -1 | awk -F'\t' '{print $3}')
cluster/photon-controller/util.sh: local cert_dir="/srv/kubernetes"
cluster/photon-controller/util.sh: node_name=${1}
cluster/photon-controller/util.sh: ssh_key=$(ssh-add -L | head -1)
cluster/rackspace/util.sh: local node_ip=$(nova show --minimal ${NODE_NAMES[$i]} \
cluster/saltbase/salt/cluster-autoscaler/cluster-autoscaler.manifest:{% set params = pillar['autoscaler_mig_config'] + " " + cloud_config -%}
cluster/saltbase/salt/etcd/etcd.manifest: "value": "{{ pillar.get('storage_backend', 'etcd3') }}"
cluster/saltbase/salt/etcd/etcd.manifest:{% if pillar.get('storage_backend', 'etcd3') == 'etcd3' -%}
cluster/saltbase/salt/kube-admission-controls/init.sls:{% if 'LimitRanger' in pillar.get('admission_control', '') %}
cluster/saltbase/salt/kube-apiserver/kube-apiserver.manifest:{% set params = address + " " + storage_backend + " " + storage_media_type + " " + etcd_servers + " " + etcd_servers_overrides + " " + cloud_provider + " " + cloud_config + " " + runtime_config + " " + feature_gates + " " + admission_control + " " + max_requests_inflight + " " + target_ram_mb + " " + service_cluster_ip_range + " " + client_ca_file + basic_auth_file + " " + min_request_timeout + " " + enable_garbage_collector + " " + etcd_quorum_read + " " + audit_log -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% if pillar.get('enable_hostpath_provisioner', '').lower() == 'true' -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = "--master=127.0.0.1:8080" + " " + cluster_name + " " + cluster_cidr + " " + allocate_node_cidrs + " " + service_cluster_ip_range + " " + terminated_pod_gc + " " + enable_garbage_collector + " " + cloud_provider + " " + cloud_config + " " + service_account_key + " " + log_level + " " + root_ca_file -%}
cluster/saltbase/salt/kube-controller-manager/kube-controller-manager.manifest:{% set params = params + " " + feature_gates -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: - echo -998 > /proc/$$$/oom_score_adj && kube-proxy {{api_servers_with_port}} {{kubeconfig}} {{cluster_cidr}} --resource-container="" {{params}} 1>>/var/log/kube-proxy.log 2>&1
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set api_servers_with_port = api_servers + ":6443" -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set api_servers_with_port = api_servers -%}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest: {% set cluster_cidr=" --cluster-cidr=" + pillar['cluster_cidr'] %}
cluster/saltbase/salt/kube-proxy/kube-proxy.manifest:{% set params = log_level + " " + feature_gates + " " + test_args -%}
cluster/saltbase/salt/kube-scheduler/kube-scheduler.manifest:{% set params = params + log_level + " " + feature_gates + " " + scheduling_algorithm_provider -%}
cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers + ":6443" -%}
cluster/saltbase/salt/kubelet/default: {% set api_servers_with_port = api_servers -%}
cluster/saltbase/salt/kubelet/default: {% set enable_custom_metrics="--enable-custom-metrics=" + pillar['enable_custom_metrics'] %}
cluster/saltbase/salt/kubelet/default: {% set eviction_hard="--eviction-hard=" + pillar['eviction_hard'] %}
cluster/saltbase/salt/kubelet/default: {% set kubelet_port="--port=" + pillar['kubelet_port'] %}
cluster/saltbase/salt/kubelet/default: {% set node_labels="--node-labels=" + pillar['node_labels'] %}
cluster/saltbase/salt/kubelet/default:{% if grains['feature_gates'] is defined -%}
cluster/saltbase/salt/kubelet/default:{% if pillar.get('non_masquerade_cidr','') -%}
cluster/saltbase/salt/opencontrail-networking-master/init.sls: - 'SERVICE_CLUSTER_IP_RANGE': '{{ pillar.get('service_cluster_ip_range') }}'
cluster/saltbase/salt/opencontrail-networking-minion/init.sls: - 'SERVICE_CLUSTER_IP_RANGE': '{{ pillar.get('service_cluster_ip_range') }}'
cluster/saltbase/salt/supervisor/supervisor_watcher.sh:# Apply oom_score_adj: -901 to processes
cluster/ubuntu/util.sh: local node_ip=${1}
cluster/vagrant/provision-utils.sh: api_servers: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
cluster/vagrant/provision-utils.sh: node_ip: '$(echo "$MASTER_IP" | sed -e "s/'/''/g")'
cluster/vagrant/provision-utils.sh: runtime_config: '$(echo "$RUNTIME_CONFIG" | sed -e "s/'/''/g")'
examples/cluster-dns/images/frontend/client.py: service_address = socket.gethostbyname(hostname)
examples/storage/cassandra/image/files/jvm.options:# information in cassandra.yaml (such as listen_address).
examples/storage/cassandra/image/files/jvm.options:#-Dcassandra.replace_address=listen_address or broadcast_address of dead node
examples/storage/cassandra/image/files/run.sh: cluster_name \
examples/storage/cassandra/image/files/run.sh: listen_address \
examples/storage/vitess/env.sh: node_ip=$(get_node_ip)
federation/cluster/common.sh: local cert_dir="${kube_temp}/easy-rsa-master/easyrsa3"
federation/deploy/config.json.sample: "cloud_provider": "gce",
federation/deploy/config.json.sample: "cloud_provider": "gce",
federation/deploy/config.json.sample: "cloud_provider": "gce",
federation/deploy/config.json.sample: "cluster_cidr": "10.180.0.0/14",
federation/deploy/config.json.sample: "cluster_cidr": "10.184.0.0/14",
federation/deploy/config.json.sample: "cluster_cidr": "10.188.0.0/14",
federation/deploy/config.json.sample: "cluster_name": "cluster1-kubernetes",
federation/deploy/config.json.sample: "cluster_name": "cluster2-kubernetes",
federation/deploy/config.json.sample: "cluster_name": "cluster3-kubernetes",
federation/deploy/config.json.sample: "kubernetes_version": "v1.4.0"
federation/deploy/config.json.sample: "kubernetes_version": "v1.4.0"
federation/deploy/config.json.sample: "kubernetes_version": "v1.4.0"
federation/deploy/config.json.sample: "num_nodes": 3,
federation/deploy/config.json.sample: "num_nodes": 3,
federation/deploy/config.json.sample: "num_nodes": 3,
hack/lib/util.sh: local api_port=$5
hack/local-up-cluster.sh: advertise_address="--advertise_address=${API_HOST_IP}"
hack/local-up-cluster.sh: runtime_config="--runtime-config=${RUNTIME_CONFIG}"
hack/local-up-cluster.sh: advertise_address=""
hack/local-up-cluster.sh: runtime_config=""
hack/make-rules/test-e2e-node.sh: image_project=${IMAGE_PROJECT:-"google-containers"}
hack/make-rules/test-e2e-node.sh: delete_instances=${DELETE_INSTANCES:-"false"}
hack/make-rules/test-e2e-node.sh: image_project=${IMAGE_PROJECT:-"kubernetes-node-e2e-images"}
hack/test-update-storage-objects.sh: local storage_backend=${1:-"${STORAGE_BACKEND_ETCD2}"}
hack/test-update-storage-objects.sh: local storage_media_type=${3:-""}
hack/test-update-storage-objects.sh: local storage_versions=${2:-""}
hack/test-update-storage-objects.sh: source_file=${test_data[0]}
hack/test-update-storage-objects.sh:# source_file,resource,namespace,name,old_version,new_version
pkg/kubelet/apis/cri/v1alpha1/runtime/api.pb.go: ContainerPort int32 `protobuf:"varint,2,opt,name=container_port,json=containerPort,proto3" json:"container_port,omitempty"`
pkg/kubelet/apis/cri/v1alpha1/runtime/api.pb.go: OomScoreAdj int64 `protobuf:"varint,5,opt,name=oom_score_adj,json=oomScoreAdj,proto3" json:"oom_score_adj,omitempty"`
pkg/kubelet/apis/cri/v1alpha1/runtime/api.pb.go: PodCidr string `protobuf:"bytes,1,opt,name=pod_cidr,json=podCidr,proto3" json:"pod_cidr,omitempty"`
pkg/kubelet/apis/cri/v1alpha1/runtime/api.pb.go: RuntimeConfig *RuntimeConfig `protobuf:"bytes,1,opt,name=runtime_config,json=runtimeConfig" json:"runtime_config,omitempty"`
pkg/kubelet/apis/cri/v1alpha1/runtime/api.proto: RuntimeConfig runtime_config = 1;
pkg/kubelet/apis/cri/v1alpha1/runtime/api.proto: int32 container_port = 2;
pkg/kubelet/apis/cri/v1alpha1/runtime/api.proto: int64 oom_score_adj = 5;
pkg/kubelet/apis/cri/v1alpha1/runtime/api.proto: string pod_cidr = 1;
pkg/kubelet/cm/container_manager_linux.go: glog.V(3).Infof("Failed to apply oom_score_adj %d for pid %d: %v", oomScoreAdj, pid, err)
pkg/kubelet/cm/container_manager_linux.go: glog.V(5).Infof("attempting to apply oom_score_adj of %d to pid %d", oomScoreAdj, pid)
pkg/kubelet/dockershim/docker_checkpoint.go: ContainerPort *int32 `json:"container_port,omitempty"`
pkg/kubelet/network/hairpin/hairpin.go: hairpinModeRelativePath = "hairpin_mode"
pkg/kubelet/qos/policy_test.go: t.Errorf("oom_score_adj should be between %d and %d, but was %d", test.lowOOMScoreAdj, test.highOOMScoreAdj, oomScoreAdj)
pkg/kubelet/qos/policy_test.go: highOOMScoreAdj int // The min oom_score_adj score the container should be assigned.
pkg/kubelet/qos/policy_test.go: lowOOMScoreAdj int // The max oom_score_adj score the container should be assigned.
pkg/util/oom/oom_linux.go: return fmt.Errorf("invalid PID %d specified for oom_score_adj", pid)
pkg/util/oom/oom_linux.go: oomScoreAdjPath := path.Join("/proc", pidStr, "oom_score_adj")
pkg/util/oom/oom_linux.go:// Writes 'value' to /proc/<pid>/oom_score_adj for all processes in cgroup cgroupName.
pkg/util/oom/oom_linux.go:// Writes 'value' to /proc/<pid>/oom_score_adj. PID = 0 means self
test/e2e/common/configmap.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volume/data-1"},
test/e2e/common/configmap.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volumes/create/data-1"},
test/e2e/common/configmap.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volumes/delete/data-1"},
test/e2e/common/configmap.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volumes/update/data-3"},
test/e2e/common/downwardapi_volume.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath},
test/e2e/common/host_path.go: fmt.Sprintf("--file_content_in_loop=%v", filePath),
test/e2e/common/host_path.go: fmt.Sprintf("--file_content_in_loop=%v", filePathInReader),
test/e2e/common/host_path.go: fmt.Sprintf("--retry_time=%d", retryDuration),
test/e2e/common/host_path.go: fmt.Sprintf("--retry_time=%d", retryDuration),
test/e2e/common/projected.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-configmap-volume/data-1"},
test/e2e/common/projected.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-configmap-volumes/create/data-1"},
test/e2e/common/projected.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-configmap-volumes/delete/data-1"},
test/e2e/common/projected.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-configmap-volumes/update/data-3"},
test/e2e/common/projected.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-secret-volumes/create/data-1"},
test/e2e/common/projected.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-secret-volumes/delete/data-1"},
test/e2e/common/projected.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/projected-secret-volumes/update/data-3"},
test/e2e/common/projected.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath},
test/e2e/common/secrets.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/secret-volumes/create/data-1"},
test/e2e/common/secrets.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/secret-volumes/delete/data-1"},
test/e2e/common/secrets.go: Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/secret-volumes/update/data-3"},
test/e2e/network/no_snat.go: node_ip := v1.EnvVar{
test/e2e_node/container_manager_test.go: return fmt.Errorf("expected pid %d's oom_score_adj to be %d; found %d", pid, expectedOOMScoreAdj, oomScore)
test/e2e_node/container_manager_test.go: return fmt.Errorf("expected pid %d's oom_score_adj to be < %d; found %d", pid, expectedMaxOOMScoreAdj, oomScore)
test/e2e_node/container_manager_test.go: return fmt.Errorf("expected pid %d's oom_score_adj to be >= %d; found %d", pid, expectedMinOOMScoreAdj, oomScore)
test/e2e_node/container_manager_test.go: return fmt.Errorf("failed to get oom_score_adj for %d", pid)
test/e2e_node/container_manager_test.go: return fmt.Errorf("failed to get oom_score_adj for %d: %v", pid, err)
test/e2e_node/container_manager_test.go: procfsPath := path.Join("/proc", strconv.Itoa(pid), "oom_score_adj")
test/e2e_node/node_container_manager_test.go: kubeReservedCgroup = "/kube_reserved"
test/e2e_node/node_container_manager_test.go: systemReservedCgroup = "/system_reserved"
test/images/mount-tester/mt.go: flag.BoolVar(&breakOnExpectedContent, "break_on_expected_content", true, "Break out of loop on expected content, (use with --file_content_in_loop flag only)")
test/images/mount-tester/mt.go: flag.IntVar(&retryDuration, "retry_time", 180, "Retry time during the loop")
test/images/mount-tester/mt.go: flag.StringVar(&readFileContentInLoopPath, "file_content_in_loop", "", "Path to read the file content in loop from")

hack/verify-flags/known-flags.txt

@@ -1,741 +0,0 @@
accept-hosts
accept-paths
admission-control
admission-control-config-file
advertise-address
advertised-address
algorithm-provider
all-namespaces
allocate-node-cidrs
allowed-not-ready-nodes
allow-missing-template-keys
allow-privileged
anonymous-auth
api-burst
api-external-dns-names
api-prefix
api-rate
api-server-advertise-address
apiserver-advertise-address
apiserver-arg-overrides
apiserver-arg-overrides
apiserver-bind-port
apiserver-cert-extra-sans
apiserver-count
apiserver-count
apiserver-count
apiserver-count
api-server-port
api-server-port
api-servers
api-servers
apiserver-count
apiserver-count
api-server-port
api-servers
api-server-service-type
api-token
api-version
apiserver-arg-overrides
apiserver-count
apiserver-count
apiserver-enable-basic-auth
apiserver-enable-token-auth
attach-detach-reconcile-sync-period
audit-log-maxage
audit-log-maxbackup
audit-log-maxsize
audit-log-path
audit-policy-file
audit-webhook-config-file
audit-webhook-mode
authentication-kubeconfig
authentication-token-webhook
authentication-token-webhook-cache-ttl
authentication-token-webhook-config-file
authorization-kubeconfig
authorization-mode
authorization-policy-file
authorization-rbac-super-user
authorization-webhook-cache-authorized-ttl
authorization-webhook-cache-unauthorized-ttl
authorization-webhook-config-file
auth-provider
auth-provider
auth-provider-arg
auth-provider-arg
azure-container-registry-config
basic-auth-file
bench-pods
bench-quiet
bench-tasks
bench-workers
bind-address
bind-pods-burst
bind-pods-qps
bounding-dirs
build-dependencies
build-only
build-tag
ca-cert-path
cadvisor-port
cert-altnames
cert-dir
certificate-authority
cgroup-driver
cgroup-root
cgroups-per-qos
chaos-chance
cidr-allocator-type
clean-start
cleanup-iptables
client-ca-file
client-certificate
client-key
client-name
clientset-api-path
clientset-name
clientset-only
clientset-path
cloud-config
cloud-config-file
cloud-provider
cloud-provider-gce-lb-src-cidrs
cluster-cidr
cluster-context
cluster-dns
cluster-domain
cluster-ip
cluster-ip-range
cluster-monitor-period
cluster-name
cluster-signing-cert-file
cluster-signing-gke-kubeconfig
cluster-signing-gke-retry-backoff
cluster-signing-key-file
cluster-tag
cni-bin-dir
cni-conf-dir
concurrent-deployment-syncs
concurrent-endpoint-syncs
concurrent-gc-syncs
concurrent-job-syncs
concurrent-namespace-syncs
concurrent-replicaset-syncs
concurrent-resource-quota-syncs
concurrent-serviceaccount-token-syncs
concurrent-service-syncs
config-map
config-map-namespace
config-sync-period
configure-cloud-routes
conntrack-max
conntrack-max-per-core
conntrack-min
conntrack-tcp-timeout-close-wait
conntrack-tcp-timeout-established
consumer-port
consumer-service-name
consumer-service-namespace
container-port
container-runtime
container-runtime-endpoint
contain-pod-resources
contention-profiling
controllermanager-arg-overrides
controller-start-interval
core-kubeconfig
cors-allowed-origins
cpu-cfs-quota
cpu-percent
create-annotation
current-release-pr
current-replicas
daemonset-lookup-cache-size
data-dir
default-container-cpu-limit
default-container-mem-limit
delay-shutdown
delete-collection-workers
delete-instances
delete-local-data
delete-namespace
delete-namespace-on-failure
deleting-pods-burst
deleting-pods-qps
deployment-controller-sync-period
deployment-label-key
deserialization-cache-size
dest-file
disable-attach-detach-reconcile-sync
disable-filter
disable-kubenet
disable-log-dump
discovery-file
discovery-port
discovery-token
dns-bind-address
dns-domain
dns-port
dns-provider
dns-provider-config
dns-zone-name
dockercfg-path
docker-disable-shared-pid
docker-email
docker-endpoint
docker-exec-handler
docker-password
docker-server
docker-username
dockershim-checkpoint-dir
driver-port
drop-embedded-fields
dry-run
dump-logs-on-failure
duration-sec
dynamic-config-dir
e2e-output-dir
e2e-verify-service-account
enable-aggregator-routing
enable-controller-attach-detach
enable-custom-metrics
enable-debugging-handlers
enable-dynamic-provisioning
enable-garbage-collector
enable-garbage-collector
enable-garbage-collector
enable-hostpath-provisioner
enable-logs-handler
enable-server
enable-swagger-ui
enable-taint-manager
enforce-node-allocatable
etcd-address
etcd-cafile
etcd-certfile
etcd-config
etcd-keyfile
etcd-image
etcd-metrics-scrape-uri
etcd-metrics-scrape-uri
etcd-mutation-timeout
etcd-persistent-storage
etcd-prefix
etcd-pv-capacity
etcd-pv-storage-class
etcd-quorum-read
etcd-server
etcd-servers
etcd-servers-overrides
etcd-upgrade-storage
etcd-upgrade-version
etcd-version-scrape-uri
etcd-version-scrape-uri
event-burst
event-qps
event-ttl
eviction-hard
eviction-max-pod-grace-period
eviction-minimum-reclaim
eviction-pressure-transition-period
eviction-soft
eviction-soft-grace-period
executor-bindall
executor-logv
executor-path
executor-suicide-timeout
exit-on-lock-contention
experimental-allocatable-ignore-eviction
experimental-allowed-unsafe-sysctls
experimental-bootstrap-kubeconfig
bootstrap-kubeconfig
experimental-bootstrap-token-auth
experimental-check-node-capabilities-before-mount
experimental-cluster-signing-duration
experimental-cri
experimental-dockershim
experimental-dockershim-root-directory
experimental-fail-swap-on
experimental-kernel-memcg-notification
experimental-keystone-ca-file
experimental-keystone-url
experimental-keystone-url
experimental-mounter-path
experimental-nvidia-gpus
experimental-prefix
experimental-qos-reserved
external-etcd-cafile
external-etcd-certfile
external-etcd-endpoints
external-etcd-keyfile
external-hostname
external-ip
external-name
extra-peer-dirs
fail-swap-on
failover-timeout
failure-domains
fake-clientset
feature-gates
federated-api-burst
federated-api-qps
federated-kube-context
federation-config-from-cluster
federation-name
federation-system-namespace
federation-upgrade-target
file-check-frequency
file-suffix
flex-volume-plugin-dir
forward-services
framework-name
framework-store-uri
framework-weburi
from-env-file
from-file
from-literal
func-dest
fuzz-iters
garbage-collector-enabled
garbage-collector-enabled
gather-logs-sizes
gather-metrics-at-teardown
gather-resource-usage
gather-suite-metrics-at-teardown
gce-api-endpoint
gce-multizone
gce-project
gce-region
gce-service-account
gce-upgrade-script
gce-zone
ginkgo-flags
gke-cluster
go-header-file
google-json-key
grace-period
ha-domain
hairpin-mode
hard-pod-affinity-symmetric-weight
healthz-bind-address
healthz-port
heapster-namespace
heapster-port
heapster-scheme
heapster-service
horizontal-pod-autoscaler-sync-period
horizontal-pod-autoscaler-upscale-delay
horizontal-pod-autoscaler-downscale-delay
host-cluster-context
host-ipc-sources
hostname-override
host-network-sources
host-pid-sources
host-port-endpoints
host-system-namespace
hpa-scale-forbidden-window
http-check-frequency
http-port
ignore-daemonsets
ignore-not-found
image-config-file
image-description
image-gc-high-threshold
image-gc-low-threshold
image-project
image-pull-policy
image-pull-progress-deadline
image-service-endpoint
included-types-overrides
include-extended-apis
include-extended-apis
init-config-dir
initial-sync-timeout
input-base
input-dirs
insecure-bind-address
insecure-experimental-approve-all-kubelet-csrs-for-group
insecure-port
insecure-skip-tls-verify
instance-metadata
instance-name-prefix
internal-clientset-package
iptables-drop-bit
iptables-masquerade-bit
iptables-min-sync-period
iptables-sync-period
ir-data-source
ir-dbname
ir-hawkular
ir-influxdb-host
ir-namespace-only
ir-password
ir-user
jenkins-host
jenkins-jobs
junit-file-number
k8s-bin-dir
k8s-build-output
keep-gogoproto
keep-terminated-pod-volumes
km-path
kops-admin-access
kops-cluster
kops-kubernetes-version
kops-nodes
kops-ssh-key
kops-state
kops-up-timeout
kops-zones
kubeadm-cmd-skip
kubeadm-cmd-skip
kubeadm-path
kubeadm-path
kube-api-burst
kube-api-content-type
kube-api-qps
kubecfg-file
kubectl-path
kubelet-address
kubelet-api-servers
kubelet-cadvisor-port
kubelet-certificate-authority
kubelet-cgroups
kubelet-client-certificate
kubelet-client-key
kubelet-docker-endpoint
kubelet-enable-debugging-handlers
kubelet-flags
kubelet-host-network-sources
kubelet-https
kubelet-kubeconfig
kubelet-network-plugin
kubelet-pod-infra-container-image
kubelet-port
kubelet-preferred-address-types
kubelet-read-only-port
kubelet-root-dir
kubelet-sync-frequency
kubelet-timeout
kube-master
kube-master
kube-master
kube-master
kube-master-url
kube-master-url
kube-reserved
kube-reserved
kube-reserved-cgroup
kube-master-url
kube-reserved
kubemark-external-kubeconfig
kubernetes-anywhere-cluster
kubernetes-anywhere-path
kubernetes-anywhere-phase2-provider
kubernetes-anywhere-up-timeout
kubernetes-service-node-port
kubernetes-version
label-columns
large-cluster-size-threshold
last-release-pr
leader-elect
leader-elect-lease-duration
leader-elect-lock-type
leader-elect-renew-deadline
leader-elect-resource-lock
leader-elect-retry-period
lease-duration
leave-stdin-open
limit-bytes
listen-address
listers-package
load-balancer-ip
lock-file
log-flush-frequency
log-lines-total
logexporter-gcs-path
long-running-request-regexp
make-iptables-util-chains
make-symlinks
manifest-url
manifest-url-header
masquerade-all
master-os-distro
master-service-namespace
master-tag
max-concurrency
max-connection-bytes-per-sec
maximum-dead-containers
maximum-dead-containers-per-container
max-log-age
max-log-backups
max-log-size
max-mutating-requests-inflight
max-open-files
max-outgoing-burst
max-outgoing-qps
max-pods
max-requests-inflight
max-unavailable
metrics-bind-address
metrics-path
min-available
minimum-container-ttl-duration
minimum-image-ttl-duration
minion-max-log-age
minion-max-log-backups
minion-max-log-size
minion-path-override
min-pr-number
min-request-timeout
min-resync-period
namespace-sync-period
network-plugin
network-plugin-dir
network-plugin-mtu
node-cidr-mask-size
node-config-dir
node-eviction-rate
node-instance-group
node-ip
node-labels
node-max-log-age
node-max-log-backups
node-max-log-size
node-monitor-grace-period
node-monitor-period
node-name
node-os-distro
node-path-override
node-port
node-schedulable-timeout
node-startup-grace-period
node-status-update-frequency
node-sync-period
node-tag
no-headers
no-headers
non-masquerade-cidr
non-resource-url
no-suggestions
no-suggestions
num-nodes
oidc-ca-file
oidc-client-id
oidc-groups-claim
oidc-issuer-url
oidc-username-claim
only-idl
oom-score-adj
output-base
output-directory
output-file-base
output-package
output-patch
output-print-type
output-version
out-version
path-override
pod-cidr
pod-eviction-timeout
pod-infra-container-image
pod-manifest-path
pod-network-cidr
pod-running
pod-running-timeout
pods-per-core
policy-config-file
policy-configmap
policy-configmap-namespace
poll-interval
portal-net
prepull-images
private-mountns
prom-push-gateway
protect-kernel-defaults
proto-import
provider-id
proxy-bindall
proxy-client-cert-file
proxy-client-key-file
proxy-kubeconfig
proxy-logv
proxy-mode
proxy-port-range
public-address-override
pvclaimbinder-sync-period
pvclaimbinder-sync-period
pv-recycler-increment-timeout-nfs
pv-recycler-maximum-retry
pv-recycler-minimum-timeout-hostpath
pv-recycler-minimum-timeout-nfs
pv-recycler-pod-template-filepath-hostpath
pv-recycler-pod-template-filepath-nfs
pv-recycler-timeout-increment-hostpath
read-only-port
really-crash-for-testing
reconcile-cidr
reconcile-cooldown
reconcile-interval
register-node
register-retry-count
register-schedulable
register-with-taints
registry-burst
registry-qps
reject-methods
reject-paths
remove-node
repair-malformed-updates
replicaset-lookup-cache-size
replication-controller-lookup-cache-size
repo-root
report-dir
report-prefix
requestheader-allowed-names
requestheader-client-ca-file
requestheader-extra-headers-prefix
requestheader-group-headers
requestheader-username-headers
required-contexts
require-kubeconfig
resolv-conf
resource-container
resource-name
resource-quota-sync-period
resource-version
results-dir
rkt-api-endpoint
rkt-path
rkt-stage1-image
root-ca-file
root-dir
route-reconciliation-period
run-duration
run-kubelet-mode
run-proxy
run-services-mode
runtime-cgroups
runtime-config
runtime-request-timeout
save-config
schedule-pods-here
scheduler-config
scheduler-name
schema-cache-dir
scrape-timeout
seccomp-profile-root
secondary-node-eviction-rate
secret-name
secure-port
self-hosted
serialize-image-pulls
server-start-timeout
service-account-key-file
service-account-lookup
service-account-private-key-file
service-address
service-cidr
service-cluster-ip-range
service-dns-domain
service-dns-suffix
service-generator
service-node-port-range
service-node-ports
service-overrides
service-sync-period
session-affinity
show-all
show-events
show-kind
show-labels
shutdown-fd
shutdown-fifo
since-seconds
since-time
skip-generated-rewrite
skip-munges
skip-preflight-checks
skip-token-print
skip-unsafe
sort-by
source-file
ssh-env
ssh-key
ssh-keyfile
ssh-options
ssh-user
start-services
static-pods-config
stats-port
stop-services
storage-media-type
storage-version
storage-versions
streaming-connection-idle-timeout
suicide-timeout
sync-frequency
system-cgroups
system-pods-startup-timeout
system-reserved
system-reserved-cgroup
system-spec-file
system-spec-name
system-validate-mode
target-port
target-ram-mb
tcp-services
terminated-pod-gc-threshold
test-flags
test-timeout
tls-bootstrap-token
tls-ca-file
tls-cert-file
tls-private-key-file
tls-sni-cert-key
token-auth-file
token-ttl
to-version
to-version
ttl-keys-prefix
ttl-secs
type-src
udp-port
udp-timeout
unhealthy-zone-threshold
unix-socket
update-period
upgrade-image
upgrade-target
use-kubernetes-cluster-service
use-kubernetes-version
use-legacy-policy-config
use-real-proxier
use-service-account-credentials
user-whitelist
use-service-account-credentials
use-service-account-credentials
user-whitelist
use-service-account-credentials
use-taint-based-evictions
verify-only
versioned-clientset-package
viper-config
viper-config
volume-dir
volume-plugin-dir
volume-stats-agg-period
watch-cache
watch-cache-sizes
watch-only
whitelist-override-label
windows-line-endings
write-config-to
www-prefix
zone-id
zone-name
lock-object-name
lock-object-namespace
horizontal-pod-autoscaler-use-rest-clients