From 5aa495cdad06ef0465817fd7813bc13868101858 Mon Sep 17 00:00:00 2001 From: Eric Paris Date: Tue, 11 Aug 2015 16:29:50 -0400 Subject: [PATCH] Update code to use - in flag names instead of _ --- docs/admin/salt.md | 2 +- docs/design/admission_control.md | 4 ++-- docs/design/admission_control_limit_range.md | 2 +- docs/design/admission_control_resource_quota.md | 2 +- docs/getting-started-guides/fedora/fedora_manual_config.md | 2 +- docs/user-guide/services.md | 2 +- hack/test-update-storage-objects.sh | 2 +- pkg/api/serialization_test.go | 2 +- pkg/controller/node/nodecontroller.go | 2 +- pkg/conversion/scheme_test.go | 2 +- pkg/kubelet/container_manager_linux.go | 2 +- pkg/kubelet/kubelet.go | 2 +- pkg/proxy/userspace/proxier.go | 4 ++-- 13 files changed, 15 insertions(+), 15 deletions(-) diff --git a/docs/admin/salt.md b/docs/admin/salt.md index 5d13c6360ce..e921b4a8773 100644 --- a/docs/admin/salt.md +++ b/docs/admin/salt.md @@ -99,7 +99,7 @@ Key | Value `etcd_servers` | (Optional) Comma-delimited list of IP addresses the kube-apiserver and kubelet use to reach etcd. Uses the IP of the first machine in the kubernetes_master role, or 127.0.0.1 on GCE. `hostnamef` | (Optional) The full host name of the machine, i.e. 
uname -n `node_ip` | (Optional) The IP address to use to address this node -`hostname_override` | (Optional) Mapped to the kubelet hostname_override +`hostname_override` | (Optional) Mapped to the kubelet hostname-override `network_mode` | (Optional) Networking model to use among nodes: *openvswitch* `networkInterfaceName` | (Optional) Networking interface to use to bind addresses, default value *eth0* `publicAddressOverride` | (Optional) The IP address the kube-apiserver should use to bind against for external read-only access diff --git a/docs/design/admission_control.md b/docs/design/admission_control.md index 9245aa7d3fa..a2b5700b037 100644 --- a/docs/design/admission_control.md +++ b/docs/design/admission_control.md @@ -63,8 +63,8 @@ The kube-apiserver takes the following OPTIONAL arguments to enable admission co | Option | Behavior | | ------ | -------- | -| admission_control | Comma-delimited, ordered list of admission control choices to invoke prior to modifying or deleting an object. | -| admission_control_config_file | File with admission control configuration parameters to boot-strap plug-in. | +| admission-control | Comma-delimited, ordered list of admission control choices to invoke prior to modifying or deleting an object. | +| admission-control-config-file | File with admission control configuration parameters to boot-strap plug-in. 
| An **AdmissionControl** plug-in is an implementation of the following interface: diff --git a/docs/design/admission_control_limit_range.md b/docs/design/admission_control_limit_range.md index 885ef664ba8..621fd564918 100644 --- a/docs/design/admission_control_limit_range.md +++ b/docs/design/admission_control_limit_range.md @@ -137,7 +137,7 @@ If a constraint is not specified for an enumerated resource, it is not enforced To enable the plug-in and support for LimitRange, the kube-apiserver must be configured as follows: ```console -$ kube-apiserver -admission_control=LimitRanger +$ kube-apiserver --admission-control=LimitRanger ``` ### Enforcement of constraints diff --git a/docs/design/admission_control_resource_quota.md b/docs/design/admission_control_resource_quota.md index bb7c6e0a3fc..86fae45172d 100644 --- a/docs/design/admission_control_resource_quota.md +++ b/docs/design/admission_control_resource_quota.md @@ -178,7 +178,7 @@ The **ResourceQuota** plug-in introspects all incoming admission requests. To enable the plug-in and support for ResourceQuota, the kube-apiserver must be configured as follows: ``` -$ kube-apiserver -admission_control=ResourceQuota +$ kube-apiserver --admission-control=ResourceQuota ``` It makes decisions by evaluating the incoming object against all defined **ResourceQuota.Status.Hard** resource limits in the request diff --git a/docs/getting-started-guides/fedora/fedora_manual_config.md b/docs/getting-started-guides/fedora/fedora_manual_config.md index bd075c2c062..f306cd80549 100644 --- a/docs/getting-started-guides/fedora/fedora_manual_config.md +++ b/docs/getting-started-guides/fedora/fedora_manual_config.md @@ -107,7 +107,7 @@ systemctl stop iptables-services firewalld **Configure the Kubernetes services on the master.** -* Edit /etc/kubernetes/apiserver to appear as such. The service_cluster_ip_range IP addresses must be an unused block of addresses, not used anywhere else. 
They do not need to be routed or assigned to anything. +* Edit /etc/kubernetes/apiserver to appear as such. The service-cluster-ip-range IP addresses must be an unused block of addresses, not used anywhere else. They do not need to be routed or assigned to anything. ```sh # The address on the local server to listen to. diff --git a/docs/user-guide/services.md b/docs/user-guide/services.md index 6aa79061970..9f5f8c09185 100644 --- a/docs/user-guide/services.md +++ b/docs/user-guide/services.md @@ -266,7 +266,7 @@ request. To do this, set the `spec.clusterIP` field. For example, if you already have an existing DNS entry that you wish to replace, or legacy systems that are configured for a specific IP address and difficult to re-configure. The IP address that a user chooses must be a valid IP address and within the -service_cluster_ip_range CIDR range that is specified by flag to the API +service-cluster-ip-range CIDR range that is specified by flag to the API server. If the IP address value is invalid, the apiserver returns a 422 HTTP status code to indicate that the value is invalid. 
diff --git a/hack/test-update-storage-objects.sh b/hack/test-update-storage-objects.sh index 31a8cde237f..ce59df5764f 100755 --- a/hack/test-update-storage-objects.sh +++ b/hack/test-update-storage-objects.sh @@ -41,7 +41,7 @@ KUBECTL="${KUBE_OUTPUT_HOSTBIN}/kubectl" UPDATE_ETCD_OBJECTS_SCRIPT="${KUBE_ROOT}/cluster/update-storage-objects.sh" function startApiServer() { - kube::log::status "Starting kube-apiserver with KUBE_API_VERSIONS: ${KUBE_API_VERSIONS} and runtime_config: ${RUNTIME_CONFIG}" + kube::log::status "Starting kube-apiserver with KUBE_API_VERSIONS: ${KUBE_API_VERSIONS} and runtime-config: ${RUNTIME_CONFIG}" KUBE_API_VERSIONS="${KUBE_API_VERSIONS}" \ "${KUBE_OUTPUT_HOSTBIN}/kube-apiserver" \ diff --git a/pkg/api/serialization_test.go b/pkg/api/serialization_test.go index 03b9875ab93..ba3b0b86bc9 100644 --- a/pkg/api/serialization_test.go +++ b/pkg/api/serialization_test.go @@ -35,7 +35,7 @@ import ( flag "github.com/spf13/pflag" ) -var fuzzIters = flag.Int("fuzz_iters", 20, "How many fuzzing iterations to do.") +var fuzzIters = flag.Int("fuzz-iters", 20, "How many fuzzing iterations to do.") func fuzzInternalObject(t *testing.T, forVersion string, item runtime.Object, seed int64) runtime.Object { apitesting.FuzzerFor(t, forVersion, rand.NewSource(seed)).Fuzz(item) diff --git a/pkg/controller/node/nodecontroller.go b/pkg/controller/node/nodecontroller.go index a90ddf87987..09ecdf82209 100644 --- a/pkg/controller/node/nodecontroller.go +++ b/pkg/controller/node/nodecontroller.go @@ -65,7 +65,7 @@ type NodeController struct { // sync node status in this case, but will monitor node status updated from kubelet. If // it doesn't receive update for this amount of time, it will start posting "NodeReady== // ConditionUnknown". The amount of time before which NodeController start evicting pods - // is controlled via flag 'pod_eviction_timeout'. + // is controlled via flag 'pod-eviction-timeout'. 
// Note: be cautious when changing the constant, it must work with nodeStatusUpdateFrequency // in kubelet. There are several constraints: // 1. nodeMonitorGracePeriod must be N times more than nodeStatusUpdateFrequency, where diff --git a/pkg/conversion/scheme_test.go b/pkg/conversion/scheme_test.go index 9a94c6f5b38..05fd320696c 100644 --- a/pkg/conversion/scheme_test.go +++ b/pkg/conversion/scheme_test.go @@ -30,7 +30,7 @@ import ( flag "github.com/spf13/pflag" ) -var fuzzIters = flag.Int("fuzz_iters", 50, "How many fuzzing iterations to do.") +var fuzzIters = flag.Int("fuzz-iters", 50, "How many fuzzing iterations to do.") // Test a weird version/kind embedding format. type MyWeirdCustomEmbeddedVersionKindField struct { diff --git a/pkg/kubelet/container_manager_linux.go b/pkg/kubelet/container_manager_linux.go index 768598c846d..914d4a76e1c 100644 --- a/pkg/kubelet/container_manager_linux.go +++ b/pkg/kubelet/container_manager_linux.go @@ -229,7 +229,7 @@ func ensureDockerInContainer(cadvisor cadvisor.Interface, oomScoreAdj int, manag } } - // Also apply oom_score_adj to processes + // Also apply oom_score_adj to processes oomAdjuster := oom.NewOomAdjuster() if err := oomAdjuster.ApplyOomScoreAdj(pid, oomScoreAdj); err != nil { errs = append(errs, fmt.Errorf("failed to apply oom score %d to PID %d", oomScoreAdj, pid)) diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index d3d12884785..ded82d9c065 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -1740,7 +1740,7 @@ func (kl *Kubelet) admitPods(allPods []*api.Pod, podSyncTypes map[types.UID]Sync // three channels (file, apiserver, and http) and creates a union of them. For // any new change seen, will run a sync against desired state and running state. If // no changes are seen to the configuration, will synchronize the last known desired -// state every sync_frequency seconds. Never returns. 
func (kl *Kubelet) syncLoop(updates <-chan PodUpdate, handler SyncHandler) { glog.Info("Starting kubelet main sync loop.") for { diff --git a/pkg/proxy/userspace/proxier.go b/pkg/proxy/userspace/proxier.go index f7fab54bda2..fd8ab8b20e0 100644 --- a/pkg/proxy/userspace/proxier.go +++ b/pkg/proxy/userspace/proxier.go @@ -582,7 +582,7 @@ var iptablesHostNodePortChain iptables.Chain = "KUBE-NODEPORT-HOST" // Ensure that the iptables infrastructure we use is set up. This can safely be called periodically. func iptablesInit(ipt iptables.Interface) error { // TODO: There is almost certainly room for optimization here. E.g. If - // we knew the service_cluster_ip_range CIDR we could fast-track outbound packets not + // we knew the service-cluster-ip-range CIDR we could fast-track outbound packets not // destined for a service. There's probably more, help wanted. // Danger - order of these rules matters here: @@ -602,7 +602,7 @@ func iptablesInit(ipt iptables.Interface) error { // the NodePort would take priority (incorrectly). // This is unlikely (and would only affect outgoing traffic from the cluster to the load balancer, which seems // doubly-unlikely), but we need to be careful to keep the rules in the right order. - args := []string{ /* service_cluster_ip_range matching could go here */ } + args := []string{ /* service-cluster-ip-range matching could go here */ } args = append(args, "-m", "comment", "--comment", "handle ClusterIPs; NOTE: this must be before the NodePort rules") if _, err := ipt.EnsureChain(iptables.TableNAT, iptablesContainerPortalChain); err != nil { return err